v4.19.13 snapshot.
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
new file mode 100644
index 0000000..a8c4ce0
--- /dev/null
+++ b/drivers/crypto/Kconfig
@@ -0,0 +1,764 @@
+
+menuconfig CRYPTO_HW
+	bool "Hardware crypto devices"
+	default y
+	---help---
+	  Say Y here to get to see options for hardware crypto devices and
+	  processors. This option alone does not add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped and disabled.
+
+if CRYPTO_HW
+
+config CRYPTO_DEV_PADLOCK
+	tristate "Support for VIA PadLock ACE"
+	depends on X86 && !UML
+	help
+	  Some VIA processors come with an integrated crypto engine
+	  (so called VIA PadLock ACE, Advanced Cryptography Engine)
+	  that provides instructions for very fast cryptographic
+	  operations with supported algorithms.
+
+	  The instructions are used only when the CPU supports them.
+	  Otherwise software encryption is used.
+
+config CRYPTO_DEV_PADLOCK_AES
+	tristate "PadLock driver for AES algorithm"
+	depends on CRYPTO_DEV_PADLOCK
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	help
+	  Use VIA PadLock for AES algorithm.
+
+	  Available in VIA C3 and newer CPUs.
+
+	  If unsure say M. The compiled module will be
+	  called padlock-aes.
+
+config CRYPTO_DEV_PADLOCK_SHA
+	tristate "PadLock driver for SHA1 and SHA256 algorithms"
+	depends on CRYPTO_DEV_PADLOCK
+	select CRYPTO_HASH
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	help
+	  Use VIA PadLock for SHA1/SHA256 algorithms.
+
+	  Available in VIA C7 and newer processors.
+
+	  If unsure say M. The compiled module will be
+	  called padlock-sha.
+
+config CRYPTO_DEV_GEODE
+	tristate "Support for the Geode LX AES engine"
+	depends on X86_32 && PCI
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	help
+	  Say 'Y' here to use the AMD Geode LX processor on-board AES
+	  engine for the CryptoAPI AES algorithm.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called geode-aes.
+
+config ZCRYPT
+	tristate "Support for s390 cryptographic adapters"
+	depends on S390
+	select HW_RANDOM
+	help
+	  Select this option if you want to enable support for
+	  s390 cryptographic adapters like:
+	  + PCI-X Cryptographic Coprocessor (PCIXCC)
+	  + Crypto Express 2,3,4 or 5 Coprocessor (CEXxC)
+	  + Crypto Express 2,3,4 or 5 Accelerator (CEXxA)
+	  + Crypto Express 4 or 5 EP11 Coprocessor (CEXxP)
+
+config PKEY
+	tristate "Kernel API for protected key handling"
+	depends on S390
+	depends on ZCRYPT
+	help
+	  With this option enabled the pkey kernel module provides an API
+	  for creation and handling of protected keys. Other parts of the
+	  kernel or userspace applications may use these functions.
+
+	  Select this option if you want to enable the kernel and userspace
+	  API for protected key handling.
+
+	  Please note that creation of protected keys from secure keys
+	  requires at least one CEX card in coprocessor mode to be
+	  available at runtime.
+
+config CRYPTO_PAES_S390
+	tristate "PAES cipher algorithms"
+	depends on S390
+	depends on ZCRYPT
+	depends on PKEY
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	help
+	  This is the s390 hardware accelerated implementation of the
+	  AES cipher algorithms for use with protected key.
+
+	  Select this option if you want to use the paes cipher,
+	  for example to use protected key encrypted devices.
+
+config CRYPTO_SHA1_S390
+	tristate "SHA1 digest algorithm"
+	depends on S390
+	select CRYPTO_HASH
+	help
+	  This is the s390 hardware accelerated implementation of the
+	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
+
+	  It is available as of z990.
+
+config CRYPTO_SHA256_S390
+	tristate "SHA256 digest algorithm"
+	depends on S390
+	select CRYPTO_HASH
+	help
+	  This is the s390 hardware accelerated implementation of the
+	  SHA256 secure hash standard (DFIPS 180-2).
+
+	  It is available as of z9.
+
+config CRYPTO_SHA512_S390
+	tristate "SHA384 and SHA512 digest algorithm"
+	depends on S390
+	select CRYPTO_HASH
+	help
+	  This is the s390 hardware accelerated implementation of the
+	  SHA512 secure hash standard.
+
+	  It is available as of z10.
+
+config CRYPTO_DES_S390
+	tristate "DES and Triple DES cipher algorithms"
+	depends on S390
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_DES
+	help
+	  This is the s390 hardware accelerated implementation of the
+	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
+
+	  As of z990 the ECB and CBC modes are hardware accelerated.
+	  As of z196 the CTR mode is hardware accelerated.
+
+config CRYPTO_AES_S390
+	tristate "AES cipher algorithms"
+	depends on S390
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	help
+	  This is the s390 hardware accelerated implementation of the
+	  AES cipher algorithms (FIPS-197).
+
+	  As of z9 the ECB and CBC modes are hardware accelerated
+	  for 128 bit keys.
+	  As of z10 the ECB and CBC modes are hardware accelerated
+	  for all AES key sizes.
+	  As of z196 the CTR mode is hardware accelerated for all AES
+	  key sizes and XTS mode is hardware accelerated for 256 and
+	  512 bit keys.
+
+config S390_PRNG
+	tristate "Pseudo random number generator device driver"
+	depends on S390
+	default "m"
+	help
+	  Select this option if you want to use the s390 pseudo random number
+	  generator. The PRNG is part of the cryptographic processor functions
+	  and uses triple-DES to generate secure random numbers following the
+	  ANSI X9.17 standard. User-space programs access the
+	  pseudo-random-number device through the char device /dev/prandom.
+
+	  It is available as of z9.
+
+config CRYPTO_GHASH_S390
+	tristate "GHASH digest algorithm"
+	depends on S390
+	select CRYPTO_HASH
+	help
+	  This is the s390 hardware accelerated implementation of the
+	  GHASH message digest algorithm for GCM (Galois/Counter Mode).
+
+	  It is available as of z196.
+
+config CRYPTO_CRC32_S390
+	tristate "CRC-32 algorithms"
+	depends on S390
+	select CRYPTO_HASH
+	select CRC32
+	help
+	  Select this option if you want to use hardware accelerated
+	  implementations of CRC algorithms.  With this option, you
+	  can optimize the computation of CRC-32 (IEEE 802.3 Ethernet)
+	  and CRC-32C (Castagnoli).
+
+	  It is available with IBM z13 or later.
+
+config CRYPTO_DEV_MARVELL_CESA
+	tristate "Marvell's Cryptographic Engine driver"
+	depends on PLAT_ORION || ARCH_MVEBU
+	select CRYPTO_AES
+	select CRYPTO_DES
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_HASH
+	select SRAM
+	help
+	  This driver allows you to utilize the Cryptographic Engines and
+	  Security Accelerator (CESA) which can be found on MVEBU and ORION
+	  platforms.
+	  This driver supports CPU offload through DMA transfers.
+
+config CRYPTO_DEV_NIAGARA2
+       tristate "Niagara2 Stream Processing Unit driver"
+       select CRYPTO_DES
+       select CRYPTO_BLKCIPHER
+       select CRYPTO_HASH
+       select CRYPTO_MD5
+       select CRYPTO_SHA1
+       select CRYPTO_SHA256
+       depends on SPARC64
+       help
+	  Each core of a Niagara2 processor contains a Stream
+	  Processing Unit, which itself contains several cryptographic
+	  sub-units.  One set provides the Modular Arithmetic Unit,
+	  used for SSL offload.  The other set provides the Cipher
+	  Group, which can perform encryption, decryption, hashing,
+	  checksumming, and raw copies.
+
+config CRYPTO_DEV_HIFN_795X
+	tristate "Driver HIFN 795x crypto accelerator chips"
+	select CRYPTO_DES
+	select CRYPTO_BLKCIPHER
+	select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG
+	depends on PCI
+	depends on !ARCH_DMA_ADDR_T_64BIT
+	help
+	  This option allows you to have support for HIFN 795x crypto adapters.
+
+config CRYPTO_DEV_HIFN_795X_RNG
+	bool "HIFN 795x random number generator"
+	depends on CRYPTO_DEV_HIFN_795X
+	help
+	  Select this option if you want to enable the random number generator
+	  on the HIFN 795x crypto adapters.
+
+source drivers/crypto/caam/Kconfig
+
+config CRYPTO_DEV_TALITOS
+	tristate "Talitos Freescale Security Engine (SEC)"
+	select CRYPTO_AEAD
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_HASH
+	select HW_RANDOM
+	depends on FSL_SOC
+	help
+	  Say 'Y' here to use the Freescale Security Engine (SEC)
+	  to offload cryptographic algorithm computation.
+
+	  The Freescale SEC is present on PowerQUICC 'E' processors, such
+	  as the MPC8349E and MPC8548E.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called talitos.
+
+config CRYPTO_DEV_TALITOS1
+	bool "SEC1 (SEC 1.0 and SEC Lite 1.2)"
+	depends on CRYPTO_DEV_TALITOS
+	depends on PPC_8xx || PPC_82xx
+	default y
+	help
+	  Say 'Y' here to use the Freescale Security Engine (SEC) version 1.0
+	  found on MPC82xx or the Freescale Security Engine (SEC Lite)
+	  version 1.2 found on MPC8xx.
+
+config CRYPTO_DEV_TALITOS2
+	bool "SEC2+ (SEC version 2.0 or upper)"
+	depends on CRYPTO_DEV_TALITOS
+	default y if !PPC_8xx
+	help
+	  Say 'Y' here to use the Freescale Security Engine (SEC)
+	  version 2 and following as found on MPC83xx, MPC85xx, etc ...
+
+config CRYPTO_DEV_IXP4XX
+	tristate "Driver for IXP4xx crypto hardware acceleration"
+	depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
+	select CRYPTO_DES
+	select CRYPTO_AEAD
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	help
+	  Driver for the IXP4xx NPE crypto engine.
+
+config CRYPTO_DEV_PPC4XX
+	tristate "Driver AMCC PPC4xx crypto accelerator"
+	depends on PPC && 4xx
+	select CRYPTO_HASH
+	select CRYPTO_AEAD
+	select CRYPTO_AES
+	select CRYPTO_CCM
+	select CRYPTO_CTR
+	select CRYPTO_GCM
+	select CRYPTO_BLKCIPHER
+	help
+	  This option allows you to have support for AMCC crypto acceleration.
+
+config HW_RANDOM_PPC4XX
+	bool "PowerPC 4xx generic true random number generator support"
+	depends on CRYPTO_DEV_PPC4XX && HW_RANDOM
+	default y
+	---help---
+	 This option provides the kernel-side support for the TRNG hardware
+	 found in the security function of some PowerPC 4xx SoCs.
+
+config CRYPTO_DEV_OMAP
+	tristate "Support for OMAP crypto HW accelerators"
+	depends on ARCH_OMAP2PLUS
+	help
+	  OMAP processors have various crypto HW accelerators. Select this if
+	  you want to use the OMAP modules for any of the crypto algorithms.
+
+if CRYPTO_DEV_OMAP
+
+config CRYPTO_DEV_OMAP_SHAM
+	tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
+	depends on ARCH_OMAP2PLUS
+	select CRYPTO_SHA1
+	select CRYPTO_MD5
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	select CRYPTO_HMAC
+	help
+	  OMAP processors have an MD5/SHA1/SHA2 hw accelerator. Select this if you
+	  want to use the OMAP module for MD5/SHA1/SHA2 algorithms.
+
+config CRYPTO_DEV_OMAP_AES
+	tristate "Support for OMAP AES hw engine"
+	depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS
+	select CRYPTO_AES
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_ENGINE
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_CTR
+	select CRYPTO_AEAD
+	help
+	  OMAP processors have an AES hw accelerator module. Select this if you
+	  want to use the OMAP module for AES algorithms.
+
+config CRYPTO_DEV_OMAP_DES
+	tristate "Support for OMAP DES/3DES hw engine"
+	depends on ARCH_OMAP2PLUS
+	select CRYPTO_DES
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_ENGINE
+	help
+	  OMAP processors have a DES/3DES hw accelerator module. Select this if you
+	  want to use the OMAP module for DES and 3DES algorithms. Currently
+	  the ECB and CBC modes of operation are supported by the driver. Also
+	  accesses made on unaligned boundaries are supported.
+
+endif # CRYPTO_DEV_OMAP
+
+config CRYPTO_DEV_PICOXCELL
+	tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
+	depends on (ARCH_PICOXCELL || COMPILE_TEST) && HAVE_CLK
+	select CRYPTO_AEAD
+	select CRYPTO_AES
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_DES
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_SEQIV
+	help
+	  This option enables support for the hardware offload engines in the
+	  Picochip picoXcell SoC devices. Select this for IPSEC ESP offload
+	  and for 3GPP Layer 2 ciphering support.
+
+	  Saying m here will build a module named picoxcell_crypto.
+
+config CRYPTO_DEV_SAHARA
+	tristate "Support for SAHARA crypto accelerator"
+	depends on ARCH_MXC && OF
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	select CRYPTO_ECB
+	help
+	  This option enables support for the SAHARA HW crypto accelerator
+	  found in some Freescale i.MX chips.
+
+config CRYPTO_DEV_MXC_SCC
+	tristate "Support for Freescale Security Controller (SCC)"
+	depends on ARCH_MXC && OF
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_DES
+	help
+	  This option enables support for the Security Controller (SCC)
+	  found in Freescale i.MX25 chips.
+
+config CRYPTO_DEV_EXYNOS_RNG
+	tristate "EXYNOS HW pseudo random number generator support"
+	depends on ARCH_EXYNOS || COMPILE_TEST
+	depends on HAS_IOMEM
+	select CRYPTO_RNG
+	---help---
+	  This driver provides kernel-side support through the
+	  cryptographic API for the pseudo random number generator hardware
+	  found on Exynos SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called exynos-rng.
+
+	  If unsure, say Y.
+
+config CRYPTO_DEV_S5P
+	tristate "Support for Samsung S5PV210/Exynos crypto accelerator"
+	depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+	depends on HAS_IOMEM
+	select CRYPTO_AES
+	select CRYPTO_BLKCIPHER
+	help
+	  This option allows you to have support for S5P crypto acceleration.
+	  Select this to offload AES algorithm execution from the Samsung
+	  S5PV210, S5PC110 and Exynos CPUs.
+
+config CRYPTO_DEV_EXYNOS_HASH
+	bool "Support for Samsung Exynos HASH accelerator"
+	depends on CRYPTO_DEV_S5P
+	depends on !CRYPTO_DEV_EXYNOS_RNG && CRYPTO_DEV_EXYNOS_RNG!=m
+	select CRYPTO_SHA1
+	select CRYPTO_MD5
+	select CRYPTO_SHA256
+	help
+	  Select this to offload MD5/SHA1/SHA256 hash computation from the
+	  Exynos CPU. This will select the software SHA1, MD5 and SHA256
+	  implementations as they are needed for small and zero-size messages.
+	  HASH algorithms will be disabled if EXYNOS_RNG is enabled due to a
+	  hw conflict.
+
+config CRYPTO_DEV_NX
+	bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration"
+	depends on PPC64
+	help
+	  This enables support for the NX hardware cryptographic accelerator
+	  coprocessor that is in IBM PowerPC P7+ or later processors.  This
+	  does not actually enable any drivers, it only allows you to select
+	  which acceleration type (encryption and/or compression) to enable.
+
+if CRYPTO_DEV_NX
+	source "drivers/crypto/nx/Kconfig"
+endif
+
+config CRYPTO_DEV_UX500
+	tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration"
+	depends on ARCH_U8500
+	help
+	  Driver for ST-Ericsson UX500 crypto engine.
+
+if CRYPTO_DEV_UX500
+	source "drivers/crypto/ux500/Kconfig"
+endif # if CRYPTO_DEV_UX500
+
+config CRYPTO_DEV_ATMEL_AUTHENC
+	tristate "Support for Atmel IPSEC/SSL hw accelerator"
+	depends on ARCH_AT91 || COMPILE_TEST
+	select CRYPTO_AUTHENC
+	select CRYPTO_DEV_ATMEL_AES
+	select CRYPTO_DEV_ATMEL_SHA
+	help
+	  Some Atmel processors can combine the AES and SHA hw accelerators
+	  to enhance support of IPSEC/SSL.
+	  Select this if you want to use the Atmel modules for
+	  authenc(hmac(shaX),Y(cbc)) algorithms.
+
+config CRYPTO_DEV_ATMEL_AES
+	tristate "Support for Atmel AES hw accelerator"
+	depends on ARCH_AT91 || COMPILE_TEST
+	select CRYPTO_AES
+	select CRYPTO_AEAD
+	select CRYPTO_BLKCIPHER
+	help
+	  Some Atmel processors have an AES hw accelerator.
+	  Select this if you want to use the Atmel module for
+	  AES algorithms.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called atmel-aes.
+
+config CRYPTO_DEV_ATMEL_TDES
+	tristate "Support for Atmel DES/TDES hw accelerator"
+	depends on ARCH_AT91 || COMPILE_TEST
+	select CRYPTO_DES
+	select CRYPTO_BLKCIPHER
+	help
+	  Some Atmel processors have a DES/TDES hw accelerator.
+	  Select this if you want to use the Atmel module for
+	  DES/TDES algorithms.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called atmel-tdes.
+
+config CRYPTO_DEV_ATMEL_SHA
+	tristate "Support for Atmel SHA hw accelerator"
+	depends on ARCH_AT91 || COMPILE_TEST
+	select CRYPTO_HASH
+	help
+	  Some Atmel processors have a SHA1/SHA224/SHA256/SHA384/SHA512
+	  hw accelerator.
+	  Select this if you want to use the Atmel module for
+	  SHA1/SHA224/SHA256/SHA384/SHA512 algorithms.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called atmel-sha.
+
+config CRYPTO_DEV_ATMEL_ECC
+	tristate "Support for Microchip / Atmel ECC hw accelerator"
+	depends on ARCH_AT91 || COMPILE_TEST
+	depends on I2C
+	select CRYPTO_ECDH
+	select CRC16
+	help
+	  Microchip / Atmel ECC hw accelerator.
+	  Select this if you want to use the Microchip / Atmel module for
+	  ECDH algorithm.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called atmel-ecc.
+
+config CRYPTO_DEV_CCP
+	bool "Support for AMD Secure Processor"
+	depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
+	help
+	  The AMD Secure Processor provides support for the Cryptographic Coprocessor
+	  (CCP) and the Platform Security Processor (PSP) devices.
+
+if CRYPTO_DEV_CCP
+	source "drivers/crypto/ccp/Kconfig"
+endif
+
+config CRYPTO_DEV_MXS_DCP
+	tristate "Support for Freescale MXS DCP"
+	depends on (ARCH_MXS || ARCH_MXC)
+	select STMP_DEVICE
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_AES
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_HASH
+	help
+	  The Freescale i.MX23/i.MX28 has an on-die SHA1/SHA256 and AES-128
+	  CBC/ECB co-processor.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called mxs-dcp.
+
+source "drivers/crypto/qat/Kconfig"
+source "drivers/crypto/cavium/cpt/Kconfig"
+source "drivers/crypto/cavium/nitrox/Kconfig"
+
+config CRYPTO_DEV_CAVIUM_ZIP
+	tristate "Cavium ZIP driver"
+	depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
+	---help---
+	  Select this option if you want to enable compression/decompression
+	  acceleration on Cavium's ARM-based SoCs.
+
+config CRYPTO_DEV_QCE
+	tristate "Qualcomm crypto engine accelerator"
+	depends on ARCH_QCOM || COMPILE_TEST
+	depends on HAS_IOMEM
+	select CRYPTO_AES
+	select CRYPTO_DES
+	select CRYPTO_ECB
+	select CRYPTO_CBC
+	select CRYPTO_XTS
+	select CRYPTO_CTR
+	select CRYPTO_BLKCIPHER
+	help
+	  This driver supports Qualcomm crypto engine accelerator
+	  hardware. To compile this driver as a module, choose M here. The
+	  module will be called qcrypto.
+
+config CRYPTO_DEV_QCOM_RNG
+	tristate "Qualcomm Random Number Generator Driver"
+	depends on ARCH_QCOM || COMPILE_TEST
+	select CRYPTO_RNG
+	help
+	  This driver provides support for the Random Number
+	  Generator hardware found on Qualcomm SoCs.
+
+	  To compile this driver as a module, choose M here. The
+	  module will be called qcom-rng. If unsure, say N.
+
+config CRYPTO_DEV_VMX
+	bool "Support for VMX cryptographic acceleration instructions"
+	depends on PPC64 && VSX
+	help
+	  Support for VMX cryptographic acceleration instructions.
+
+source "drivers/crypto/vmx/Kconfig"
+
+config CRYPTO_DEV_IMGTEC_HASH
+	tristate "Imagination Technologies hardware hash accelerator"
+	depends on MIPS || COMPILE_TEST
+	select CRYPTO_MD5
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_HASH
+	help
+	  This driver interfaces with the Imagination Technologies
+	  hardware hash accelerator, supporting the MD5/SHA1/SHA224/SHA256
+	  hashing algorithms.
+
+config CRYPTO_DEV_SUN4I_SS
+	tristate "Support for Allwinner Security System cryptographic accelerator"
+	depends on ARCH_SUNXI && !64BIT
+	select CRYPTO_MD5
+	select CRYPTO_SHA1
+	select CRYPTO_AES
+	select CRYPTO_DES
+	select CRYPTO_BLKCIPHER
+	help
+	  Some Allwinner SoCs have a crypto accelerator named
+	  Security System. Select this if you want to use it.
+	  The Security System handles AES/DES/3DES ciphers in CBC mode
+	  as well as the SHA1 and MD5 hash algorithms.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sun4i-ss.
+
+config CRYPTO_DEV_SUN4I_SS_PRNG
+	bool "Support for Allwinner Security System PRNG"
+	depends on CRYPTO_DEV_SUN4I_SS
+	select CRYPTO_RNG
+	help
+	  Select this option if you want to provide kernel-side support for
+	  the Pseudo-Random Number Generator found in the Security System.
+
+config CRYPTO_DEV_ROCKCHIP
+	tristate "Rockchip's Cryptographic Engine driver"
+	depends on OF && ARCH_ROCKCHIP
+	select CRYPTO_AES
+	select CRYPTO_DES
+	select CRYPTO_MD5
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_HASH
+	select CRYPTO_BLKCIPHER
+	help
+	  This driver interfaces with the Rockchip hardware crypto accelerator,
+	  supporting the CBC/ECB chaining modes with the AES/DES/DES3_EDE ciphers.
+
+config CRYPTO_DEV_MEDIATEK
+	tristate "MediaTek's EIP97 Cryptographic Engine driver"
+	depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
+	select CRYPTO_AES
+	select CRYPTO_AEAD
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_CTR
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	select CRYPTO_HMAC
+	help
+	  This driver allows you to utilize the hardware crypto accelerator
+	  EIP97, which can be found on the MT7623, MT2701, MT8521p, etc.
+	  Select this if you want to use it for AES/SHA1/SHA2 algorithms.
+
+source "drivers/crypto/chelsio/Kconfig"
+
+source "drivers/crypto/virtio/Kconfig"
+
+config CRYPTO_DEV_BCM_SPU
+	tristate "Broadcom symmetric crypto/hash acceleration support"
+	depends on ARCH_BCM_IPROC
+	depends on MAILBOX
+	default m
+	select CRYPTO_DES
+	select CRYPTO_MD5
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	help
+	  This driver provides support for Broadcom crypto acceleration using the
+	  Secure Processing Unit (SPU). The SPU driver registers ablkcipher,
+	  ahash, and aead algorithms with the kernel cryptographic API.
+
+source "drivers/crypto/stm32/Kconfig"
+
+config CRYPTO_DEV_SAFEXCEL
+	tristate "Inside Secure's SafeXcel cryptographic engine driver"
+	depends on OF
+	depends on (ARM64 && ARCH_MVEBU) || (COMPILE_TEST && 64BIT)
+	select CRYPTO_AES
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_DES
+	select CRYPTO_HASH
+	select CRYPTO_HMAC
+	select CRYPTO_MD5
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	help
+	  This driver interfaces with the SafeXcel EIP-197 cryptographic engine
+	  designed by Inside Secure. Select this if you want to use CBC/ECB
+	  chain mode, AES cipher mode and SHA1/SHA224/SHA256/SHA512 hash
+	  algorithms.
+
+config CRYPTO_DEV_ARTPEC6
+	tristate "Support for Axis ARTPEC-6/7 hardware crypto acceleration."
+	depends on ARM && (ARCH_ARTPEC || COMPILE_TEST)
+	depends on OF
+	select CRYPTO_AEAD
+	select CRYPTO_AES
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_CTR
+	select CRYPTO_HASH
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	help
+	  Enables the driver for the on-chip crypto accelerator
+	  of Axis ARTPEC SoCs.
+
+	  To compile this driver as a module, choose M here.
+
+config CRYPTO_DEV_CCREE
+	tristate "Support for ARM TrustZone CryptoCell family of security processors"
+	depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
+	default n
+	select CRYPTO_HASH
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_DES
+	select CRYPTO_AEAD
+	select CRYPTO_AUTHENC
+	select CRYPTO_SHA1
+	select CRYPTO_MD5
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	select CRYPTO_HMAC
+	select CRYPTO_AES
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_CTR
+	select CRYPTO_XTS
+	help
+	  Say 'Y' to enable a driver for the REE interface of the Arm
+	  TrustZone CryptoCell family of processors. Currently the
+	  CryptoCell 712, 710 and 630 are supported.
+	  Choose this if you wish to use hardware acceleration of
+	  cryptographic operations on the system REE.
+	  If unsure say Y.
+
+source "drivers/crypto/hisilicon/Kconfig"
+
+endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
new file mode 100644
index 0000000..c23396f
--- /dev/null
+++ b/drivers/crypto/Makefile
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
+obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
+obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
+obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
+obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
+obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
+obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
+obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
+obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
+obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
+obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
+obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
+obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
+obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
+obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
+n2_crypto-y := n2_core.o n2_asm.o
+obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
+obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
+omap-aes-driver-objs := omap-aes.o omap-aes-gcm.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
+obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
+obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
+obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
+obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
+obj-$(CONFIG_ARCH_STM32) += stm32/
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
+obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
+obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
+obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
+obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
+obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
+obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
+obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
+obj-y += hisilicon/
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
new file mode 100644
index 0000000..e33c185
--- /dev/null
+++ b/drivers/crypto/amcc/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
+crypto4xx-y :=  crypto4xx_core.o crypto4xx_alg.o
+crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
new file mode 100644
index 0000000..f5c0749
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -0,0 +1,740 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file implements the Linux crypto algorithms.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/dma-mapping.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+#include <crypto/sha.h>
+#include <crypto/ctr.h>
+#include <crypto/skcipher.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_core.h"
+#include "crypto4xx_sa.h"
+
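+/*
+ * Fill in the first SA command word; each parameter maps onto one of the
+ * sa_command_0 bit-fields (save/load of hash state and IV, header
+ * processing, hash/cipher algorithm, padding, opcode group and direction).
+ */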
+static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
+				     u32 save_iv, u32 ld_h, u32 ld_iv,
+				     u32 hdr_proc, u32 h, u32 c, u32 pad_type,
+				     u32 op_grp, u32 op, u32 dir)
+{
+	sa->sa_command_0.w = 0;
+	sa->sa_command_0.bf.save_hash_state = save_h;
+	sa->sa_command_0.bf.save_iv = save_iv;
+	sa->sa_command_0.bf.load_hash_state = ld_h;
+	sa->sa_command_0.bf.load_iv = ld_iv;
+	sa->sa_command_0.bf.hdr_proc = hdr_proc;
+	sa->sa_command_0.bf.hash_alg = h;
+	sa->sa_command_0.bf.cipher_alg = c;
+	sa->sa_command_0.bf.pad_type = pad_type & 3;
+	sa->sa_command_0.bf.extend_pad = pad_type >> 2;
+	sa->sa_command_0.bf.op_group = op_grp;
+	sa->sa_command_0.bf.opcode = op;
+	sa->sa_command_0.bf.dir = dir;
+}
+
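+/*
+ * Fill in the second SA command word: crypto mode bits, feedback mode,
+ * HMAC muting, sequence-number handling and the copy pad/payload/header
+ * controls used by the packet engine.
+ */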
+static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
+				     u32 hmac_mc, u32 cfb, u32 esn,
+				     u32 sn_mask, u32 mute, u32 cp_pad,
+				     u32 cp_pay, u32 cp_hdr)
+{
+	sa->sa_command_1.w = 0;
+	sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
+	sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
+	sa->sa_command_1.bf.feedback_mode = cfb;
+	sa->sa_command_1.bf.sa_rev = 1;
+	sa->sa_command_1.bf.hmac_muting = hmac_mc;
+	sa->sa_command_1.bf.extended_seq_num = esn;
+	sa->sa_command_1.bf.seq_num_mask = sn_mask;
+	sa->sa_command_1.bf.mutable_bit_proc = mute;
+	sa->sa_command_1.bf.copy_pad = cp_pad;
+	sa->sa_command_1.bf.copy_payload = cp_pay;
+	sa->sa_command_1.bf.copy_hdr = cp_hdr;
+}
+
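+/*
+ * Common skcipher entry point: copy the (optional) IV into little-endian
+ * form and queue the request to the packet engine via crypto4xx_build_pd(),
+ * using the inbound SA for decryption and the outbound SA for encryption.
+ */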
+static inline int crypto4xx_crypt(struct skcipher_request *req,
+				  const unsigned int ivlen, bool decrypt)
+{
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+	__le32 iv[AES_IV_SIZE];
+
+	if (ivlen)
+		crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
+
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+		req->cryptlen, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out,
+		ctx->sa_len, 0, NULL);
+}
+
+int crypto4xx_encrypt_noiv(struct skcipher_request *req)
+{
+	return crypto4xx_crypt(req, 0, false);
+}
+
+int crypto4xx_encrypt_iv(struct skcipher_request *req)
+{
+	return crypto4xx_crypt(req, AES_IV_SIZE, false);
+}
+
+int crypto4xx_decrypt_noiv(struct skcipher_request *req)
+{
+	return crypto4xx_crypt(req, 0, true);
+}
+
+int crypto4xx_decrypt_iv(struct skcipher_request *req)
+{
+	return crypto4xx_crypt(req, AES_IV_SIZE, true);
+}
+
+/**
+ * AES Functions
+ */
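+/*
+ * Build the inbound and outbound dynamic SAs for an AES skcipher: validate
+ * the key length, allocate the SA pair, program both command words for the
+ * requested cipher/feedback mode and copy in the key material.
+ */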
+static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
+				const u8 *key,
+				unsigned int keylen,
+				unsigned char cm,
+				u8 fb)
+{
+	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct dynamic_sa_ctl *sa;
+	int    rc;
+
+	if (keylen != AES_KEYSIZE_256 &&
+		keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
+		crypto_skcipher_set_flags(cipher,
+				CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	/* Create SA */
+	if (ctx->sa_in || ctx->sa_out)
+		crypto4xx_free_sa(ctx);
+
+	rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
+	if (rc)
+		return rc;
+
+	/* Setup SA */
+	sa = ctx->sa_in;
+
+	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
+				 SA_SAVE_IV : SA_NOT_SAVE_IV),
+				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+				 SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
+				 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+				 SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
+				 DIR_INBOUND);
+
+	set_dynamic_sa_command_1(sa, cm, SA_HASH_MODE_HASH,
+				 fb, SA_EXTENDED_SN_OFF,
+				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+				 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+				 SA_NOT_COPY_HDR);
+	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+				 key, keylen);
+	sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
+	sa->sa_command_1.bf.key_len = keylen >> 3;
+
+	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+	sa = ctx->sa_out;
+	sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+	return 0;
+}
+
+int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CBC,
+				    CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
+				    CRYPTO_FEEDBACK_MODE_128BIT_CFB);
+}
+
+int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
+				    CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
+				    CRYPTO_FEEDBACK_MODE_64BIT_OFB);
+}
+
+int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+	int rc;
+
+	rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE,
+		CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
+	if (rc)
+		return rc;
+
+	ctx->iv_nonce = cpu_to_le32p((u32 *)&key[keylen -
+						 CTR_RFC3686_NONCE_SIZE]);
+
+	return 0;
+}
+
+int crypto4xx_rfc3686_encrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+	__le32 iv[AES_IV_SIZE / 4] = {
+		ctx->iv_nonce,
+		cpu_to_le32p((u32 *) req->iv),
+		cpu_to_le32p((u32 *) (req->iv + 4)),
+		cpu_to_le32(1) };
+
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+				  req->cryptlen, iv, AES_IV_SIZE,
+				  ctx->sa_out, ctx->sa_len, 0, NULL);
+}
+
+int crypto4xx_rfc3686_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+	__le32 iv[AES_IV_SIZE / 4] = {
+		ctx->iv_nonce,
+		cpu_to_le32p((u32 *) req->iv),
+		cpu_to_le32p((u32 *) (req->iv + 4)),
+		cpu_to_le32(1) };
+
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+				  req->cryptlen, iv, AES_IV_SIZE,
+				  ctx->sa_out, ctx->sa_len, 0, NULL);
+}
+
+static int
+crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
+{
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+	size_t iv_len = crypto_skcipher_ivsize(cipher);
+	unsigned int counter = be32_to_cpup((__be32 *)(req->iv + iv_len - 4));
+	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
+			AES_BLOCK_SIZE;
+
+	/*
+	 * The hardware uses only the last 32-bits as the counter while the
+	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
+	 * the whole IV is a counter.  So fallback if the counter is going to
+	 * overflow.
+	 */
+	if (counter + nblks < counter) {
+		struct skcipher_request *subreq = skcipher_request_ctx(req);
+		int ret;
+
+		skcipher_request_set_tfm(subreq, ctx->sw_cipher.cipher);
+		skcipher_request_set_callback(subreq, req->base.flags,
+			NULL, NULL);
+		skcipher_request_set_crypt(subreq, req->src, req->dst,
+			req->cryptlen, req->iv);
+		ret = encrypt ? crypto_skcipher_encrypt(subreq)
+			: crypto_skcipher_decrypt(subreq);
+		skcipher_request_zero(subreq);
+		return ret;
+	}
+
+	return encrypt ? crypto4xx_encrypt_iv(req)
+		       : crypto4xx_decrypt_iv(req);
+}
+
+static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
+				       struct crypto_skcipher *cipher,
+				       const u8 *key,
+				       unsigned int keylen)
+{
+	int rc;
+
+	crypto_skcipher_clear_flags(ctx->sw_cipher.cipher,
+				    CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ctx->sw_cipher.cipher,
+		crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
+	rc = crypto_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
+	crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
+	crypto_skcipher_set_flags(cipher,
+		crypto_skcipher_get_flags(ctx->sw_cipher.cipher) &
+			CRYPTO_TFM_RES_MASK);
+
+	return rc;
+}
+
+int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+	int rc;
+
+	rc = crypto4xx_sk_setup_fallback(ctx, cipher, key, keylen);
+	if (rc)
+		return rc;
+
+	return crypto4xx_setkey_aes(cipher, key, keylen,
+		CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_encrypt_ctr(struct skcipher_request *req)
+{
+	return crypto4xx_ctr_crypt(req, true);
+}
+
+int crypto4xx_decrypt_ctr(struct skcipher_request *req)
+{
+	return crypto4xx_ctr_crypt(req, false);
+}
+
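+/*
+ * Decide whether an AEAD request has to go to the software fallback: the
+ * engine cannot handle authsizes that are not a multiple of 4, plaintext
+ * shorter than one AES block, unaligned or overly long assoclen, or CCM
+ * counter field lengths other than 2 and 4 bytes.
+ */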
+static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
+						unsigned int len,
+						bool is_ccm, bool decrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+	/* authsize has to be a multiple of 4 */
+	if (aead->authsize & 3)
+		return true;
+
+	/*
+	 * hardware does not handle cases where plaintext
+	 * is less than a block.
+	 */
+	if (len < AES_BLOCK_SIZE)
+		return true;
+
+	/* assoc len needs to be a multiple of 4 and <= 1020 */
+	if (req->assoclen & 0x3 || req->assoclen > 1020)
+		return true;
+
+	/* CCM supports only counter field lengths of 2 and 4 bytes */
+	if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
+		return true;
+
+	return false;
+}
+
+static int crypto4xx_aead_fallback(struct aead_request *req,
+	struct crypto4xx_ctx *ctx, bool do_decrypt)
+{
+	struct aead_request *subreq = aead_request_ctx(req);
+
+	aead_request_set_tfm(subreq, ctx->sw_cipher.aead);
+	aead_request_set_callback(subreq, req->base.flags,
+				  req->base.complete, req->base.data);
+	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+			       req->iv);
+	aead_request_set_ad(subreq, req->assoclen);
+	return do_decrypt ? crypto_aead_decrypt(subreq) :
+			    crypto_aead_encrypt(subreq);
+}
+
+static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
+					 struct crypto_aead *cipher,
+					 const u8 *key,
+					 unsigned int keylen)
+{
+	int rc;
+
+	crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(ctx->sw_cipher.aead,
+		crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
+	rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
+	crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
+	crypto_aead_set_flags(cipher,
+		crypto_aead_get_flags(ctx->sw_cipher.aead) &
+			CRYPTO_TFM_RES_MASK);
+
+	return rc;
+}
+
+/**
+ * AES-CCM Functions
+ */
+
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
+			     unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct dynamic_sa_ctl *sa;
+	int rc = 0;
+
+	rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
+	if (rc)
+		return rc;
+
+	if (ctx->sa_in || ctx->sa_out)
+		crypto4xx_free_sa(ctx);
+
+	rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen - 16) / 4);
+	if (rc)
+		return rc;
+
+	/* Setup SA */
+	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+	sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
+
+	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+				 SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+				 SA_CIPHER_ALG_AES,
+				 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+				 SA_OPCODE_HASH_DECRYPT, DIR_INBOUND);
+
+	set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+				 SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+				 SA_NOT_COPY_HDR);
+
+	sa->sa_command_1.bf.key_len = keylen >> 3;
+
+	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen);
+
+	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+	sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+
+	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+				 SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+				 SA_CIPHER_ALG_AES,
+				 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+				 SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND);
+
+	set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+				 SA_COPY_PAD, SA_COPY_PAYLOAD,
+				 SA_NOT_COPY_HDR);
+
+	sa->sa_command_1.bf.key_len = keylen >> 3;
+	return 0;
+}
+
+static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
+{
+	struct crypto4xx_ctx *ctx  = crypto_tfm_ctx(req->base.tfm);
+	struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	__le32 iv[16];
+	u32 tmp_sa[SA_AES128_CCM_LEN + 4];
+	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
+	unsigned int len = req->cryptlen;
+
+	if (decrypt)
+		len -= crypto_aead_authsize(aead);
+
+	if (crypto4xx_aead_need_fallback(req, len, true, decrypt))
+		return crypto4xx_aead_fallback(req, ctx, decrypt);
+
+	memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, ctx->sa_len * 4);
+	sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;
+
+	if (req->iv[0] == 1) {
+		/* CRYPTO_MODE_AES_ICM */
+		sa->sa_command_1.bf.crypto_mode9_8 = 1;
+	}
+
+	iv[3] = cpu_to_le32(0);
+	crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1));
+
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+				  len, iv, sizeof(iv),
+				  sa, ctx->sa_len, req->assoclen, rctx->dst);
+}
+
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
+{
+	return crypto4xx_crypt_aes_ccm(req, false);
+}
+
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req)
+{
+	return crypto4xx_crypt_aes_ccm(req, true);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
+			       unsigned int authsize)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
+}
+
+/**
+ * AES-GCM Functions
+ */
+
+static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
+{
+	switch (keylen) {
+	case 16:
+	case 24:
+	case 32:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
+					     unsigned int keylen)
+{
+	struct crypto_cipher *aes_tfm = NULL;
+	uint8_t src[16] = { 0 };
+	int rc = 0;
+
+	aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC |
+				      CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(aes_tfm)) {
+		rc = PTR_ERR(aes_tfm);
+		pr_warn("could not load aes cipher driver: %d\n", rc);
+		return rc;
+	}
+
+	rc = crypto_cipher_setkey(aes_tfm, key, keylen);
+	if (rc) {
+		pr_err("setkey() failed: %d\n", rc);
+		goto out;
+	}
+
+	crypto_cipher_encrypt_one(aes_tfm, src, src);
+	crypto4xx_memcpy_to_le32(hash_start, src, 16);
+out:
+	crypto_free_cipher(aes_tfm);
+	return rc;
+}
+
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct dynamic_sa_ctl *sa;
+	int    rc = 0;
+
+	if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
+		crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
+	if (rc)
+		return rc;
+
+	if (ctx->sa_in || ctx->sa_out)
+		crypto4xx_free_sa(ctx);
+
+	rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen - 16) / 4);
+	if (rc)
+		return rc;
+
+	sa  = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+	sa->sa_contents.w = SA_AES_GCM_CONTENTS | (keylen << 2);
+	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+				 SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH,
+				 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+				 SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT,
+				 DIR_INBOUND);
+	set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+				 SA_SEQ_MASK_ON, SA_MC_DISABLE,
+				 SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+				 SA_NOT_COPY_HDR);
+
+	sa->sa_command_1.bf.key_len = keylen >> 3;
+
+	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+				 key, keylen);
+
+	rc = crypto4xx_compute_gcm_hash_key_sw(get_dynamic_sa_inner_digest(sa),
+		key, keylen);
+	if (rc) {
+		pr_err("GCM hash key setting failed = %d\n", rc);
+		goto err;
+	}
+
+	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+	sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+	sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+	sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH;
+
+	return 0;
+err:
+	crypto4xx_free_sa(ctx);
+	return rc;
+}
+
+static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req,
+					  bool decrypt)
+{
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
+	__le32 iv[4];
+	unsigned int len = req->cryptlen;
+
+	if (decrypt)
+		len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
+
+	if (crypto4xx_aead_need_fallback(req, len, false, decrypt))
+		return crypto4xx_aead_fallback(req, ctx, decrypt);
+
+	crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
+	iv[3] = cpu_to_le32(1);
+
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+				  len, iv, sizeof(iv),
+				  decrypt ? ctx->sa_in : ctx->sa_out,
+				  ctx->sa_len, req->assoclen, rctx->dst);
+}
+
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
+{
+	return crypto4xx_crypt_aes_gcm(req, false);
+}
+
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
+{
+	return crypto4xx_crypt_aes_gcm(req, true);
+}
+
+/**
+ * HASH SHA1 Functions
+ */
+static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
+				   unsigned int sa_len,
+				   unsigned char ha,
+				   unsigned char hm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct crypto4xx_alg *my_alg;
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct dynamic_sa_hash160 *sa;
+	int rc;
+
+	my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
+			      alg.u.hash);
+	ctx->dev   = my_alg->dev;
+
+	/* Create SA */
+	if (ctx->sa_in || ctx->sa_out)
+		crypto4xx_free_sa(ctx);
+
+	rc = crypto4xx_alloc_sa(ctx, sa_len);
+	if (rc)
+		return rc;
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct crypto4xx_ctx));
+	sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
+	set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+				 SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
+				 SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
+				 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+				 SA_OPCODE_HASH, DIR_INBOUND);
+	set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
+				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+				 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+				 SA_NOT_COPY_HDR);
+	/* Need to zero hash digest in SA */
+	memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
+	memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
+
+	return 0;
+}
+
+int crypto4xx_hash_init(struct ahash_request *req)
+{
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	int ds;
+	struct dynamic_sa_ctl *sa;
+
+	sa = ctx->sa_in;
+	ds = crypto_ahash_digestsize(
+			__crypto_ahash_cast(req->base.tfm));
+	sa->sa_command_0.bf.digest_len = ds >> 2;
+	sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
+
+	return 0;
+}
+
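+/*
+ * Note that each ->update() call below is pushed straight to the engine as
+ * a one-shot hash of req->src into req->result; ->final() is a no-op.
+ */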
+int crypto4xx_hash_update(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct scatterlist dst;
+	unsigned int ds = crypto_ahash_digestsize(ahash);
+
+	sg_init_one(&dst, req->result, ds);
+
+	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+				  req->nbytes, NULL, 0, ctx->sa_in,
+				  ctx->sa_len, 0, NULL);
+}
+
+int crypto4xx_hash_final(struct ahash_request *req)
+{
+	return 0;
+}
+
+int crypto4xx_hash_digest(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct scatterlist dst;
+	unsigned int ds = crypto_ahash_digestsize(ahash);
+
+	sg_init_one(&dst, req->result, ds);
+
+	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+				  req->nbytes, NULL, 0, ctx->sa_in,
+				  ctx->sa_len, 0, NULL);
+}
+
+/**
+ * SHA1 Algorithm
+ */
+int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
+{
+	return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
+				       SA_HASH_MODE_HASH);
+}
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
new file mode 100644
index 0000000..6eaec9b
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -0,0 +1,1467 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file implements AMCC crypto offload Linux device driver for use with
+ * Linux CryptoAPI.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/cacheflush.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/gcm.h>
+#include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_core.h"
+#include "crypto4xx_sa.h"
+#include "crypto4xx_trng.h"
+
+#define PPC4XX_SEC_VERSION_STR			"0.5"
+
+/**
+ * PPC4xx Crypto Engine Initialization Routine
+ */
+static void crypto4xx_hw_init(struct crypto4xx_device *dev)
+{
+	union ce_ring_size ring_size;
+	union ce_ring_control ring_ctrl;
+	union ce_part_ring_size part_ring_size;
+	union ce_io_threshold io_threshold;
+	u32 rand_num;
+	union ce_pe_dma_cfg pe_dma_cfg;
+	u32 device_ctrl;
+
+	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
+	/* set up PE DMA: reset sg, pdr and pe, then release the reset */
+	pe_dma_cfg.w = 0;
+	pe_dma_cfg.bf.bo_sgpd_en = 1;
+	pe_dma_cfg.bf.bo_data_en = 0;
+	pe_dma_cfg.bf.bo_sa_en = 1;
+	pe_dma_cfg.bf.bo_pd_en = 1;
+	pe_dma_cfg.bf.dynamic_sa_en = 1;
+	pe_dma_cfg.bf.reset_sg = 1;
+	pe_dma_cfg.bf.reset_pdr = 1;
+	pe_dma_cfg.bf.reset_pe = 1;
+	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+	/* take pe, sg and pdr out of reset */
+	pe_dma_cfg.bf.pe_mode = 0;
+	pe_dma_cfg.bf.reset_sg = 0;
+	pe_dma_cfg.bf.reset_pdr = 0;
+	pe_dma_cfg.bf.reset_pe = 0;
+	pe_dma_cfg.bf.bo_td_en = 0;
+	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
+	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
+	writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
+	get_random_bytes(&rand_num, sizeof(rand_num));
+	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
+	get_random_bytes(&rand_num, sizeof(rand_num));
+	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
+	ring_size.w = 0;
+	ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
+	ring_size.bf.ring_size   = PPC4XX_NUM_PD;
+	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
+	ring_ctrl.w = 0;
+	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
+	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+	device_ctrl |= PPC4XX_DC_3DES_EN;
+	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
+	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
+	part_ring_size.w = 0;
+	part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
+	part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
+	writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
+	writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
+	io_threshold.w = 0;
+	io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
+	io_threshold.bf.input_threshold  = PPC4XX_INPUT_THRESHOLD;
+	writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
+	writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
+	writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
+	writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
+	writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
+	writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
+	writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
+	writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
+	/* now enable the packet engine */
+	pe_dma_cfg.bf.pe_mode = 1;
+	pe_dma_cfg.bf.reset_sg = 0;
+	pe_dma_cfg.bf.reset_pdr = 0;
+	pe_dma_cfg.bf.reset_pe = 0;
+	pe_dma_cfg.bf.bo_td_en = 0;
+	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+	/* clear all pending interrupts */
+	writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
+	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
+	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
+	writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
+	if (dev->is_revb) {
+		writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
+		       dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
+		writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
+		       dev->ce_base + CRYPTO4XX_INT_EN);
+	} else {
+		writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+	}
+}
+
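+/*
+ * Allocate the inbound and outbound security association (SA) buffers for
+ * a tfm context; 'size' is given in 32-bit words.
+ */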
+int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
+{
+	ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
+	if (ctx->sa_in == NULL)
+		return -ENOMEM;
+
+	ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
+	if (ctx->sa_out == NULL) {
+		kfree(ctx->sa_in);
+		ctx->sa_in = NULL;
+		return -ENOMEM;
+	}
+
+	ctx->sa_len = size;
+
+	return 0;
+}
+
+void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
+{
+	kfree(ctx->sa_in);
+	ctx->sa_in = NULL;
+	kfree(ctx->sa_out);
+	ctx->sa_out = NULL;
+	ctx->sa_len = 0;
+}
+
+/**
+ * alloc memory for the packet descriptor ring (PDR), the per-descriptor
+ * uinfo array and the shadow SA/state-record pools that back each
+ * descriptor entry
+ */
+static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
+{
+	int i;
+	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
+				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+				      &dev->pdr_pa, GFP_ATOMIC);
+	if (!dev->pdr)
+		return -ENOMEM;
+
+	dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
+				 GFP_KERNEL);
+	if (!dev->pdr_uinfo) {
+		dma_free_coherent(dev->core_dev->device,
+				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+				  dev->pdr,
+				  dev->pdr_pa);
+		return -ENOMEM;
+	}
+	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
+	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
+				   sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
+				   &dev->shadow_sa_pool_pa,
+				   GFP_ATOMIC);
+	if (!dev->shadow_sa_pool)
+		return -ENOMEM;
+
+	dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
+			 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
+			 &dev->shadow_sr_pool_pa, GFP_ATOMIC);
+	if (!dev->shadow_sr_pool)
+		return -ENOMEM;
+	for (i = 0; i < PPC4XX_NUM_PD; i++) {
+		struct ce_pd *pd = &dev->pdr[i];
+		struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
+
+		pd->sa = dev->shadow_sa_pool_pa +
+			sizeof(union shadow_sa_buf) * i;
+
+		/* alloc 256 bytes which is enough for any kind of dynamic sa */
+		pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
+
+		/* alloc state record */
+		pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
+		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
+		    sizeof(struct sa_state_record) * i;
+	}
+
+	return 0;
+}
+
+static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
+{
+	if (dev->pdr)
+		dma_free_coherent(dev->core_dev->device,
+				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+				  dev->pdr, dev->pdr_pa);
+
+	if (dev->shadow_sa_pool)
+		dma_free_coherent(dev->core_dev->device,
+			sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
+			dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+
+	if (dev->shadow_sr_pool)
+		dma_free_coherent(dev->core_dev->device,
+			sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
+			dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
+
+	kfree(dev->pdr_uinfo);
+}
+
+static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
+{
+	u32 retval;
+	u32 tmp;
+
+	retval = dev->pdr_head;
+	tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
+
+	if (tmp == dev->pdr_tail)
+		return ERING_WAS_FULL;
+
+	dev->pdr_head = tmp;
+
+	return retval;
+}
+
+static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
+{
+	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+	u32 tail;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->core_dev->lock, flags);
+	pd_uinfo->state = PD_ENTRY_FREE;
+
+	if (dev->pdr_tail != PPC4XX_LAST_PD)
+		dev->pdr_tail++;
+	else
+		dev->pdr_tail = 0;
+	tail = dev->pdr_tail;
+	spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+	return tail;
+}
+
+/**
+ * alloc memory for the gather descriptor ring
+ * no need to alloc buf for the ring
+ * gdr_tail and gdr_head start out as zero (the device structure is zeroed)
+ */
+static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
+{
+	dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
+				       sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+				       &dev->gdr_pa, GFP_ATOMIC);
+	if (!dev->gdr)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
+{
+	dma_free_coherent(dev->core_dev->device,
+			  sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+			  dev->gdr, dev->gdr_pa);
+}
+
+/*
+ * when this function is called, preemption and interrupts
+ * must be disabled
+ */
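+/*
+ * Reserve n consecutive gather descriptors starting at gdr_head and move
+ * gdr_head past them. Returns the index of the first reserved descriptor,
+ * or ERING_WAS_FULL if the reservation would overtake gdr_tail.
+ */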
+static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
+{
+	u32 retval;
+	u32 tmp;
+
+	if (n >= PPC4XX_NUM_GD)
+		return ERING_WAS_FULL;
+
+	retval = dev->gdr_head;
+	tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
+	if (dev->gdr_head > dev->gdr_tail) {
+		if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
+			return ERING_WAS_FULL;
+	} else if (dev->gdr_head < dev->gdr_tail) {
+		if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
+			return ERING_WAS_FULL;
+	}
+	dev->gdr_head = tmp;
+
+	return retval;
+}
+
+static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->core_dev->lock, flags);
+	if (dev->gdr_tail == dev->gdr_head) {
+		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+		return 0;
+	}
+
+	if (dev->gdr_tail != PPC4XX_LAST_GD)
+		dev->gdr_tail++;
+	else
+		dev->gdr_tail = 0;
+
+	spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+	return 0;
+}
+
+static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
+					      dma_addr_t *gd_dma, u32 idx)
+{
+	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
+
+	return &dev->gdr[idx];
+}
+
+/**
+ * alloc memory for the scatter descriptor ring and its data buffers;
+ * every descriptor gets a fixed PPC4XX_SD_BUFFER_SIZE byte buffer
+ * assigned to its ptr field
+ */
+static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
+{
+	int i;
+
+	/* alloc memory for scatter descriptor ring */
+	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
+				      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+				      &dev->sdr_pa, GFP_ATOMIC);
+	if (!dev->sdr)
+		return -ENOMEM;
+
+	dev->scatter_buffer_va =
+		dma_alloc_coherent(dev->core_dev->device,
+			PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
+			&dev->scatter_buffer_pa, GFP_ATOMIC);
+	if (!dev->scatter_buffer_va) {
+		dma_free_coherent(dev->core_dev->device,
+				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+				  dev->sdr, dev->sdr_pa);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < PPC4XX_NUM_SD; i++) {
+		dev->sdr[i].ptr = dev->scatter_buffer_pa +
+				  PPC4XX_SD_BUFFER_SIZE * i;
+	}
+
+	return 0;
+}
+
+static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
+{
+	if (dev->sdr)
+		dma_free_coherent(dev->core_dev->device,
+				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+				  dev->sdr, dev->sdr_pa);
+
+	if (dev->scatter_buffer_va)
+		dma_free_coherent(dev->core_dev->device,
+				  PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
+				  dev->scatter_buffer_va,
+				  dev->scatter_buffer_pa);
+}
+
+/*
+ * when this function is called, preemption and interrupts
+ * must be disabled
+ */
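+/*
+ * Reserve n consecutive scatter descriptors starting at sdr_head and move
+ * sdr_head past them. Returns the index of the first reserved descriptor,
+ * or ERING_WAS_FULL if the reservation would overtake sdr_tail.
+ */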
+static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
+{
+	u32 retval;
+	u32 tmp;
+
+	if (n >= PPC4XX_NUM_SD)
+		return ERING_WAS_FULL;
+
+	retval = dev->sdr_head;
+	tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
+	if (dev->sdr_head > dev->sdr_tail) {
+		if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
+			return ERING_WAS_FULL;
+	} else if (dev->sdr_head < dev->sdr_tail) {
+		if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
+			return ERING_WAS_FULL;
+	} /* the head == tail (empty ring) case is already taken care of */
+	dev->sdr_head = tmp;
+
+	return retval;
+}
+
+static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->core_dev->lock, flags);
+	if (dev->sdr_tail == dev->sdr_head) {
+		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+		return 0;
+	}
+	if (dev->sdr_tail != PPC4XX_LAST_SD)
+		dev->sdr_tail++;
+	else
+		dev->sdr_tail = 0;
+	spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+	return 0;
+}
+
+static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
+					      dma_addr_t *sd_dma, u32 idx)
+{
+	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
+
+	return &dev->sdr[idx];
+}
+
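+/*
+ * copy processed data out of the scatter ring buffers into the caller's
+ * dst scatterlist
+ */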
+static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
+				      struct ce_pd *pd,
+				      struct pd_uinfo *pd_uinfo,
+				      u32 nbytes,
+				      struct scatterlist *dst)
+{
+	unsigned int first_sd = pd_uinfo->first_sd;
+	unsigned int last_sd;
+	unsigned int overflow = 0;
+	unsigned int to_copy;
+	unsigned int dst_start = 0;
+
+	/*
+	 * Because the scatter buffers are all neatly organized in one
+	 * big contiguous ringbuffer, scatterwalk_map_and_copy() can
+	 * be instructed to copy a range of buffers in one go.
+	 */
+
+	last_sd = (first_sd + pd_uinfo->num_sd);
+	if (last_sd > PPC4XX_LAST_SD) {
+		last_sd = PPC4XX_LAST_SD;
+		overflow = last_sd % PPC4XX_NUM_SD;
+	}
+
+	while (nbytes) {
+		void *buf = dev->scatter_buffer_va +
+			first_sd * PPC4XX_SD_BUFFER_SIZE;
+
+		to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
+				      (1 + last_sd - first_sd));
+		scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
+		nbytes -= to_copy;
+
+		if (overflow) {
+			first_sd = 0;
+			last_sd = overflow;
+			dst_start += to_copy;
+			overflow = 0;
+		}
+	}
+}
+
+static void crypto4xx_copy_digest_to_dst(void *dst,
+					struct pd_uinfo *pd_uinfo,
+					struct crypto4xx_ctx *ctx)
+{
+	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
+		memcpy(dst, pd_uinfo->sr_va->save_digest,
+		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
+	}
+}
+
+static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
+				  struct pd_uinfo *pd_uinfo)
+{
+	int i;
+	if (pd_uinfo->num_gd) {
+		for (i = 0; i < pd_uinfo->num_gd; i++)
+			crypto4xx_put_gd_to_gdr(dev);
+		pd_uinfo->first_gd = 0xffffffff;
+		pd_uinfo->num_gd = 0;
+	}
+	if (pd_uinfo->num_sd) {
+		for (i = 0; i < pd_uinfo->num_sd; i++)
+			crypto4xx_put_sd_to_sdr(dev);
+
+		pd_uinfo->first_sd = 0xffffffff;
+		pd_uinfo->num_sd = 0;
+	}
+}
+
+static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
+				     struct pd_uinfo *pd_uinfo,
+				     struct ce_pd *pd)
+{
+	struct skcipher_request *req;
+	struct scatterlist *dst;
+	dma_addr_t addr;
+
+	req = skcipher_request_cast(pd_uinfo->async_req);
+
+	if (pd_uinfo->using_sd) {
+		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+					  req->cryptlen, req->dst);
+	} else {
+		dst = pd_uinfo->dest_va;
+		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
+				    dst->offset, dst->length, DMA_FROM_DEVICE);
+	}
+
+	if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
+		struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+
+		crypto4xx_memcpy_from_le32((u32 *)req->iv,
+			pd_uinfo->sr_va->save_iv,
+			crypto_skcipher_ivsize(skcipher));
+	}
+
+	crypto4xx_ret_sg_desc(dev, pd_uinfo);
+
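+	/*
+	 * A request that was accepted while the engine was flagged "busy"
+	 * has been backlogged by the caller; signal -EINPROGRESS first so
+	 * the backlog owner knows the request is now being processed.
+	 */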
+	if (pd_uinfo->state & PD_ENTRY_BUSY)
+		skcipher_request_complete(req, -EINPROGRESS);
+	skcipher_request_complete(req, 0);
+}
+
+static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
+				struct pd_uinfo *pd_uinfo)
+{
+	struct crypto4xx_ctx *ctx;
+	struct ahash_request *ahash_req;
+
+	ahash_req = ahash_request_cast(pd_uinfo->async_req);
+	ctx = crypto_tfm_ctx(ahash_req->base.tfm);
+
+	crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
+	crypto4xx_ret_sg_desc(dev, pd_uinfo);
+
+	if (pd_uinfo->state & PD_ENTRY_BUSY)
+		ahash_request_complete(ahash_req, -EINPROGRESS);
+	ahash_request_complete(ahash_req, 0);
+}
+
+static void crypto4xx_aead_done(struct crypto4xx_device *dev,
+				struct pd_uinfo *pd_uinfo,
+				struct ce_pd *pd)
+{
+	struct aead_request *aead_req = container_of(pd_uinfo->async_req,
+		struct aead_request, base);
+	struct scatterlist *dst = pd_uinfo->dest_va;
+	size_t cp_len = crypto_aead_authsize(
+		crypto_aead_reqtfm(aead_req));
+	u32 icv[AES_BLOCK_SIZE];
+	int err = 0;
+
+	if (pd_uinfo->using_sd) {
+		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+					  pd->pd_ctl_len.bf.pkt_len,
+					  dst);
+	} else {
+		__dma_sync_page(sg_page(dst), dst->offset, dst->length,
+				DMA_FROM_DEVICE);
+	}
+
+	if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
+		/* append icv at the end */
+		crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
+					   sizeof(icv));
+
+		scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
+					 cp_len, 1);
+	} else {
+		/* check icv at the end */
+		scatterwalk_map_and_copy(icv, aead_req->src,
+			aead_req->assoclen + aead_req->cryptlen -
+			cp_len, cp_len, 0);
+
+		crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));
+
+		if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
+			err = -EBADMSG;
+	}
+
+	crypto4xx_ret_sg_desc(dev, pd_uinfo);
+
+	if (pd->pd_ctl.bf.status & 0xff) {
+		if (!__ratelimit(&dev->aead_ratelimit)) {
+			if (pd->pd_ctl.bf.status & 2)
+				pr_err("pad fail error\n");
+			if (pd->pd_ctl.bf.status & 4)
+				pr_err("seqnum fail\n");
+			if (pd->pd_ctl.bf.status & 8)
+				pr_err("error _notify\n");
+			pr_err("aead return err status = 0x%02x\n",
+				pd->pd_ctl.bf.status & 0xff);
+			pr_err("pd pad_ctl = 0x%08x\n",
+				pd->pd_ctl.bf.pd_pad_ctl);
+		}
+		err = -EINVAL;
+	}
+
+	if (pd_uinfo->state & PD_ENTRY_BUSY)
+		aead_request_complete(aead_req, -EINPROGRESS);
+
+	aead_request_complete(aead_req, err);
+}
+
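+/* dispatch a finished packet descriptor to the matching completion handler */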
+static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
+{
+	struct ce_pd *pd = &dev->pdr[idx];
+	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+
+	switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
+	case CRYPTO_ALG_TYPE_SKCIPHER:
+		crypto4xx_cipher_done(dev, pd_uinfo, pd);
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+		crypto4xx_aead_done(dev, pd_uinfo, pd);
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		crypto4xx_ahash_done(dev, pd_uinfo);
+		break;
+	}
+}
+
+static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
+{
+	crypto4xx_destroy_pdr(core_dev->dev);
+	crypto4xx_destroy_gdr(core_dev->dev);
+	crypto4xx_destroy_sdr(core_dev->dev);
+	iounmap(core_dev->dev->ce_base);
+	kfree(core_dev->dev);
+	kfree(core_dev);
+}
+
+static u32 get_next_gd(u32 current)
+{
+	if (current != PPC4XX_LAST_GD)
+		return current + 1;
+	else
+		return 0;
+}
+
+static u32 get_next_sd(u32 current)
+{
+	if (current != PPC4XX_LAST_SD)
+		return current + 1;
+	else
+		return 0;
+}
+
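+/*
+ * Build and kick off one packet descriptor: reserve PD/GD/SD ring entries,
+ * copy the request SA into the shadow SA, set up gather/scatter DMA and
+ * hand the descriptor to the packet engine.
+ */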
+int crypto4xx_build_pd(struct crypto_async_request *req,
+		       struct crypto4xx_ctx *ctx,
+		       struct scatterlist *src,
+		       struct scatterlist *dst,
+		       const unsigned int datalen,
+		       const __le32 *iv, const u32 iv_len,
+		       const struct dynamic_sa_ctl *req_sa,
+		       const unsigned int sa_len,
+		       const unsigned int assoclen,
+		       struct scatterlist *_dst)
+{
+	struct crypto4xx_device *dev = ctx->dev;
+	struct dynamic_sa_ctl *sa;
+	struct ce_gd *gd;
+	struct ce_pd *pd;
+	u32 num_gd, num_sd;
+	u32 fst_gd = 0xffffffff;
+	u32 fst_sd = 0xffffffff;
+	u32 pd_entry;
+	unsigned long flags;
+	struct pd_uinfo *pd_uinfo;
+	unsigned int nbytes = datalen;
+	size_t offset_to_sr_ptr;
+	u32 gd_idx = 0;
+	int tmp;
+	bool is_busy;
+
+	/* figure how many gd are needed */
+	tmp = sg_nents_for_len(src, assoclen + datalen);
+	if (tmp < 0) {
+		dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
+		return tmp;
+	}
+	if (tmp == 1)
+		tmp = 0;
+	num_gd = tmp;
+
+	if (assoclen) {
+		nbytes += assoclen;
+		dst = scatterwalk_ffwd(_dst, dst, assoclen);
+	}
+
+	/* figure how many sd are needed */
+	if (sg_is_last(dst)) {
+		num_sd = 0;
+	} else {
+		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
+			num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
+			if (datalen % PPC4XX_SD_BUFFER_SIZE)
+				num_sd++;
+		} else {
+			num_sd = 1;
+		}
+	}
+
+	/*
+	 * The following section of code needs to be protected by the ring
+	 * lock: the gather and scatter descriptors used by one packet must
+	 * be consecutive. If we run out of any kind of descriptor, the
+	 * descriptors already taken must be returned to their original place.
+	 */
+	spin_lock_irqsave(&dev->core_dev->lock, flags);
+	/*
+	 * Let the caller know to slow down, once more than 13/16ths = 81%
+	 * of the available data contexts are being used simultaneously.
+	 *
+	 * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
+	 * 31 more contexts before new requests have to be rejected.
+	 */
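+	/*
+	 * With PPC4XX_NUM_PD = 256 the checks below work out to a "busy"
+	 * threshold of (256 * 13) / 16 = 208 in-flight descriptors for
+	 * backlog-capable requests, and a hard -EBUSY cut-off at
+	 * (256 * 15) / 16 = 240 for requests that did not set
+	 * CRYPTO_TFM_REQ_MAY_BACKLOG.
+	 */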
+	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+			((PPC4XX_NUM_PD * 13) / 16);
+	} else {
+		/*
+		 * To fix contention issues between ipsec (no backlog) and
+		 * dm-crypt (backlog), reserve 32 entries for "no backlog"
+		 * data contexts.
+		 */
+		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+			((PPC4XX_NUM_PD * 15) / 16);
+
+		if (is_busy) {
+			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+			return -EBUSY;
+		}
+	}
+
+	if (num_gd) {
+		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
+		if (fst_gd == ERING_WAS_FULL) {
+			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+			return -EAGAIN;
+		}
+	}
+	if (num_sd) {
+		fst_sd = crypto4xx_get_n_sd(dev, num_sd);
+		if (fst_sd == ERING_WAS_FULL) {
+			if (num_gd)
+				dev->gdr_head = fst_gd;
+			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+			return -EAGAIN;
+		}
+	}
+	pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
+	if (pd_entry == ERING_WAS_FULL) {
+		if (num_gd)
+			dev->gdr_head = fst_gd;
+		if (num_sd)
+			dev->sdr_head = fst_sd;
+		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+		return -EAGAIN;
+	}
+	spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+
+	pd = &dev->pdr[pd_entry];
+	pd->sa_len = sa_len;
+
+	pd_uinfo = &dev->pdr_uinfo[pd_entry];
+	pd_uinfo->async_req = req;
+	pd_uinfo->num_gd = num_gd;
+	pd_uinfo->num_sd = num_sd;
+
+	if (iv_len)
+		memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
+
+	sa = pd_uinfo->sa_va;
+	memcpy(sa, req_sa, sa_len * 4);
+
+	sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
+	offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
+	*(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
+
+	if (num_gd) {
+		dma_addr_t gd_dma;
+		struct scatterlist *sg;
+
+		/* get first gd we are going to use */
+		gd_idx = fst_gd;
+		pd_uinfo->first_gd = fst_gd;
+		pd_uinfo->num_gd = num_gd;
+		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+		pd->src = gd_dma;
+		/* enable gather */
+		sa->sa_command_0.bf.gather = 1;
+		/* walk the sg, and setup gather array */
+
+		sg = src;
+		while (nbytes) {
+			size_t len;
+
+			len = min(sg->length, nbytes);
+			gd->ptr = dma_map_page(dev->core_dev->device,
+				sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
+			gd->ctl_len.len = len;
+			gd->ctl_len.done = 0;
+			gd->ctl_len.ready = 1;
+			if (len >= nbytes)
+				break;
+
+			nbytes -= sg->length;
+			gd_idx = get_next_gd(gd_idx);
+			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+			sg = sg_next(sg);
+		}
+	} else {
+		pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
+				src->offset, min(nbytes, src->length),
+				DMA_TO_DEVICE);
+		/*
+		 * Disable gather in sa command
+		 */
+		sa->sa_command_0.bf.gather = 0;
+		/*
+		 * Indicate gather array is not used
+		 */
+		pd_uinfo->first_gd = 0xffffffff;
+		pd_uinfo->num_gd = 0;
+	}
+	if (sg_is_last(dst)) {
+		/*
+		 * the application gave us a dst that is one contiguous piece
+		 * of memory, so there is no need to use the scatter ring
+		 */
+		pd_uinfo->using_sd = 0;
+		pd_uinfo->first_sd = 0xffffffff;
+		pd_uinfo->num_sd = 0;
+		pd_uinfo->dest_va = dst;
+		sa->sa_command_0.bf.scatter = 0;
+		pd->dest = (u32)dma_map_page(dev->core_dev->device,
+					     sg_page(dst), dst->offset,
+					     min(datalen, dst->length),
+					     DMA_TO_DEVICE);
+	} else {
+		dma_addr_t sd_dma;
+		struct ce_sd *sd = NULL;
+
+		u32 sd_idx = fst_sd;
+		nbytes = datalen;
+		sa->sa_command_0.bf.scatter = 1;
+		pd_uinfo->using_sd = 1;
+		pd_uinfo->dest_va = dst;
+		pd_uinfo->first_sd = fst_sd;
+		pd_uinfo->num_sd = num_sd;
+		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
+		pd->dest = sd_dma;
+		/* setup scatter descriptor */
+		sd->ctl.done = 0;
+		sd->ctl.rdy = 1;
+		/* sd->ptr has already been set up by crypto4xx_build_sdr() */
+		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
+			nbytes -= PPC4XX_SD_BUFFER_SIZE;
+		else
+			nbytes = 0;
+		while (nbytes) {
+			sd_idx = get_next_sd(sd_idx);
+			sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
+			/* setup scatter descriptor */
+			sd->ctl.done = 0;
+			sd->ctl.rdy = 1;
+			if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
+				nbytes -= PPC4XX_SD_BUFFER_SIZE;
+			} else {
+				/*
+				 * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
+				 * which is more than nbytes, so done.
+				 */
+				nbytes = 0;
+			}
+		}
+	}
+
+	pd->pd_ctl.w = PD_CTL_HOST_READY |
+		((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
+		 (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
+			PD_CTL_HASH_FINAL : 0);
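+	/*
+	 * 0x00400000 corresponds to the host_ready bit of the ce_pd_ctl_len
+	 * word; the low 20 bits (pkt_len) carry the total packet length.
+	 */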
+	pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
+	pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
+
+	wmb();
+	/* write any value to push engine to read a pd */
+	writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+	return is_busy ? -EBUSY : -EINPROGRESS;
+}
+
+/**
+ * Algorithm Registration Functions
+ */
+static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
+			       struct crypto4xx_ctx *ctx)
+{
+	ctx->dev = amcc_alg->dev;
+	ctx->sa_in = NULL;
+	ctx->sa_out = NULL;
+	ctx->sa_len = 0;
+}
+
+static int crypto4xx_sk_init(struct crypto_skcipher *sk)
+{
+	struct skcipher_alg *alg = crypto_skcipher_alg(sk);
+	struct crypto4xx_alg *amcc_alg;
+	struct crypto4xx_ctx *ctx =  crypto_skcipher_ctx(sk);
+
+	if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+		ctx->sw_cipher.cipher =
+			crypto_alloc_skcipher(alg->base.cra_name, 0,
+					      CRYPTO_ALG_NEED_FALLBACK |
+					      CRYPTO_ALG_ASYNC);
+		if (IS_ERR(ctx->sw_cipher.cipher))
+			return PTR_ERR(ctx->sw_cipher.cipher);
+
+		crypto_skcipher_set_reqsize(sk,
+			sizeof(struct skcipher_request) + 32 +
+			crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
+	}
+
+	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
+	crypto4xx_ctx_init(amcc_alg, ctx);
+	return 0;
+}
+
+static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
+{
+	crypto4xx_free_sa(ctx);
+}
+
+static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
+{
+	struct crypto4xx_ctx *ctx =  crypto_skcipher_ctx(sk);
+
+	crypto4xx_common_exit(ctx);
+	if (ctx->sw_cipher.cipher)
+		crypto_free_skcipher(ctx->sw_cipher.cipher);
+}
+
+static int crypto4xx_aead_init(struct crypto_aead *tfm)
+{
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto4xx_alg *amcc_alg;
+
+	ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
+						CRYPTO_ALG_NEED_FALLBACK |
+						CRYPTO_ALG_ASYNC);
+	if (IS_ERR(ctx->sw_cipher.aead))
+		return PTR_ERR(ctx->sw_cipher.aead);
+
+	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
+	crypto4xx_ctx_init(amcc_alg, ctx);
+	crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
+				crypto_aead_reqsize(ctx->sw_cipher.aead),
+				sizeof(struct crypto4xx_aead_reqctx)));
+	return 0;
+}
+
+static void crypto4xx_aead_exit(struct crypto_aead *tfm)
+{
+	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto4xx_common_exit(ctx);
+	crypto_free_aead(ctx->sw_cipher.aead);
+}
+
+static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
+				  struct crypto4xx_alg_common *crypto_alg,
+				  int array_size)
+{
+	struct crypto4xx_alg *alg;
+	int i;
+	int rc = 0;
+
+	for (i = 0; i < array_size; i++) {
+		alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
+		if (!alg)
+			return -ENOMEM;
+
+		alg->alg = crypto_alg[i];
+		alg->dev = sec_dev;
+
+		switch (alg->alg.type) {
+		case CRYPTO_ALG_TYPE_AEAD:
+			rc = crypto_register_aead(&alg->alg.u.aead);
+			break;
+
+		case CRYPTO_ALG_TYPE_AHASH:
+			rc = crypto_register_ahash(&alg->alg.u.hash);
+			break;
+
+		default:
+			rc = crypto_register_skcipher(&alg->alg.u.cipher);
+			break;
+		}
+
+		if (rc)
+			kfree(alg);
+		else
+			list_add_tail(&alg->entry, &sec_dev->alg_list);
+	}
+
+	return 0;
+}
+
+static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
+{
+	struct crypto4xx_alg *alg, *tmp;
+
+	list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
+		list_del(&alg->entry);
+		switch (alg->alg.type) {
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&alg->alg.u.hash);
+			break;
+
+		case CRYPTO_ALG_TYPE_AEAD:
+			crypto_unregister_aead(&alg->alg.u.aead);
+			break;
+
+		default:
+			crypto_unregister_skcipher(&alg->alg.u.cipher);
+		}
+		kfree(alg);
+	}
+}
+
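+/* bottom half: walk the PDR from tail to head and complete finished requests */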
+static void crypto4xx_bh_tasklet_cb(unsigned long data)
+{
+	struct device *dev = (struct device *)data;
+	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+	struct pd_uinfo *pd_uinfo;
+	struct ce_pd *pd;
+	u32 tail = core_dev->dev->pdr_tail;
+	u32 head = core_dev->dev->pdr_head;
+
+	do {
+		pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
+		pd = &core_dev->dev->pdr[tail];
+		if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
+		     ((READ_ONCE(pd->pd_ctl.w) &
+		       (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
+		       PD_CTL_PE_DONE)) {
+			crypto4xx_pd_done(core_dev->dev, tail);
+			tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
+		} else {
+			/* if tail not done, break */
+			break;
+		}
+	} while (head != tail);
+}
+
+/**
+ * Top half of the interrupt service routine.
+ */
+static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
+						      u32 clr_val)
+{
+	struct device *dev = (struct device *)data;
+	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+
+	writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+	tasklet_schedule(&core_dev->tasklet);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+{
+	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
+}
+
+static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
+{
+	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
+		PPC4XX_TMO_ERR_INT);
+}
+
+/**
+ * Supported Crypto Algorithms
+ */
+static struct crypto4xx_alg_common crypto4xx_alg[] = {
+	/* Crypto AES modes */
+	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+		.base = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "cbc-aes-ppc4xx",
+			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize	= AES_IV_SIZE,
+		.setkey = crypto4xx_setkey_aes_cbc,
+		.encrypt = crypto4xx_encrypt_iv,
+		.decrypt = crypto4xx_decrypt_iv,
+		.init = crypto4xx_sk_init,
+		.exit = crypto4xx_sk_exit,
+	} },
+	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+		.base = {
+			.cra_name = "cfb(aes)",
+			.cra_driver_name = "cfb-aes-ppc4xx",
+			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize	= AES_IV_SIZE,
+		.setkey	= crypto4xx_setkey_aes_cfb,
+		.encrypt = crypto4xx_encrypt_iv,
+		.decrypt = crypto4xx_decrypt_iv,
+		.init = crypto4xx_sk_init,
+		.exit = crypto4xx_sk_exit,
+	} },
+	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+		.base = {
+			.cra_name = "ctr(aes)",
+			.cra_driver_name = "ctr-aes-ppc4xx",
+			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+				CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize	= AES_IV_SIZE,
+		.setkey	= crypto4xx_setkey_aes_ctr,
+		.encrypt = crypto4xx_encrypt_ctr,
+		.decrypt = crypto4xx_decrypt_ctr,
+		.init = crypto4xx_sk_init,
+		.exit = crypto4xx_sk_exit,
+	} },
+	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+		.base = {
+			.cra_name = "rfc3686(ctr(aes))",
+			.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
+			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+		.ivsize	= CTR_RFC3686_IV_SIZE,
+		.setkey = crypto4xx_setkey_rfc3686,
+		.encrypt = crypto4xx_rfc3686_encrypt,
+		.decrypt = crypto4xx_rfc3686_decrypt,
+		.init = crypto4xx_sk_init,
+		.exit = crypto4xx_sk_exit,
+	} },
+	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+		.base = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "ecb-aes-ppc4xx",
+			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey	= crypto4xx_setkey_aes_ecb,
+		.encrypt = crypto4xx_encrypt_noiv,
+		.decrypt = crypto4xx_decrypt_noiv,
+		.init = crypto4xx_sk_init,
+		.exit = crypto4xx_sk_exit,
+	} },
+	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+		.base = {
+			.cra_name = "ofb(aes)",
+			.cra_driver_name = "ofb-aes-ppc4xx",
+			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize	= AES_IV_SIZE,
+		.setkey	= crypto4xx_setkey_aes_ofb,
+		.encrypt = crypto4xx_encrypt_iv,
+		.decrypt = crypto4xx_decrypt_iv,
+		.init = crypto4xx_sk_init,
+		.exit = crypto4xx_sk_exit,
+	} },
+
+	/* AEAD */
+	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+		.setkey		= crypto4xx_setkey_aes_ccm,
+		.setauthsize	= crypto4xx_setauthsize_aead,
+		.encrypt	= crypto4xx_encrypt_aes_ccm,
+		.decrypt	= crypto4xx_decrypt_aes_ccm,
+		.init		= crypto4xx_aead_init,
+		.exit		= crypto4xx_aead_exit,
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize    = 16,
+		.base = {
+			.cra_name	= "ccm(aes)",
+			.cra_driver_name = "ccm-aes-ppc4xx",
+			.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
+			.cra_flags	= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK |
+					  CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize	= 1,
+			.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
+			.cra_module	= THIS_MODULE,
+		},
+	} },
+	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+		.setkey		= crypto4xx_setkey_aes_gcm,
+		.setauthsize	= crypto4xx_setauthsize_aead,
+		.encrypt	= crypto4xx_encrypt_aes_gcm,
+		.decrypt	= crypto4xx_decrypt_aes_gcm,
+		.init		= crypto4xx_aead_init,
+		.exit		= crypto4xx_aead_exit,
+		.ivsize		= GCM_AES_IV_SIZE,
+		.maxauthsize	= 16,
+		.base = {
+			.cra_name	= "gcm(aes)",
+			.cra_driver_name = "gcm-aes-ppc4xx",
+			.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
+			.cra_flags	= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK |
+					  CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize	= 1,
+			.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
+			.cra_module	= THIS_MODULE,
+		},
+	} },
+};
+
+/**
+ * Module Initialization Routine
+ */
+static int crypto4xx_probe(struct platform_device *ofdev)
+{
+	int rc;
+	struct resource res;
+	struct device *dev = &ofdev->dev;
+	struct crypto4xx_core_device *core_dev;
+	u32 pvr;
+	bool is_revb = true;
+
+	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
+	if (rc)
+		return -ENODEV;
+
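+	/* pulse the crypto engine reset bit in the SoC's SDR0 soft-reset register */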
+	if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
+		mtdcri(SDR0, PPC460EX_SDR0_SRST,
+		       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
+		mtdcri(SDR0, PPC460EX_SDR0_SRST,
+		       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
+	} else if (of_find_compatible_node(NULL, NULL,
+			"amcc,ppc405ex-crypto")) {
+		mtdcri(SDR0, PPC405EX_SDR0_SRST,
+		       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
+		mtdcri(SDR0, PPC405EX_SDR0_SRST,
+		       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+		is_revb = false;
+	} else if (of_find_compatible_node(NULL, NULL,
+			"amcc,ppc460sx-crypto")) {
+		mtdcri(SDR0, PPC460SX_SDR0_SRST,
+		       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
+		mtdcri(SDR0, PPC460SX_SDR0_SRST,
+		       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
+	} else {
+		printk(KERN_ERR "Crypto Function Not supported!\n");
+		return -EINVAL;
+	}
+
+	core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
+	if (!core_dev)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, core_dev);
+	core_dev->ofdev = ofdev;
+	core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
+	rc = -ENOMEM;
+	if (!core_dev->dev)
+		goto err_alloc_dev;
+
+	/*
+	 * Older revisions of the 460EX/GT have a hardware bug and
+	 * therefore do not support H/W based security interrupt coalescing
+	 */
+	pvr = mfspr(SPRN_PVR);
+	if (is_revb && ((pvr >> 4) == 0x130218A)) {
+		u32 min = PVR_MIN(pvr);
+
+		if (min < 4) {
+			dev_info(dev, "RevA detected - disable interrupt coalescing\n");
+			is_revb = false;
+		}
+	}
+
+	core_dev->dev->core_dev = core_dev;
+	core_dev->dev->is_revb = is_revb;
+	core_dev->device = dev;
+	spin_lock_init(&core_dev->lock);
+	INIT_LIST_HEAD(&core_dev->dev->alg_list);
+	ratelimit_default_init(&core_dev->dev->aead_ratelimit);
+	rc = crypto4xx_build_pdr(core_dev->dev);
+	if (rc)
+		goto err_build_pdr;
+
+	rc = crypto4xx_build_gdr(core_dev->dev);
+	if (rc)
+		goto err_build_pdr;
+
+	rc = crypto4xx_build_sdr(core_dev->dev);
+	if (rc)
+		goto err_build_sdr;
+
+	/* Init tasklet for bottom half processing */
+	tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
+		     (unsigned long) dev);
+
+	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
+	if (!core_dev->dev->ce_base) {
+		dev_err(dev, "failed to of_iomap\n");
+		rc = -ENOMEM;
+		goto err_iomap;
+	}
+
+	/* Register for Crypto isr, Crypto Engine IRQ */
+	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+	rc = request_irq(core_dev->irq, is_revb ?
+			 crypto4xx_ce_interrupt_handler_revb :
+			 crypto4xx_ce_interrupt_handler, 0,
+			 KBUILD_MODNAME, dev);
+	if (rc)
+		goto err_request_irq;
+
+	/* the pdr, rdr, gdr and sdr rings need to be set up before this */
+	crypto4xx_hw_init(core_dev->dev);
+
+	/* Register security algorithms with Linux CryptoAPI */
+	rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
+			       ARRAY_SIZE(crypto4xx_alg));
+	if (rc)
+		goto err_start_dev;
+
+	ppc4xx_trng_probe(core_dev);
+	return 0;
+
+err_start_dev:
+	free_irq(core_dev->irq, dev);
+err_request_irq:
+	irq_dispose_mapping(core_dev->irq);
+	iounmap(core_dev->dev->ce_base);
+err_iomap:
+	tasklet_kill(&core_dev->tasklet);
+err_build_sdr:
+	crypto4xx_destroy_sdr(core_dev->dev);
+	crypto4xx_destroy_gdr(core_dev->dev);
+err_build_pdr:
+	crypto4xx_destroy_pdr(core_dev->dev);
+	kfree(core_dev->dev);
+err_alloc_dev:
+	kfree(core_dev);
+
+	return rc;
+}
+
+static int crypto4xx_remove(struct platform_device *ofdev)
+{
+	struct device *dev = &ofdev->dev;
+	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+
+	ppc4xx_trng_remove(core_dev);
+
+	free_irq(core_dev->irq, dev);
+	irq_dispose_mapping(core_dev->irq);
+
+	tasklet_kill(&core_dev->tasklet);
+	/* Un-register with Linux CryptoAPI */
+	crypto4xx_unregister_alg(core_dev->dev);
+	/* Free all allocated memory */
+	crypto4xx_stop_all(core_dev);
+
+	return 0;
+}
+
+static const struct of_device_id crypto4xx_match[] = {
+	{ .compatible      = "amcc,ppc4xx-crypto",},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, crypto4xx_match);
+
+static struct platform_driver crypto4xx_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = crypto4xx_match,
+	},
+	.probe		= crypto4xx_probe,
+	.remove		= crypto4xx_remove,
+};
+
+module_platform_driver(crypto4xx_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
+MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
new file mode 100644
index 0000000..e2ca567
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -0,0 +1,248 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This is the header file for the AMCC crypto offload Linux device driver,
+ * for use with the Linux CryptoAPI.
+ */
+
+#ifndef __CRYPTO4XX_CORE_H__
+#define __CRYPTO4XX_CORE_H__
+
+#include <linux/ratelimit.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_sa.h"
+
+#define PPC460SX_SDR0_SRST                      0x201
+#define PPC405EX_SDR0_SRST                      0x200
+#define PPC460EX_SDR0_SRST                      0x201
+#define PPC460EX_CE_RESET                       0x08000000
+#define PPC460SX_CE_RESET                       0x20000000
+#define PPC405EX_CE_RESET                       0x00000008
+
+#define CRYPTO4XX_CRYPTO_PRIORITY		300
+#define PPC4XX_NUM_PD				256
+#define PPC4XX_LAST_PD				(PPC4XX_NUM_PD - 1)
+#define PPC4XX_NUM_GD				1024
+#define PPC4XX_LAST_GD				(PPC4XX_NUM_GD - 1)
+#define PPC4XX_NUM_SD				256
+#define PPC4XX_LAST_SD				(PPC4XX_NUM_SD - 1)
+#define PPC4XX_SD_BUFFER_SIZE			2048
+
+#define PD_ENTRY_BUSY				BIT(1)
+#define PD_ENTRY_INUSE				BIT(0)
+#define PD_ENTRY_FREE				0
+#define ERING_WAS_FULL				0xffffffff
+
+struct crypto4xx_device;
+
+union shadow_sa_buf {
+	struct dynamic_sa_ctl sa;
+
+	/* alloc 256 bytes which is enough for any kind of dynamic sa */
+	u8 buf[256];
+} __packed;
+
+struct pd_uinfo {
+	struct crypto4xx_device *dev;
+	u32   state;
+	u32 using_sd;
+	u32 first_gd;		/* first gather descriptor
+				   used by this packet */
+	u32 num_gd;		/* number of gather descriptors
+				   used by this packet */
+	u32 first_sd;		/* first scatter descriptor
+				   used by this packet */
+	u32 num_sd;		/* number of scatter descriptors
+				   used by this packet */
+	struct dynamic_sa_ctl *sa_va;	/* shadow sa */
+	struct sa_state_record *sr_va;	/* state record for shadow sa */
+	u32 sr_pa;
+	struct scatterlist *dest_va;
+	struct crypto_async_request *async_req; 	/* base crypto request
+							for this packet */
+};
+
+struct crypto4xx_device {
+	struct crypto4xx_core_device *core_dev;
+	void __iomem *ce_base;
+	void __iomem *trng_base;
+
+	struct ce_pd *pdr;	/* base address of packet descriptor ring */
+	dma_addr_t pdr_pa;	/* physical address of pdr_base_register */
+	struct ce_gd *gdr;	/* gather descriptor ring */
+	dma_addr_t gdr_pa;	/* physical address of gdr_base_register */
+	struct ce_sd *sdr;	/* scatter descriptor ring */
+	dma_addr_t sdr_pa;	/* physical address of sdr_base_register */
+	void *scatter_buffer_va;
+	dma_addr_t scatter_buffer_pa;
+
+	union shadow_sa_buf *shadow_sa_pool;
+	dma_addr_t shadow_sa_pool_pa;
+	struct sa_state_record *shadow_sr_pool;
+	dma_addr_t shadow_sr_pool_pa;
+	u32 pdr_tail;
+	u32 pdr_head;
+	u32 gdr_tail;
+	u32 gdr_head;
+	u32 sdr_tail;
+	u32 sdr_head;
+	struct pd_uinfo *pdr_uinfo;
+	struct list_head alg_list;	/* List of algorithms supported
+					   by this device */
+	struct ratelimit_state aead_ratelimit;
+	bool is_revb;
+};
+
+struct crypto4xx_core_device {
+	struct device *device;
+	struct platform_device *ofdev;
+	struct crypto4xx_device *dev;
+	struct hwrng *trng;
+	u32 int_status;
+	u32 irq;
+	struct tasklet_struct tasklet;
+	spinlock_t lock;
+};
+
+struct crypto4xx_ctx {
+	struct crypto4xx_device *dev;
+	struct dynamic_sa_ctl *sa_in;
+	struct dynamic_sa_ctl *sa_out;
+	__le32 iv_nonce;
+	u32 sa_len;
+	union {
+		struct crypto_skcipher *cipher;
+		struct crypto_aead *aead;
+	} sw_cipher;
+};
+
+struct crypto4xx_aead_reqctx {
+	struct scatterlist dst[2];
+};
+
+struct crypto4xx_alg_common {
+	u32 type;
+	union {
+		struct skcipher_alg cipher;
+		struct ahash_alg hash;
+		struct aead_alg aead;
+	} u;
+};
+
+struct crypto4xx_alg {
+	struct list_head  entry;
+	struct crypto4xx_alg_common alg;
+	struct crypto4xx_device *dev;
+};
+
+int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+int crypto4xx_build_pd(struct crypto_async_request *req,
+		       struct crypto4xx_ctx *ctx,
+		       struct scatterlist *src,
+		       struct scatterlist *dst,
+		       const unsigned int datalen,
+		       const __le32 *iv, const u32 iv_len,
+		       const struct dynamic_sa_ctl *sa,
+		       const unsigned int sa_len,
+		       const unsigned int assoclen,
+		       struct scatterlist *dst_tmp);
+int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_ctr(struct skcipher_request *req);
+int crypto4xx_decrypt_ctr(struct skcipher_request *req);
+int crypto4xx_encrypt_iv(struct skcipher_request *req);
+int crypto4xx_decrypt_iv(struct skcipher_request *req);
+int crypto4xx_encrypt_noiv(struct skcipher_request *req);
+int crypto4xx_decrypt_noiv(struct skcipher_request *req);
+int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
+int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
+int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+int crypto4xx_hash_digest(struct ahash_request *req);
+int crypto4xx_hash_final(struct ahash_request *req);
+int crypto4xx_hash_update(struct ahash_request *req);
+int crypto4xx_hash_init(struct ahash_request *req);
+
+/**
+ * Note: Only use this function to copy items that are word aligned.
+ */
+static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
+					   size_t len)
+{
+	for (; len >= 4; buf += 4, len -= 4)
+		*dst++ = __swab32p((u32 *) buf);
+
+	if (len) {
+		const u8 *tmp = (u8 *)buf;
+
+		switch (len) {
+		case 3:
+			*dst = (tmp[2] << 16) |
+			       (tmp[1] << 8) |
+			       tmp[0];
+			break;
+		case 2:
+			*dst = (tmp[1] << 8) |
+			       tmp[0];
+			break;
+		case 1:
+			*dst = tmp[0];
+			break;
+		default:
+			break;
+		}
+	}
+}
+
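+/*
+ * The PPC4xx core is big-endian while several crypto engine fields (IVs,
+ * saved digests) are little-endian, so converting to or from __le32 is a
+ * plain 32-bit byte swap in both directions.
+ */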
+static inline void crypto4xx_memcpy_from_le32(u32 *dst, const void *buf,
+					      size_t len)
+{
+	crypto4xx_memcpy_swab32(dst, buf, len);
+}
+
+static inline void crypto4xx_memcpy_to_le32(__le32 *dst, const void *buf,
+					    size_t len)
+{
+	crypto4xx_memcpy_swab32((u32 *)dst, buf, len);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *ciper,
+			       unsigned int authsize);
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+			     const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req);
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req);
+
+#endif
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
new file mode 100644
index 0000000..4723317
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -0,0 +1,290 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file defines the register set for the Security Subsystem
+ */
+
+#ifndef __CRYPTO4XX_REG_DEF_H__
+#define __CRYPTO4XX_REG_DEF_H__
+
+/* CRYPTO4XX Register offset */
+#define CRYPTO4XX_DESCRIPTOR			0x00000000
+#define CRYPTO4XX_CTRL_STAT			0x00000000
+#define CRYPTO4XX_SOURCE			0x00000004
+#define CRYPTO4XX_DEST				0x00000008
+#define CRYPTO4XX_SA				0x0000000C
+#define CRYPTO4XX_SA_LENGTH			0x00000010
+#define CRYPTO4XX_LENGTH			0x00000014
+
+#define CRYPTO4XX_PE_DMA_CFG			0x00000040
+#define CRYPTO4XX_PE_DMA_STAT			0x00000044
+#define CRYPTO4XX_PDR_BASE			0x00000048
+#define CRYPTO4XX_RDR_BASE			0x0000004c
+#define CRYPTO4XX_RING_SIZE			0x00000050
+#define CRYPTO4XX_RING_CTRL			0x00000054
+#define CRYPTO4XX_INT_RING_STAT			0x00000058
+#define CRYPTO4XX_EXT_RING_STAT			0x0000005c
+#define CRYPTO4XX_IO_THRESHOLD			0x00000060
+#define CRYPTO4XX_GATH_RING_BASE		0x00000064
+#define CRYPTO4XX_SCAT_RING_BASE		0x00000068
+#define CRYPTO4XX_PART_RING_SIZE		0x0000006c
+#define CRYPTO4XX_PART_RING_CFG		        0x00000070
+
+#define CRYPTO4XX_PDR_BASE_UADDR		0x00000080
+#define CRYPTO4XX_RDR_BASE_UADDR		0x00000084
+#define CRYPTO4XX_PKT_SRC_UADDR			0x00000088
+#define CRYPTO4XX_PKT_DEST_UADDR		0x0000008c
+#define CRYPTO4XX_SA_UADDR			0x00000090
+#define CRYPTO4XX_GATH_RING_BASE_UADDR		0x000000A0
+#define CRYPTO4XX_SCAT_RING_BASE_UADDR		0x000000A4
+
+#define CRYPTO4XX_SEQ_RD			0x00000408
+#define CRYPTO4XX_SEQ_MASK_RD			0x0000040C
+
+#define CRYPTO4XX_SA_CMD_0			0x00010600
+#define CRYPTO4XX_SA_CMD_1			0x00010604
+
+#define CRYPTO4XX_STATE_PTR			0x000106dc
+#define CRYPTO4XX_STATE_IV			0x00010700
+#define CRYPTO4XX_STATE_HASH_BYTE_CNT_0		0x00010710
+#define CRYPTO4XX_STATE_HASH_BYTE_CNT_1		0x00010714
+
+#define CRYPTO4XX_STATE_IDIGEST_0		0x00010718
+#define CRYPTO4XX_STATE_IDIGEST_1		0x0001071c
+
+#define CRYPTO4XX_DATA_IN			0x00018000
+#define CRYPTO4XX_DATA_OUT			0x0001c000
+
+#define CRYPTO4XX_INT_UNMASK_STAT		0x000500a0
+#define CRYPTO4XX_INT_MASK_STAT			0x000500a4
+#define CRYPTO4XX_INT_CLR			0x000500a4
+#define CRYPTO4XX_INT_EN			0x000500a8
+
+#define CRYPTO4XX_INT_PKA			0x00000002
+#define CRYPTO4XX_INT_PDR_DONE			0x00008000
+#define CRYPTO4XX_INT_MA_WR_ERR			0x00020000
+#define CRYPTO4XX_INT_MA_RD_ERR			0x00010000
+#define CRYPTO4XX_INT_PE_ERR			0x00000200
+#define CRYPTO4XX_INT_USER_DMA_ERR		0x00000040
+#define CRYPTO4XX_INT_SLAVE_ERR			0x00000010
+#define CRYPTO4XX_INT_MASTER_ERR		0x00000008
+#define CRYPTO4XX_INT_ERROR			0x00030258
+
+#define CRYPTO4XX_INT_CFG			0x000500ac
+#define CRYPTO4XX_INT_DESCR_RD			0x000500b0
+#define CRYPTO4XX_INT_DESCR_CNT			0x000500b4
+#define CRYPTO4XX_INT_TIMEOUT_CNT		0x000500b8
+
+#define CRYPTO4XX_DEVICE_CTRL			0x00060080
+#define CRYPTO4XX_DEVICE_ID			0x00060084
+#define CRYPTO4XX_DEVICE_INFO			0x00060088
+#define CRYPTO4XX_DMA_USER_SRC			0x00060094
+#define CRYPTO4XX_DMA_USER_DEST			0x00060098
+#define CRYPTO4XX_DMA_USER_CMD			0x0006009C
+
+#define CRYPTO4XX_DMA_CFG	        	0x000600d4
+#define CRYPTO4XX_BYTE_ORDER_CFG 		0x000600d8
+#define CRYPTO4XX_ENDIAN_CFG			0x000600d8
+
+#define CRYPTO4XX_PRNG_STAT			0x00070000
+#define CRYPTO4XX_PRNG_CTRL			0x00070004
+#define CRYPTO4XX_PRNG_SEED_L			0x00070008
+#define CRYPTO4XX_PRNG_SEED_H			0x0007000c
+
+#define CRYPTO4XX_PRNG_RES_0			0x00070020
+#define CRYPTO4XX_PRNG_RES_1			0x00070024
+#define CRYPTO4XX_PRNG_RES_2			0x00070028
+#define CRYPTO4XX_PRNG_RES_3			0x0007002C
+
+#define CRYPTO4XX_PRNG_LFSR_L			0x00070030
+#define CRYPTO4XX_PRNG_LFSR_H			0x00070034
+
+/**
+ * Initialize CRYPTO ENGINE registers, and memory bases.
+ */
+#define PPC4XX_PDR_POLL				0x3ff
+#define PPC4XX_OUTPUT_THRESHOLD			2
+#define PPC4XX_INPUT_THRESHOLD			2
+#define PPC4XX_PD_SIZE				6
+#define PPC4XX_CTX_DONE_INT			0x2000
+#define PPC4XX_PD_DONE_INT			0x8000
+#define PPC4XX_TMO_ERR_INT			0x40000
+#define PPC4XX_BYTE_ORDER			0x22222
+#define PPC4XX_INTERRUPT_CLR			0x3ffff
+#define PPC4XX_PRNG_CTRL_AUTO_EN		0x3
+#define PPC4XX_DC_3DES_EN			1
+#define PPC4XX_TRNG_EN				0x00020000
+#define PPC4XX_INT_DESCR_CNT			7
+#define PPC4XX_INT_TIMEOUT_CNT			0
+#define PPC4XX_INT_TIMEOUT_CNT_REVB		0x3FF
+#define PPC4XX_INT_CFG				1
+/**
+ * All of the following defines are ad hoc.
+ */
+#define PPC4XX_RING_RETRY			100
+#define PPC4XX_RING_POLL			100
+#define PPC4XX_SDR_SIZE				PPC4XX_NUM_SD
+#define PPC4XX_GDR_SIZE				PPC4XX_NUM_GD
+
+/**
+ * Generic Security Association (SA) with all possible fields. These will
+ * likely never be used except for reference purposes. The structure format
+ * cannot be changed, as the hardware expects the layout as defined.
+ * Fields can be removed or reduced, but the ordering cannot be changed.
+ */
+#define CRYPTO4XX_DMA_CFG_OFFSET		0x40
+union ce_pe_dma_cfg {
+	struct {
+		u32 rsv:7;
+		u32 dir_host:1;
+		u32 rsv1:2;
+		u32 bo_td_en:1;
+		u32 dis_pdr_upd:1;
+		u32 bo_sgpd_en:1;
+		u32 bo_data_en:1;
+		u32 bo_sa_en:1;
+		u32 bo_pd_en:1;
+		u32 rsv2:4;
+		u32 dynamic_sa_en:1;
+		u32 pdr_mode:2;
+		u32 pe_mode:1;
+		u32 rsv3:5;
+		u32 reset_sg:1;
+		u32 reset_pdr:1;
+		u32 reset_pe:1;
+	} bf;
+	u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_PDR_BASE_OFFSET		0x48
+#define CRYPTO4XX_RDR_BASE_OFFSET		0x4c
+#define CRYPTO4XX_RING_SIZE_OFFSET		0x50
+union ce_ring_size {
+	struct {
+		u32 ring_offset:16;
+		u32 rsv:6;
+		u32 ring_size:10;
+	} bf;
+	u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_RING_CONTROL_OFFSET		0x54
+union ce_ring_control {
+	struct {
+		u32 continuous:1;
+		u32 rsv:5;
+		u32 ring_retry_divisor:10;
+		u32 rsv1:4;
+		u32 ring_poll_divisor:10;
+	} bf;
+	u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_IO_THRESHOLD_OFFSET		0x60
+union ce_io_threshold {
+	struct {
+		u32 rsv:6;
+		u32 output_threshold:10;
+		u32 rsv1:6;
+		u32 input_threshold:10;
+	} bf;
+	u32 w;
+} __attribute__((packed));
+
+#define CRYPTO4XX_GATHER_RING_BASE_OFFSET	0x64
+#define CRYPTO4XX_SCATTER_RING_BASE_OFFSET	0x68
+
+union ce_part_ring_size  {
+	struct {
+		u32 sdr_size:16;
+		u32 gdr_size:16;
+	} bf;
+	u32 w;
+} __attribute__((packed));
+
+#define MAX_BURST_SIZE_32			0
+#define MAX_BURST_SIZE_64			1
+#define MAX_BURST_SIZE_128			2
+#define MAX_BURST_SIZE_256			3
+
+/* gather descriptor control length */
+struct gd_ctl_len {
+	u32 len:16;
+	u32 rsv:14;
+	u32 done:1;
+	u32 ready:1;
+} __attribute__((packed));
+
+struct ce_gd {
+	u32 ptr;
+	struct gd_ctl_len ctl_len;
+} __attribute__((packed));
+
+struct sd_ctl {
+	u32 ctl:30;
+	u32 done:1;
+	u32 rdy:1;
+} __attribute__((packed));
+
+struct ce_sd {
+	u32 ptr;
+	struct sd_ctl ctl;
+} __attribute__((packed));
+
+#define PD_PAD_CTL_32	0x10
+#define PD_PAD_CTL_64	0x20
+#define PD_PAD_CTL_128	0x40
+#define PD_PAD_CTL_256	0x80
+union ce_pd_ctl {
+	struct {
+		u32 pd_pad_ctl:8;
+		u32 status:8;
+		u32 next_hdr:8;
+		u32 rsv:2;
+		u32 cached_sa:1;
+		u32 hash_final:1;
+		u32 init_arc4:1;
+		u32 rsv1:1;
+		u32 pe_done:1;
+		u32 host_ready:1;
+	} bf;
+	u32 w;
+} __attribute__((packed));
+#define PD_CTL_HASH_FINAL	BIT(4)
+#define PD_CTL_PE_DONE		BIT(1)
+#define PD_CTL_HOST_READY	BIT(0)
+
+union ce_pd_ctl_len {
+	struct {
+		u32 bypass:8;
+		u32 pe_done:1;
+		u32 host_ready:1;
+		u32 rsv:2;
+		u32 pkt_len:20;
+	} bf;
+	u32 w;
+} __attribute__((packed));
+
+struct ce_pd {
+	union ce_pd_ctl   pd_ctl;
+	u32 src;
+	u32 dest;
+	u32 sa;                 /* get from ctx->sa_dma_addr */
+	u32 sa_len;             /* only if dynamic sa is used */
+	union ce_pd_ctl_len pd_ctl_len;
+
+} __attribute__((packed));
+#endif
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
new file mode 100644
index 0000000..a4d4035
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -0,0 +1,318 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file defines the Security Association
+ * (SA) context format.
+ */
+
+#ifndef __CRYPTO4XX_SA_H__
+#define __CRYPTO4XX_SA_H__
+
+#define AES_IV_SIZE				16
+
+/**
+ * Contents of Dynamic Security Association (SA) with all possible fields
+ */
+union dynamic_sa_contents {
+	struct {
+		u32 arc4_state_ptr:1;
+		u32 arc4_ij_ptr:1;
+		u32 state_ptr:1;
+		u32 iv3:1;
+		u32 iv2:1;
+		u32 iv1:1;
+		u32 iv0:1;
+		u32 seq_num_mask3:1;
+		u32 seq_num_mask2:1;
+		u32 seq_num_mask1:1;
+		u32 seq_num_mask0:1;
+		u32 seq_num1:1;
+		u32 seq_num0:1;
+		u32 spi:1;
+		u32 outer_size:5;
+		u32 inner_size:5;
+		u32 key_size:4;
+		u32 cmd_size:4;
+	} bf;
+	u32 w;
+} __attribute__((packed));
+
+#define DIR_OUTBOUND				0
+#define DIR_INBOUND				1
+#define SA_OP_GROUP_BASIC			0
+#define SA_OPCODE_ENCRYPT			0
+#define SA_OPCODE_DECRYPT			0
+#define SA_OPCODE_ENCRYPT_HASH			1
+#define SA_OPCODE_HASH_DECRYPT			1
+#define SA_OPCODE_HASH				3
+#define SA_CIPHER_ALG_DES			0
+#define SA_CIPHER_ALG_3DES			1
+#define SA_CIPHER_ALG_ARC4			2
+#define SA_CIPHER_ALG_AES			3
+#define SA_CIPHER_ALG_KASUMI			4
+#define SA_CIPHER_ALG_NULL			15
+
+#define SA_HASH_ALG_MD5				0
+#define SA_HASH_ALG_SHA1			1
+#define SA_HASH_ALG_GHASH			12
+#define SA_HASH_ALG_CBC_MAC			14
+#define SA_HASH_ALG_NULL			15
+#define SA_HASH_ALG_SHA1_DIGEST_SIZE		20
+
+#define SA_LOAD_HASH_FROM_SA			0
+#define SA_LOAD_HASH_FROM_STATE			2
+#define SA_NOT_LOAD_HASH			3
+#define SA_LOAD_IV_FROM_SA			0
+#define SA_LOAD_IV_FROM_INPUT			1
+#define SA_LOAD_IV_FROM_STATE			2
+#define SA_LOAD_IV_GEN_IV			3
+
+#define SA_PAD_TYPE_CONSTANT			2
+#define SA_PAD_TYPE_ZERO			3
+#define SA_PAD_TYPE_TLS				5
+#define SA_PAD_TYPE_DTLS			5
+#define SA_NOT_SAVE_HASH			0
+#define SA_SAVE_HASH				1
+#define SA_NOT_SAVE_IV				0
+#define SA_SAVE_IV				1
+#define SA_HEADER_PROC				1
+#define SA_NO_HEADER_PROC			0
+
+union sa_command_0 {
+	struct {
+		u32 scatter:1;
+		u32 gather:1;
+		u32 save_hash_state:1;
+		u32 save_iv:1;
+		u32 load_hash_state:2;
+		u32 load_iv:2;
+		u32 digest_len:4;
+		u32 hdr_proc:1;
+		u32 extend_pad:1;
+		u32 stream_cipher_pad:1;
+		u32 rsv:1;
+		u32 hash_alg:4;
+		u32 cipher_alg:4;
+		u32 pad_type:2;
+		u32 op_group:2;
+		u32 dir:1;
+		u32 opcode:3;
+	} bf;
+	u32 w;
+} __attribute__((packed));
+
+#define CRYPTO_MODE_ECB				0
+#define CRYPTO_MODE_CBC				1
+#define CRYPTO_MODE_OFB				2
+#define CRYPTO_MODE_CFB				3
+#define CRYPTO_MODE_CTR				4
+
+#define CRYPTO_FEEDBACK_MODE_NO_FB		0
+#define CRYPTO_FEEDBACK_MODE_64BIT_OFB		0
+#define CRYPTO_FEEDBACK_MODE_8BIT_CFB		1
+#define CRYPTO_FEEDBACK_MODE_1BIT_CFB		2
+#define CRYPTO_FEEDBACK_MODE_128BIT_CFB		3
+
+#define SA_AES_KEY_LEN_128			2
+#define SA_AES_KEY_LEN_192			3
+#define SA_AES_KEY_LEN_256			4
+
+#define SA_REV2					1
+/**
+ * The following defines are for bits in sa_command_1.
+ * In basic hash mode this bit selects between a plain hash and an HMAC.
+ * In IPsec mode, this bit is the muting control.
+ */
+#define SA_HASH_MODE_HASH			0
+#define SA_HASH_MODE_HMAC			1
+#define SA_MC_ENABLE				0
+#define SA_MC_DISABLE				1
+#define SA_NOT_COPY_HDR				0
+#define SA_COPY_HDR				1
+#define SA_NOT_COPY_PAD				0
+#define SA_COPY_PAD				1
+#define SA_NOT_COPY_PAYLOAD			0
+#define SA_COPY_PAYLOAD				1
+#define SA_EXTENDED_SN_OFF			0
+#define SA_EXTENDED_SN_ON			1
+#define SA_SEQ_MASK_OFF				0
+#define SA_SEQ_MASK_ON				1
+
+union sa_command_1 {
+	struct {
+		u32 crypto_mode31:1;
+		u32 save_arc4_state:1;
+		u32 arc4_stateful:1;
+		u32 key_len:5;
+		u32 hash_crypto_offset:8;
+		u32 sa_rev:2;
+		u32 byte_offset:1;
+		u32 hmac_muting:1;
+		u32 feedback_mode:2;
+		u32 crypto_mode9_8:2;
+		u32 extended_seq_num:1;
+		u32 seq_num_mask:1;
+		u32 mutable_bit_proc:1;
+		u32 ip_version:1;
+		u32 copy_pad:1;
+		u32 copy_payload:1;
+		u32 copy_hdr:1;
+		u32 rsv1:1;
+	} bf;
+	u32 w;
+} __attribute__((packed));
+
+struct dynamic_sa_ctl {
+	union dynamic_sa_contents sa_contents;
+	union sa_command_0 sa_command_0;
+	union sa_command_1 sa_command_1;
+} __attribute__((packed));
+
+/**
+ * State Record for Security Association (SA)
+ */
+struct  sa_state_record {
+	__le32 save_iv[4];
+	__le32 save_hash_byte_cnt[2];
+	union {
+		u32 save_digest[16]; /* for MD5/SHA */
+		__le32 save_digest_le32[16]; /* GHASH / CBC */
+	};
+} __attribute__((packed));
+
+/**
+ * Security Association (SA) for AES128
+ */
+struct dynamic_sa_aes128 {
+	struct dynamic_sa_ctl	ctrl;
+	__le32 key[4];
+	__le32 iv[4]; /* for CBC, OFB, and CFB modes */
+	u32 state_ptr;
+	u32 reserved;
+} __attribute__((packed));
+
+#define SA_AES128_LEN		(sizeof(struct dynamic_sa_aes128)/4)
+#define SA_AES128_CONTENTS	0x3e000042
+
+/*
+ * Security Association (SA) for AES192
+ */
+struct dynamic_sa_aes192 {
+	struct dynamic_sa_ctl ctrl;
+	__le32 key[6];
+	__le32 iv[4]; /* for CBC, OFB, and CFB modes */
+	u32 state_ptr;
+	u32 reserved;
+} __attribute__((packed));
+
+#define SA_AES192_LEN		(sizeof(struct dynamic_sa_aes192)/4)
+#define SA_AES192_CONTENTS	0x3e000062
+
+/**
+ * Security Association (SA) for AES256
+ */
+struct dynamic_sa_aes256 {
+	struct dynamic_sa_ctl ctrl;
+	__le32 key[8];
+	__le32 iv[4]; /* for CBC, OFB, and CFB modes */
+	u32 state_ptr;
+	u32 reserved;
+} __attribute__((packed));
+
+#define SA_AES256_LEN		(sizeof(struct dynamic_sa_aes256)/4)
+#define SA_AES256_CONTENTS	0x3e000082
+#define SA_AES_CONTENTS		0x3e000002
+
+/**
+ * Security Association (SA) for AES128 CCM
+ */
+struct dynamic_sa_aes128_ccm {
+	struct dynamic_sa_ctl ctrl;
+	__le32 key[4];
+	__le32 iv[4];
+	u32 state_ptr;
+	u32 reserved;
+} __packed;
+#define SA_AES128_CCM_LEN	(sizeof(struct dynamic_sa_aes128_ccm)/4)
+#define SA_AES128_CCM_CONTENTS	0x3e000042
+#define SA_AES_CCM_CONTENTS	0x3e000002
+
+/**
+ * Security Association (SA) for AES128_GCM
+ */
+struct dynamic_sa_aes128_gcm {
+	struct dynamic_sa_ctl ctrl;
+	__le32 key[4];
+	__le32 inner_digest[4];
+	__le32 iv[4];
+	u32 state_ptr;
+	u32 reserved;
+} __packed;
+
+#define SA_AES128_GCM_LEN	(sizeof(struct dynamic_sa_aes128_gcm)/4)
+#define SA_AES128_GCM_CONTENTS	0x3e000442
+#define SA_AES_GCM_CONTENTS	0x3e000402
+
+/**
+ * Security Association (SA) for HASH160: HMAC-SHA1
+ */
+struct dynamic_sa_hash160 {
+	struct dynamic_sa_ctl ctrl;
+	__le32 inner_digest[5];
+	__le32 outer_digest[5];
+	u32 state_ptr;
+	u32 reserved;
+} __attribute__((packed));
+#define SA_HASH160_LEN		(sizeof(struct dynamic_sa_hash160)/4)
+#define SA_HASH160_CONTENTS     0x2000a502
+
+static inline u32
+get_dynamic_sa_offset_state_ptr_field(struct dynamic_sa_ctl *cts)
+{
+	u32 offset;
+
+	offset = cts->sa_contents.bf.key_size
+		+ cts->sa_contents.bf.inner_size
+		+ cts->sa_contents.bf.outer_size
+		+ cts->sa_contents.bf.spi
+		+ cts->sa_contents.bf.seq_num0
+		+ cts->sa_contents.bf.seq_num1
+		+ cts->sa_contents.bf.seq_num_mask0
+		+ cts->sa_contents.bf.seq_num_mask1
+		+ cts->sa_contents.bf.seq_num_mask2
+		+ cts->sa_contents.bf.seq_num_mask3
+		+ cts->sa_contents.bf.iv0
+		+ cts->sa_contents.bf.iv1
+		+ cts->sa_contents.bf.iv2
+		+ cts->sa_contents.bf.iv3;
+
+	return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+static inline __le32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
+{
+	return (__le32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
+}
+
+static inline __le32 *get_dynamic_sa_inner_digest(struct dynamic_sa_ctl *cts)
+{
+	return (__le32 *) ((unsigned long)cts +
+		sizeof(struct dynamic_sa_ctl) +
+		cts->sa_contents.bf.key_size * 4);
+}
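+
+/*
+ * Illustrative example (not part of this driver): the accessors above locate
+ * the variable-length fields that follow the fixed dynamic_sa_ctl header.
+ * For an AES-128 SA whose sa_contents has been set to SA_AES128_CONTENTS,
+ *
+ *	struct dynamic_sa_aes128 sa;
+ *	__le32 *key = get_dynamic_sa_key_field(&sa.ctrl);
+ *
+ * points key at sa.key, and get_dynamic_sa_offset_state_ptr_field(&sa.ctrl)
+ * returns the byte offset of sa.state_ptr within the SA.
+ */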
+
+#endif
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
new file mode 100644
index 0000000..5e63742
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -0,0 +1,131 @@
+/*
+ * Generic PowerPC 44x RNG driver
+ *
+ * Copyright 2011 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#include "crypto4xx_core.h"
+#include "crypto4xx_trng.h"
+#include "crypto4xx_reg_def.h"
+
+#define PPC4XX_TRNG_CTRL	0x0008
+#define PPC4XX_TRNG_CTRL_DALM	0x20
+#define PPC4XX_TRNG_STAT	0x0004
+#define PPC4XX_TRNG_STAT_B	0x1
+#define PPC4XX_TRNG_DATA	0x0000
+
+static int ppc4xx_trng_data_present(struct hwrng *rng, int wait)
+{
+	struct crypto4xx_device *dev = (void *)rng->priv;
+	int busy, i, present = 0;
+
+	for (i = 0; i < 20; i++) {
+		busy = (in_le32(dev->trng_base + PPC4XX_TRNG_STAT) &
+			PPC4XX_TRNG_STAT_B);
+		if (!busy || !wait) {
+			present = 1;
+			break;
+		}
+		udelay(10);
+	}
+	return present;
+}
+
+static int ppc4xx_trng_data_read(struct hwrng *rng, u32 *data)
+{
+	struct crypto4xx_device *dev = (void *)rng->priv;
+	*data = in_le32(dev->trng_base + PPC4XX_TRNG_DATA);
+	return 4;
+}
+
+static void ppc4xx_trng_enable(struct crypto4xx_device *dev, bool enable)
+{
+	u32 device_ctrl;
+
+	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+	if (enable)
+		device_ctrl |= PPC4XX_TRNG_EN;
+	else
+		device_ctrl &= ~PPC4XX_TRNG_EN;
+	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+}
+
+static const struct of_device_id ppc4xx_trng_match[] = {
+	{ .compatible = "ppc4xx-rng", },
+	{ .compatible = "amcc,ppc460ex-rng", },
+	{ .compatible = "amcc,ppc440epx-rng", },
+	{},
+};
+
+void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
+{
+	struct crypto4xx_device *dev = core_dev->dev;
+	struct device_node *trng = NULL;
+	struct hwrng *rng = NULL;
+	int err;
+
+	/* Find the TRNG device node and map it */
+	trng = of_find_matching_node(NULL, ppc4xx_trng_match);
+	if (!trng || !of_device_is_available(trng)) {
+		of_node_put(trng);
+		return;
+	}
+
+	dev->trng_base = of_iomap(trng, 0);
+	of_node_put(trng);
+	if (!dev->trng_base)
+		goto err_out;
+
+	rng = kzalloc(sizeof(*rng), GFP_KERNEL);
+	if (!rng)
+		goto err_out;
+
+	rng->name = KBUILD_MODNAME;
+	rng->data_present = ppc4xx_trng_data_present;
+	rng->data_read = ppc4xx_trng_data_read;
+	rng->priv = (unsigned long) dev;
+	core_dev->trng = rng;
+	ppc4xx_trng_enable(dev, true);
+	out_le32(dev->trng_base + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM);
+	err = devm_hwrng_register(core_dev->device, core_dev->trng);
+	if (err) {
+		ppc4xx_trng_enable(dev, false);
+		dev_err(core_dev->device, "failed to register hwrng (%d).\n",
+			err);
+		goto err_out;
+	}
+	return;
+
+err_out:
+	iounmap(dev->trng_base);
+	kfree(rng);
+	dev->trng_base = NULL;
+	core_dev->trng = NULL;
+}
+
+void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev)
+{
+	if (core_dev && core_dev->trng) {
+		struct crypto4xx_device *dev = core_dev->dev;
+
+		devm_hwrng_unregister(core_dev->device, core_dev->trng);
+		ppc4xx_trng_enable(dev, false);
+		iounmap(dev->trng_base);
+		kfree(core_dev->trng);
+	}
+}
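+
+/*
+ * Note: once devm_hwrng_register() succeeds, the hw_random core exposes this
+ * TRNG (for example via /dev/hwrng and the rng_current/rng_available sysfs
+ * attributes), polling ppc4xx_trng_data_present() and pulling 32-bit words
+ * through ppc4xx_trng_data_read() above.
+ */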
+
+MODULE_ALIAS("ppc4xx_rng");
diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h
new file mode 100644
index 0000000..931d225
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_trng.h
@@ -0,0 +1,34 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file declares the PPC4xx true random
+ * number generator (TRNG) probe/remove hooks.
+ */
+
+#ifndef __CRYPTO4XX_TRNG_H__
+#define __CRYPTO4XX_TRNG_H__
+
+#ifdef CONFIG_HW_RANDOM_PPC4XX
+void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev);
+void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev);
+#else
+static inline void ppc4xx_trng_probe(
+	struct crypto4xx_core_device *core_dev __maybe_unused) { }
+static inline void ppc4xx_trng_remove(
+	struct crypto4xx_core_device *core_dev __maybe_unused) { }
+#endif
+
+#endif
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h
new file mode 100644
index 0000000..62f9d30
--- /dev/null
+++ b/drivers/crypto/atmel-aes-regs.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ATMEL_AES_REGS_H__
+#define __ATMEL_AES_REGS_H__
+
+#define AES_CR			0x00
+#define AES_CR_START		(1 << 0)
+#define AES_CR_SWRST		(1 << 8)
+#define AES_CR_LOADSEED		(1 << 16)
+
+#define	AES_MR			0x04
+#define AES_MR_CYPHER_DEC		(0 << 0)
+#define AES_MR_CYPHER_ENC		(1 << 0)
+#define AES_MR_GTAGEN			(1 << 1)
+#define	AES_MR_DUALBUFF			(1 << 3)
+#define AES_MR_PROCDLY_MASK		(0xF << 4)
+#define AES_MR_PROCDLY_OFFSET	4
+#define AES_MR_SMOD_MASK		(0x3 << 8)
+#define AES_MR_SMOD_MANUAL		(0x0 << 8)
+#define AES_MR_SMOD_AUTO		(0x1 << 8)
+#define AES_MR_SMOD_IDATAR0		(0x2 << 8)
+#define	AES_MR_KEYSIZE_MASK		(0x3 << 10)
+#define	AES_MR_KEYSIZE_128		(0x0 << 10)
+#define	AES_MR_KEYSIZE_192		(0x1 << 10)
+#define	AES_MR_KEYSIZE_256		(0x2 << 10)
+#define AES_MR_OPMOD_MASK		(0x7 << 12)
+#define AES_MR_OPMOD_ECB		(0x0 << 12)
+#define AES_MR_OPMOD_CBC		(0x1 << 12)
+#define AES_MR_OPMOD_OFB		(0x2 << 12)
+#define AES_MR_OPMOD_CFB		(0x3 << 12)
+#define AES_MR_OPMOD_CTR		(0x4 << 12)
+#define AES_MR_OPMOD_GCM		(0x5 << 12)
+#define AES_MR_OPMOD_XTS		(0x6 << 12)
+#define AES_MR_LOD				(0x1 << 15)
+#define AES_MR_CFBS_MASK		(0x7 << 16)
+#define AES_MR_CFBS_128b		(0x0 << 16)
+#define AES_MR_CFBS_64b			(0x1 << 16)
+#define AES_MR_CFBS_32b			(0x2 << 16)
+#define AES_MR_CFBS_16b			(0x3 << 16)
+#define AES_MR_CFBS_8b			(0x4 << 16)
+#define AES_MR_CKEY_MASK		(0xF << 20)
+#define AES_MR_CKEY_OFFSET		20
+#define AES_MR_CMTYP_MASK		(0x1F << 24)
+#define AES_MR_CMTYP_OFFSET		24
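+
+/*
+ * Illustrative example (not part of this header): a Mode Register value for
+ * CBC encryption with a 128-bit key and automatic start is composed as
+ *
+ *	u32 mr = AES_MR_CYPHER_ENC | AES_MR_OPMOD_CBC |
+ *		 AES_MR_KEYSIZE_128 | AES_MR_SMOD_AUTO;
+ *
+ * which mirrors how atmel_aes_write_ctrl_key() in atmel-aes.c builds the
+ * value it writes to AES_MR.
+ */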
+
+#define	AES_IER		0x10
+#define	AES_IDR		0x14
+#define	AES_IMR		0x18
+#define	AES_ISR		0x1C
+#define AES_INT_DATARDY		(1 << 0)
+#define AES_INT_URAD		(1 << 8)
+#define AES_INT_TAGRDY		(1 << 16)
+#define AES_ISR_URAT_MASK	(0xF << 12)
+#define AES_ISR_URAT_IDR_WR_PROC	(0x0 << 12)
+#define AES_ISR_URAT_ODR_RD_PROC	(0x1 << 12)
+#define AES_ISR_URAT_MR_WR_PROC		(0x2 << 12)
+#define AES_ISR_URAT_ODR_RD_SUBK	(0x3 << 12)
+#define AES_ISR_URAT_MR_WR_SUBK		(0x4 << 12)
+#define AES_ISR_URAT_WOR_RD			(0x5 << 12)
+
+#define AES_KEYWR(x)	(0x20 + ((x) * 0x04))
+#define AES_IDATAR(x)	(0x40 + ((x) * 0x04))
+#define AES_ODATAR(x)	(0x50 + ((x) * 0x04))
+#define AES_IVR(x)		(0x60 + ((x) * 0x04))
+
+#define AES_AADLENR	0x70
+#define AES_CLENR	0x74
+#define AES_GHASHR(x)	(0x78 + ((x) * 0x04))
+#define AES_TAGR(x)	(0x88 + ((x) * 0x04))
+#define AES_CTRR	0x98
+#define AES_GCMHR(x)	(0x9c + ((x) * 0x04))
+
+#define AES_EMR		0xb0
+#define AES_EMR_APEN		BIT(0)	/* Auto Padding Enable */
+#define AES_EMR_APM		BIT(1)	/* Auto Padding Mode */
+#define AES_EMR_APM_IPSEC	0x0
+#define AES_EMR_APM_SSL		BIT(1)
+#define AES_EMR_PLIPEN		BIT(4)	/* PLIP Enable */
+#define AES_EMR_PLIPD		BIT(5)	/* PLIP Decipher */
+#define AES_EMR_PADLEN_MASK	(0xFu << 8)
+#define AES_EMR_PADLEN_OFFSET	8
+#define AES_EMR_PADLEN(padlen)	(((padlen) << AES_EMR_PADLEN_OFFSET) &\
+				 AES_EMR_PADLEN_MASK)
+#define AES_EMR_NHEAD_MASK	(0xFu << 16)
+#define AES_EMR_NHEAD_OFFSET	16
+#define AES_EMR_NHEAD(nhead)	(((nhead) << AES_EMR_NHEAD_OFFSET) &\
+				 AES_EMR_NHEAD_MASK)
+
+#define AES_TWR(x)	(0xc0 + ((x) * 0x04))
+#define AES_ALPHAR(x)	(0xd0 + ((x) * 0x04))
+
+#define AES_HW_VERSION	0xFC
+
+#endif /* __ATMEL_AES_REGS_H__ */
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
new file mode 100644
index 0000000..801aeab
--- /dev/null
+++ b/drivers/crypto/atmel-aes.c
@@ -0,0 +1,2802 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for ATMEL AES HW acceleration.
+ *
+ * Copyright (c) 2012 Eukréa Electromatique - ATMEL
+ * Author: Nicolas Royer <nicolas@eukrea.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from the omap-aes.c driver.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+#include <crypto/xts.h>
+#include <crypto/internal/aead.h>
+#include <linux/platform_data/crypto-atmel.h>
+#include <dt-bindings/dma/at91.h>
+#include "atmel-aes-regs.h"
+#include "atmel-authenc.h"
+
+#define ATMEL_AES_PRIORITY	300
+
+#define ATMEL_AES_BUFFER_ORDER	2
+#define ATMEL_AES_BUFFER_SIZE	(PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)
+
+#define CFB8_BLOCK_SIZE		1
+#define CFB16_BLOCK_SIZE	2
+#define CFB32_BLOCK_SIZE	4
+#define CFB64_BLOCK_SIZE	8
+
+#define SIZE_IN_WORDS(x)	((x) >> 2)
+
+/* AES flags */
+/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
+#define AES_FLAGS_ENCRYPT	AES_MR_CYPHER_ENC
+#define AES_FLAGS_GTAGEN	AES_MR_GTAGEN
+#define AES_FLAGS_OPMODE_MASK	(AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
+#define AES_FLAGS_ECB		AES_MR_OPMOD_ECB
+#define AES_FLAGS_CBC		AES_MR_OPMOD_CBC
+#define AES_FLAGS_OFB		AES_MR_OPMOD_OFB
+#define AES_FLAGS_CFB128	(AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
+#define AES_FLAGS_CFB64		(AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
+#define AES_FLAGS_CFB32		(AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
+#define AES_FLAGS_CFB16		(AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
+#define AES_FLAGS_CFB8		(AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
+#define AES_FLAGS_CTR		AES_MR_OPMOD_CTR
+#define AES_FLAGS_GCM		AES_MR_OPMOD_GCM
+#define AES_FLAGS_XTS		AES_MR_OPMOD_XTS
+
+#define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
+				 AES_FLAGS_ENCRYPT |		\
+				 AES_FLAGS_GTAGEN)
+
+#define AES_FLAGS_BUSY		BIT(3)
+#define AES_FLAGS_DUMP_REG	BIT(4)
+#define AES_FLAGS_OWN_SHA	BIT(5)
+
+#define AES_FLAGS_PERSISTENT	AES_FLAGS_BUSY
+
+#define ATMEL_AES_QUEUE_LENGTH	50
+
+#define ATMEL_AES_DMA_THRESHOLD		256
+
+
+struct atmel_aes_caps {
+	bool			has_dualbuff;
+	bool			has_cfb64;
+	bool			has_ctr32;
+	bool			has_gcm;
+	bool			has_xts;
+	bool			has_authenc;
+	u32			max_burst_size;
+};
+
+struct atmel_aes_dev;
+
+
+typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);
+
+
+struct atmel_aes_base_ctx {
+	struct atmel_aes_dev	*dd;
+	atmel_aes_fn_t		start;
+	int			keylen;
+	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
+	u16			block_size;
+	bool			is_aead;
+};
+
+struct atmel_aes_ctx {
+	struct atmel_aes_base_ctx	base;
+};
+
+struct atmel_aes_ctr_ctx {
+	struct atmel_aes_base_ctx	base;
+
+	u32			iv[AES_BLOCK_SIZE / sizeof(u32)];
+	size_t			offset;
+	struct scatterlist	src[2];
+	struct scatterlist	dst[2];
+};
+
+struct atmel_aes_gcm_ctx {
+	struct atmel_aes_base_ctx	base;
+
+	struct scatterlist	src[2];
+	struct scatterlist	dst[2];
+
+	u32			j0[AES_BLOCK_SIZE / sizeof(u32)];
+	u32			tag[AES_BLOCK_SIZE / sizeof(u32)];
+	u32			ghash[AES_BLOCK_SIZE / sizeof(u32)];
+	size_t			textlen;
+
+	const u32		*ghash_in;
+	u32			*ghash_out;
+	atmel_aes_fn_t		ghash_resume;
+};
+
+struct atmel_aes_xts_ctx {
+	struct atmel_aes_base_ctx	base;
+
+	u32			key2[AES_KEYSIZE_256 / sizeof(u32)];
+};
+
+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+struct atmel_aes_authenc_ctx {
+	struct atmel_aes_base_ctx	base;
+	struct atmel_sha_authenc_ctx	*auth;
+};
+#endif
+
+struct atmel_aes_reqctx {
+	unsigned long		mode;
+	u32			lastc[AES_BLOCK_SIZE / sizeof(u32)];
+};
+
+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+struct atmel_aes_authenc_reqctx {
+	struct atmel_aes_reqctx	base;
+
+	struct scatterlist	src[2];
+	struct scatterlist	dst[2];
+	size_t			textlen;
+	u32			digest[SHA512_DIGEST_SIZE / sizeof(u32)];
+
+	/* auth_req MUST be placed last. */
+	struct ahash_request	auth_req;
+};
+#endif
+
+struct atmel_aes_dma {
+	struct dma_chan		*chan;
+	struct scatterlist	*sg;
+	int			nents;
+	unsigned int		remainder;
+	unsigned int		sg_len;
+};
+
+struct atmel_aes_dev {
+	struct list_head	list;
+	unsigned long		phys_base;
+	void __iomem		*io_base;
+
+	struct crypto_async_request	*areq;
+	struct atmel_aes_base_ctx	*ctx;
+
+	bool			is_async;
+	atmel_aes_fn_t		resume;
+	atmel_aes_fn_t		cpu_transfer_complete;
+
+	struct device		*dev;
+	struct clk		*iclk;
+	int			irq;
+
+	unsigned long		flags;
+
+	spinlock_t		lock;
+	struct crypto_queue	queue;
+
+	struct tasklet_struct	done_task;
+	struct tasklet_struct	queue_task;
+
+	size_t			total;
+	size_t			datalen;
+	u32			*data;
+
+	struct atmel_aes_dma	src;
+	struct atmel_aes_dma	dst;
+
+	size_t			buflen;
+	void			*buf;
+	struct scatterlist	aligned_sg;
+	struct scatterlist	*real_dst;
+
+	struct atmel_aes_caps	caps;
+
+	u32			hw_version;
+};
+
+struct atmel_aes_drv {
+	struct list_head	dev_list;
+	spinlock_t		lock;
+};
+
+static struct atmel_aes_drv atmel_aes = {
+	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
+};
+
+#ifdef VERBOSE_DEBUG
+static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
+{
+	switch (offset) {
+	case AES_CR:
+		return "CR";
+
+	case AES_MR:
+		return "MR";
+
+	case AES_ISR:
+		return "ISR";
+
+	case AES_IMR:
+		return "IMR";
+
+	case AES_IER:
+		return "IER";
+
+	case AES_IDR:
+		return "IDR";
+
+	case AES_KEYWR(0):
+	case AES_KEYWR(1):
+	case AES_KEYWR(2):
+	case AES_KEYWR(3):
+	case AES_KEYWR(4):
+	case AES_KEYWR(5):
+	case AES_KEYWR(6):
+	case AES_KEYWR(7):
+		snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
+		break;
+
+	case AES_IDATAR(0):
+	case AES_IDATAR(1):
+	case AES_IDATAR(2):
+	case AES_IDATAR(3):
+		snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
+		break;
+
+	case AES_ODATAR(0):
+	case AES_ODATAR(1):
+	case AES_ODATAR(2):
+	case AES_ODATAR(3):
+		snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
+		break;
+
+	case AES_IVR(0):
+	case AES_IVR(1):
+	case AES_IVR(2):
+	case AES_IVR(3):
+		snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
+		break;
+
+	case AES_AADLENR:
+		return "AADLENR";
+
+	case AES_CLENR:
+		return "CLENR";
+
+	case AES_GHASHR(0):
+	case AES_GHASHR(1):
+	case AES_GHASHR(2):
+	case AES_GHASHR(3):
+		snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
+		break;
+
+	case AES_TAGR(0):
+	case AES_TAGR(1):
+	case AES_TAGR(2):
+	case AES_TAGR(3):
+		snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
+		break;
+
+	case AES_CTRR:
+		return "CTRR";
+
+	case AES_GCMHR(0):
+	case AES_GCMHR(1):
+	case AES_GCMHR(2):
+	case AES_GCMHR(3):
+		snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
+		break;
+
+	case AES_EMR:
+		return "EMR";
+
+	case AES_TWR(0):
+	case AES_TWR(1):
+	case AES_TWR(2):
+	case AES_TWR(3):
+		snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
+		break;
+
+	case AES_ALPHAR(0):
+	case AES_ALPHAR(1):
+	case AES_ALPHAR(2):
+	case AES_ALPHAR(3):
+		snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
+		break;
+
+	default:
+		snprintf(tmp, sz, "0x%02x", offset);
+		break;
+	}
+
+	return tmp;
+}
+#endif /* VERBOSE_DEBUG */
+
+/* Shared functions */
+
+static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
+{
+	u32 value = readl_relaxed(dd->io_base + offset);
+
+#ifdef VERBOSE_DEBUG
+	if (dd->flags & AES_FLAGS_DUMP_REG) {
+		char tmp[16];
+
+		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
+			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
+	}
+#endif /* VERBOSE_DEBUG */
+
+	return value;
+}
+
+static inline void atmel_aes_write(struct atmel_aes_dev *dd,
+					u32 offset, u32 value)
+{
+#ifdef VERBOSE_DEBUG
+	if (dd->flags & AES_FLAGS_DUMP_REG) {
+		char tmp[16];
+
+		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
+			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
+	}
+#endif /* VERBOSE_DEBUG */
+
+	writel_relaxed(value, dd->io_base + offset);
+}
+
+static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
+					u32 *value, int count)
+{
+	for (; count--; value++, offset += 4)
+		*value = atmel_aes_read(dd, offset);
+}
+
+static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
+			      const u32 *value, int count)
+{
+	for (; count--; value++, offset += 4)
+		atmel_aes_write(dd, offset, *value);
+}
+
+static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
+					u32 *value)
+{
+	atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
+}
+
+static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
+					 const u32 *value)
+{
+	atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
+}
+
+static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
+						atmel_aes_fn_t resume)
+{
+	u32 isr = atmel_aes_read(dd, AES_ISR);
+
+	if (unlikely(isr & AES_INT_DATARDY))
+		return resume(dd);
+
+	dd->resume = resume;
+	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
+	return -EINPROGRESS;
+}
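+
+/*
+ * The driver is structured as a chain of "resume" steps: each step either
+ * runs its continuation immediately when AES_INT_DATARDY is already set, or
+ * records it in dd->resume, enables the interrupt and returns -EINPROGRESS
+ * so processing continues once the hardware raises DATARDY.  A typical step
+ * therefore ends with
+ *
+ *	return atmel_aes_wait_for_data_ready(dd, next_step);
+ *
+ * where next_step is the atmel_aes_fn_t to call when data is ready.
+ */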
+
+static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
+{
+	len &= block_size - 1;
+	return len ? block_size - len : 0;
+}
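+
+/*
+ * For example, with the 16-byte AES block size atmel_aes_padlen(20, 16)
+ * returns 12 and atmel_aes_padlen(32, 16) returns 0; block_size is assumed
+ * to be a power of two for the mask above to be valid.
+ */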
+
+static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
+{
+	struct atmel_aes_dev *aes_dd = NULL;
+	struct atmel_aes_dev *tmp;
+
+	spin_lock_bh(&atmel_aes.lock);
+	if (!ctx->dd) {
+		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
+			aes_dd = tmp;
+			break;
+		}
+		ctx->dd = aes_dd;
+	} else {
+		aes_dd = ctx->dd;
+	}
+
+	spin_unlock_bh(&atmel_aes.lock);
+
+	return aes_dd;
+}
+
+static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
+{
+	int err;
+
+	err = clk_enable(dd->iclk);
+	if (err)
+		return err;
+
+	atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
+	atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
+
+	return 0;
+}
+
+static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
+{
+	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
+}
+
+static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
+{
+	int err;
+
+	err = atmel_aes_hw_init(dd);
+	if (err)
+		return err;
+
+	dd->hw_version = atmel_aes_get_version(dd);
+
+	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
+
+	clk_disable(dd->iclk);
+	return 0;
+}
+
+static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
+				      const struct atmel_aes_reqctx *rctx)
+{
+	/* Clear all but persistent flags and set request flags. */
+	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
+}
+
+static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
+{
+	return (dd->flags & AES_FLAGS_ENCRYPT);
+}
+
+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
+#endif
+
+static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
+{
+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+	if (dd->ctx->is_aead)
+		atmel_aes_authenc_complete(dd, err);
+#endif
+
+	clk_disable(dd->iclk);
+	dd->flags &= ~AES_FLAGS_BUSY;
+
+	if (!dd->ctx->is_aead) {
+		struct ablkcipher_request *req =
+			ablkcipher_request_cast(dd->areq);
+		struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+		struct crypto_ablkcipher *ablkcipher =
+			crypto_ablkcipher_reqtfm(req);
+		int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+		if (rctx->mode & AES_FLAGS_ENCRYPT) {
+			scatterwalk_map_and_copy(req->info, req->dst,
+				req->nbytes - ivsize, ivsize, 0);
+		} else {
+			if (req->src == req->dst) {
+				memcpy(req->info, rctx->lastc, ivsize);
+			} else {
+				scatterwalk_map_and_copy(req->info, req->src,
+					req->nbytes - ivsize, ivsize, 0);
+			}
+		}
+	}
+
+	if (dd->is_async)
+		dd->areq->complete(dd->areq, err);
+
+	tasklet_schedule(&dd->queue_task);
+
+	return err;
+}
+
+static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
+				     const u32 *iv, const u32 *key, int keylen)
+{
+	u32 valmr = 0;
+
+	/* MR register must be set before IV registers */
+	if (keylen == AES_KEYSIZE_128)
+		valmr |= AES_MR_KEYSIZE_128;
+	else if (keylen == AES_KEYSIZE_192)
+		valmr |= AES_MR_KEYSIZE_192;
+	else
+		valmr |= AES_MR_KEYSIZE_256;
+
+	valmr |= dd->flags & AES_FLAGS_MODE_MASK;
+
+	if (use_dma) {
+		valmr |= AES_MR_SMOD_IDATAR0;
+		if (dd->caps.has_dualbuff)
+			valmr |= AES_MR_DUALBUFF;
+	} else {
+		valmr |= AES_MR_SMOD_AUTO;
+	}
+
+	atmel_aes_write(dd, AES_MR, valmr);
+
+	atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));
+
+	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
+		atmel_aes_write_block(dd, AES_IVR(0), iv);
+}
+
+static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
+					const u32 *iv)
+
+{
+	atmel_aes_write_ctrl_key(dd, use_dma, iv,
+				 dd->ctx->key, dd->ctx->keylen);
+}
+
+/* CPU transfer */
+
+static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
+{
+	int err = 0;
+	u32 isr;
+
+	for (;;) {
+		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
+		dd->data += 4;
+		dd->datalen -= AES_BLOCK_SIZE;
+
+		if (dd->datalen < AES_BLOCK_SIZE)
+			break;
+
+		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
+
+		isr = atmel_aes_read(dd, AES_ISR);
+		if (!(isr & AES_INT_DATARDY)) {
+			dd->resume = atmel_aes_cpu_transfer;
+			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
+			return -EINPROGRESS;
+		}
+	}
+
+	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
+				 dd->buf, dd->total))
+		err = -EINVAL;
+
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	return dd->cpu_transfer_complete(dd);
+}
+
+static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
+			       struct scatterlist *src,
+			       struct scatterlist *dst,
+			       size_t len,
+			       atmel_aes_fn_t resume)
+{
+	size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);
+
+	if (unlikely(len == 0))
+		return -EINVAL;
+
+	sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
+
+	dd->total = len;
+	dd->real_dst = dst;
+	dd->cpu_transfer_complete = resume;
+	dd->datalen = len + padlen;
+	dd->data = (u32 *)dd->buf;
+	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
+	return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
+}
+
+
+/* DMA transfer */
+
+static void atmel_aes_dma_callback(void *data);
+
+static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
+				    struct scatterlist *sg,
+				    size_t len,
+				    struct atmel_aes_dma *dma)
+{
+	int nents;
+
+	if (!IS_ALIGNED(len, dd->ctx->block_size))
+		return false;
+
+	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
+		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+			return false;
+
+		if (len <= sg->length) {
+			if (!IS_ALIGNED(len, dd->ctx->block_size))
+				return false;
+
+			dma->nents = nents+1;
+			dma->remainder = sg->length - len;
+			sg->length = len;
+			return true;
+		}
+
+		if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
+			return false;
+
+		len -= sg->length;
+	}
+
+	return false;
+}
+
+static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
+{
+	struct scatterlist *sg = dma->sg;
+	int nents = dma->nents;
+
+	if (!dma->remainder)
+		return;
+
+	while (--nents > 0 && sg)
+		sg = sg_next(sg);
+
+	if (!sg)
+		return;
+
+	sg->length += dma->remainder;
+}
+
+static int atmel_aes_map(struct atmel_aes_dev *dd,
+			 struct scatterlist *src,
+			 struct scatterlist *dst,
+			 size_t len)
+{
+	bool src_aligned, dst_aligned;
+	size_t padlen;
+
+	dd->total = len;
+	dd->src.sg = src;
+	dd->dst.sg = dst;
+	dd->real_dst = dst;
+
+	src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
+	if (src == dst)
+		dst_aligned = src_aligned;
+	else
+		dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
+	if (!src_aligned || !dst_aligned) {
+		padlen = atmel_aes_padlen(len, dd->ctx->block_size);
+
+		if (dd->buflen < len + padlen)
+			return -ENOMEM;
+
+		if (!src_aligned) {
+			sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
+			dd->src.sg = &dd->aligned_sg;
+			dd->src.nents = 1;
+			dd->src.remainder = 0;
+		}
+
+		if (!dst_aligned) {
+			dd->dst.sg = &dd->aligned_sg;
+			dd->dst.nents = 1;
+			dd->dst.remainder = 0;
+		}
+
+		sg_init_table(&dd->aligned_sg, 1);
+		sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
+	}
+
+	if (dd->src.sg == dd->dst.sg) {
+		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
+					    DMA_BIDIRECTIONAL);
+		dd->dst.sg_len = dd->src.sg_len;
+		if (!dd->src.sg_len)
+			return -EFAULT;
+	} else {
+		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
+					    DMA_TO_DEVICE);
+		if (!dd->src.sg_len)
+			return -EFAULT;
+
+		dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
+					    DMA_FROM_DEVICE);
+		if (!dd->dst.sg_len) {
+			dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
+				     DMA_TO_DEVICE);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static void atmel_aes_unmap(struct atmel_aes_dev *dd)
+{
+	if (dd->src.sg == dd->dst.sg) {
+		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
+			     DMA_BIDIRECTIONAL);
+
+		if (dd->src.sg != &dd->aligned_sg)
+			atmel_aes_restore_sg(&dd->src);
+	} else {
+		dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
+			     DMA_FROM_DEVICE);
+
+		if (dd->dst.sg != &dd->aligned_sg)
+			atmel_aes_restore_sg(&dd->dst);
+
+		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
+			     DMA_TO_DEVICE);
+
+		if (dd->src.sg != &dd->aligned_sg)
+			atmel_aes_restore_sg(&dd->src);
+	}
+
+	if (dd->dst.sg == &dd->aligned_sg)
+		sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
+				    dd->buf, dd->total);
+}
+
+static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
+					enum dma_slave_buswidth addr_width,
+					enum dma_transfer_direction dir,
+					u32 maxburst)
+{
+	struct dma_async_tx_descriptor *desc;
+	struct dma_slave_config config;
+	dma_async_tx_callback callback;
+	struct atmel_aes_dma *dma;
+	int err;
+
+	memset(&config, 0, sizeof(config));
+	config.direction = dir;
+	config.src_addr_width = addr_width;
+	config.dst_addr_width = addr_width;
+	config.src_maxburst = maxburst;
+	config.dst_maxburst = maxburst;
+
+	switch (dir) {
+	case DMA_MEM_TO_DEV:
+		dma = &dd->src;
+		callback = NULL;
+		config.dst_addr = dd->phys_base + AES_IDATAR(0);
+		break;
+
+	case DMA_DEV_TO_MEM:
+		dma = &dd->dst;
+		callback = atmel_aes_dma_callback;
+		config.src_addr = dd->phys_base + AES_ODATAR(0);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	err = dmaengine_slave_config(dma->chan, &config);
+	if (err)
+		return err;
+
+	desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc)
+		return -ENOMEM;
+
+	desc->callback = callback;
+	desc->callback_param = dd;
+	dmaengine_submit(desc);
+	dma_async_issue_pending(dma->chan);
+
+	return 0;
+}
+
+static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
+					enum dma_transfer_direction dir)
+{
+	struct atmel_aes_dma *dma;
+
+	switch (dir) {
+	case DMA_MEM_TO_DEV:
+		dma = &dd->src;
+		break;
+
+	case DMA_DEV_TO_MEM:
+		dma = &dd->dst;
+		break;
+
+	default:
+		return;
+	}
+
+	dmaengine_terminate_all(dma->chan);
+}
+
+static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
+			       struct scatterlist *src,
+			       struct scatterlist *dst,
+			       size_t len,
+			       atmel_aes_fn_t resume)
+{
+	enum dma_slave_buswidth addr_width;
+	u32 maxburst;
+	int err;
+
+	switch (dd->ctx->block_size) {
+	case CFB8_BLOCK_SIZE:
+		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+		maxburst = 1;
+		break;
+
+	case CFB16_BLOCK_SIZE:
+		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		maxburst = 1;
+		break;
+
+	case CFB32_BLOCK_SIZE:
+	case CFB64_BLOCK_SIZE:
+		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		maxburst = 1;
+		break;
+
+	case AES_BLOCK_SIZE:
+		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		maxburst = dd->caps.max_burst_size;
+		break;
+
+	default:
+		err = -EINVAL;
+		goto exit;
+	}
+
+	err = atmel_aes_map(dd, src, dst, len);
+	if (err)
+		goto exit;
+
+	dd->resume = resume;
+
+	/* Set output DMA transfer first */
+	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
+					   maxburst);
+	if (err)
+		goto unmap;
+
+	/* Then set input DMA transfer */
+	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
+					   maxburst);
+	if (err)
+		goto output_transfer_stop;
+
+	return -EINPROGRESS;
+
+output_transfer_stop:
+	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
+unmap:
+	atmel_aes_unmap(dd);
+exit:
+	return atmel_aes_complete(dd, err);
+}
+
+static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
+{
+	atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
+	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
+	atmel_aes_unmap(dd);
+}
+
+static void atmel_aes_dma_callback(void *data)
+{
+	struct atmel_aes_dev *dd = data;
+
+	atmel_aes_dma_stop(dd);
+	dd->is_async = true;
+	(void)dd->resume(dd);
+}
+
+static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
+				  struct crypto_async_request *new_areq)
+{
+	struct crypto_async_request *areq, *backlog;
+	struct atmel_aes_base_ctx *ctx;
+	unsigned long flags;
+	bool start_async;
+	int err, ret = 0;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (new_areq)
+		ret = crypto_enqueue_request(&dd->queue, new_areq);
+	if (dd->flags & AES_FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
+	backlog = crypto_get_backlog(&dd->queue);
+	areq = crypto_dequeue_request(&dd->queue);
+	if (areq)
+		dd->flags |= AES_FLAGS_BUSY;
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!areq)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	ctx = crypto_tfm_ctx(areq->tfm);
+
+	dd->areq = areq;
+	dd->ctx = ctx;
+	start_async = (areq != new_areq);
+	dd->is_async = start_async;
+
+	/* WARNING: ctx->start() MAY change dd->is_async. */
+	err = ctx->start(dd);
+	return (start_async) ? ret : err;
+}
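+
+/*
+ * Note: when a new request is passed in, the value returned to the caller is
+ * the crypto_enqueue_request() status (typically -EINPROGRESS), while the
+ * actual work continues asynchronously through ctx->start() and the
+ * dd->resume chain until atmel_aes_complete() runs.
+ */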
+
+
+/* AES async block ciphers */
+
+static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
+{
+	return atmel_aes_complete(dd, 0);
+}
+
+static int atmel_aes_start(struct atmel_aes_dev *dd)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
+			dd->ctx->block_size != AES_BLOCK_SIZE);
+	int err;
+
+	atmel_aes_set_mode(dd, rctx);
+
+	err = atmel_aes_hw_init(dd);
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	atmel_aes_write_ctrl(dd, use_dma, req->info);
+	if (use_dma)
+		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+					   atmel_aes_transfer_complete);
+
+	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+				   atmel_aes_transfer_complete);
+}
+
+static inline struct atmel_aes_ctr_ctx *
+atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+	return container_of(ctx, struct atmel_aes_ctr_ctx, base);
+}
+
+static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	struct scatterlist *src, *dst;
+	u32 ctr, blocks;
+	size_t datalen;
+	bool use_dma, fragmented = false;
+
+	/* Check for transfer completion. */
+	ctx->offset += dd->total;
+	if (ctx->offset >= req->nbytes)
+		return atmel_aes_transfer_complete(dd);
+
+	/* Compute data length. */
+	datalen = req->nbytes - ctx->offset;
+	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
+	ctr = be32_to_cpu(ctx->iv[3]);
+	if (dd->caps.has_ctr32) {
+		/* Check 32bit counter overflow. */
+		u32 start = ctr;
+		u32 end = start + blocks - 1;
+
+		if (end < start) {
+			ctr |= 0xffffffff;
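+			/*
+			 * In 32-bit arithmetic -start equals the number of
+			 * counter values left before the hardware counter
+			 * wraps, so only that many blocks are processed in
+			 * this fragment.
+			 */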
+			datalen = AES_BLOCK_SIZE * -start;
+			fragmented = true;
+		}
+	} else {
+		/* Check 16bit counter overflow. */
+		u16 start = ctr & 0xffff;
+		u16 end = start + (u16)blocks - 1;
+
+		if (blocks >> 16 || end < start) {
+			ctr |= 0xffff;
+			datalen = AES_BLOCK_SIZE * (0x10000-start);
+			fragmented = true;
+		}
+	}
+	use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
+
+	/* Jump to offset. */
+	src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
+	dst = ((req->src == req->dst) ? src :
+	       scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));
+
+	/* Configure hardware. */
+	atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
+	if (unlikely(fragmented)) {
+		/*
+		 * Increment the counter manually to cope with the hardware
+		 * counter overflow.
+		 */
+		ctx->iv[3] = cpu_to_be32(ctr);
+		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
+	}
+
+	if (use_dma)
+		return atmel_aes_dma_start(dd, src, dst, datalen,
+					   atmel_aes_ctr_transfer);
+
+	return atmel_aes_cpu_start(dd, src, dst, datalen,
+				   atmel_aes_ctr_transfer);
+}
+
+static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	int err;
+
+	atmel_aes_set_mode(dd, rctx);
+
+	err = atmel_aes_hw_init(dd);
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
+	ctx->offset = 0;
+	dd->total = 0;
+	return atmel_aes_ctr_transfer(dd);
+}
+
+static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct atmel_aes_reqctx *rctx;
+	struct atmel_aes_dev *dd;
+
+	switch (mode & AES_FLAGS_OPMODE_MASK) {
+	case AES_FLAGS_CFB8:
+		ctx->block_size = CFB8_BLOCK_SIZE;
+		break;
+
+	case AES_FLAGS_CFB16:
+		ctx->block_size = CFB16_BLOCK_SIZE;
+		break;
+
+	case AES_FLAGS_CFB32:
+		ctx->block_size = CFB32_BLOCK_SIZE;
+		break;
+
+	case AES_FLAGS_CFB64:
+		ctx->block_size = CFB64_BLOCK_SIZE;
+		break;
+
+	default:
+		ctx->block_size = AES_BLOCK_SIZE;
+		break;
+	}
+	ctx->is_aead = false;
+
+	dd = atmel_aes_find_dev(ctx);
+	if (!dd)
+		return -ENODEV;
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->mode = mode;
+
+	if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
+		int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+		scatterwalk_map_and_copy(rctx->lastc, req->src,
+			(req->nbytes - ivsize), ivsize, 0);
+	}
+
+	return atmel_aes_handle_queue(dd, &req->base);
+}
+
+static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	if (keylen != AES_KEYSIZE_128 &&
+	    keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256) {
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_ECB);
+}
+
+static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CBC);
+}
+
+static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_OFB);
+}
+
+static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CFB128);
+}
+
+static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CFB64);
+}
+
+static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CFB32);
+}
+
+static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CFB16);
+}
+
+static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CFB8);
+}
+
+static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_CTR);
+}
+
+static int atmel_aes_cra_init(struct crypto_tfm *tfm)
+{
+	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+	ctx->base.start = atmel_aes_start;
+
+	return 0;
+}
+
+static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
+{
+	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+	ctx->base.start = atmel_aes_ctr_start;
+
+	return 0;
+}
+
+static struct crypto_alg aes_algs[] = {
+{
+	.cra_name		= "ecb(aes)",
+	.cra_driver_name	= "atmel-ecb-aes",
+	.cra_priority		= ATMEL_AES_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_ecb_encrypt,
+		.decrypt	= atmel_aes_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "atmel-cbc-aes",
+	.cra_priority		= ATMEL_AES_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_cbc_encrypt,
+		.decrypt	= atmel_aes_cbc_decrypt,
+	}
+},
+{
+	.cra_name		= "ofb(aes)",
+	.cra_driver_name	= "atmel-ofb-aes",
+	.cra_priority		= ATMEL_AES_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_ofb_encrypt,
+		.decrypt	= atmel_aes_ofb_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb(aes)",
+	.cra_driver_name	= "atmel-cfb-aes",
+	.cra_priority		= ATMEL_AES_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_cfb_encrypt,
+		.decrypt	= atmel_aes_cfb_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb32(aes)",
+	.cra_driver_name	= "atmel-cfb32-aes",
+	.cra_priority		= ATMEL_AES_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB32_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0x3,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_cfb32_encrypt,
+		.decrypt	= atmel_aes_cfb32_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb16(aes)",
+	.cra_driver_name	= "atmel-cfb16-aes",
+	.cra_priority		= ATMEL_AES_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB16_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0x1,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_cfb16_encrypt,
+		.decrypt	= atmel_aes_cfb16_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb8(aes)",
+	.cra_driver_name	= "atmel-cfb8-aes",
+	.cra_priority		= ATMEL_AES_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB8_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0x0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_cfb8_encrypt,
+		.decrypt	= atmel_aes_cfb8_decrypt,
+	}
+},
+{
+	.cra_name		= "ctr(aes)",
+	.cra_driver_name	= "atmel-ctr-aes",
+	.cra_priority		= ATMEL_AES_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctr_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_ctr_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_ctr_encrypt,
+		.decrypt	= atmel_aes_ctr_decrypt,
+	}
+},
+};
+
+static struct crypto_alg aes_cfb64_alg = {
+	.cra_name		= "cfb64(aes)",
+	.cra_driver_name	= "atmel-cfb64-aes",
+	.cra_priority		= ATMEL_AES_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB64_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_alignmask		= 0x7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_setkey,
+		.encrypt	= atmel_aes_cfb64_encrypt,
+		.decrypt	= atmel_aes_cfb64_decrypt,
+	}
+};
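+
+/*
+ * Illustrative usage sketch (not part of this driver): once registered, these
+ * transformations are reached through the regular kernel crypto API, either
+ * by algorithm name or by driver name, e.g.
+ *
+ *	struct crypto_skcipher *tfm;
+ *
+ *	tfm = crypto_alloc_skcipher("atmel-cbc-aes", 0, 0);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
+ *	crypto_free_skcipher(tfm);
+ *
+ * where key is a caller-provided 32-byte buffer.  A lookup by "cbc(aes)" may
+ * also resolve to this driver, depending on ATMEL_AES_PRIORITY relative to
+ * the other implementations registered on the system.
+ */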
+
+
+/* gcm aead functions */
+
+static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
+			       const u32 *data, size_t datalen,
+			       const u32 *ghash_in, u32 *ghash_out,
+			       atmel_aes_fn_t resume);
+static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
+
+static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
+static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);
+
+static inline struct atmel_aes_gcm_ctx *
+atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+	return container_of(ctx, struct atmel_aes_gcm_ctx, base);
+}
+
+static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
+			       const u32 *data, size_t datalen,
+			       const u32 *ghash_in, u32 *ghash_out,
+			       atmel_aes_fn_t resume)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+
+	dd->data = (u32 *)data;
+	dd->datalen = datalen;
+	ctx->ghash_in = ghash_in;
+	ctx->ghash_out = ghash_out;
+	ctx->ghash_resume = resume;
+
+	atmel_aes_write_ctrl(dd, false, NULL);
+	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
+}
+
+static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+
+	/* Set the data length. */
+	atmel_aes_write(dd, AES_AADLENR, dd->total);
+	atmel_aes_write(dd, AES_CLENR, 0);
+
+	/* If needed, overwrite the GCM Intermediate Hash Word Registers */
+	if (ctx->ghash_in)
+		atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);
+
+	return atmel_aes_gcm_ghash_finalize(dd);
+}
+
+static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	u32 isr;
+
+	/* Write data into the Input Data Registers. */
+	while (dd->datalen > 0) {
+		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
+		dd->data += 4;
+		dd->datalen -= AES_BLOCK_SIZE;
+
+		isr = atmel_aes_read(dd, AES_ISR);
+		if (!(isr & AES_INT_DATARDY)) {
+			dd->resume = atmel_aes_gcm_ghash_finalize;
+			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
+			return -EINPROGRESS;
+		}
+	}
+
+	/* Read the computed hash from GHASHRx. */
+	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);
+
+	return ctx->ghash_resume(dd);
+}
+
+
+static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	struct aead_request *req = aead_request_cast(dd->areq);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
+	size_t ivsize = crypto_aead_ivsize(tfm);
+	size_t datalen, padlen;
+	const void *iv = req->iv;
+	u8 *data = dd->buf;
+	int err;
+
+	atmel_aes_set_mode(dd, rctx);
+
+	err = atmel_aes_hw_init(dd);
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	if (likely(ivsize == GCM_AES_IV_SIZE)) {
+		memcpy(ctx->j0, iv, ivsize);
+		ctx->j0[3] = cpu_to_be32(1);
+		return atmel_aes_gcm_process(dd);
+	}
+
+	padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
+	datalen = ivsize + padlen + AES_BLOCK_SIZE;
+	if (datalen > dd->buflen)
+		return atmel_aes_complete(dd, -EINVAL);
+
+	memcpy(data, iv, ivsize);
+	memset(data + ivsize, 0, padlen + sizeof(u64));
+	((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
+
+	return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
+				   NULL, ctx->j0, atmel_aes_gcm_process);
+}
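+
+/*
+ * Note: the J0 construction above follows the GCM specification: for a
+ * 96-bit IV, J0 = IV || 0x00000001 (the fast path); for any other IV length,
+ * J0 = GHASH_H(IV padded with zeros to a block boundary || 64 zero bits ||
+ * the 64-bit bit-length of the IV), which is what the atmel_aes_gcm_ghash()
+ * call computes.
+ */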
+
+static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	struct aead_request *req = aead_request_cast(dd->areq);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	bool enc = atmel_aes_is_encrypt(dd);
+	u32 authsize;
+
+	/* Compute text length. */
+	authsize = crypto_aead_authsize(tfm);
+	ctx->textlen = req->cryptlen - (enc ? 0 : authsize);
+
+	/*
+	 * According to tcrypt test suite, the GCM Automatic Tag Generation
+	 * fails when both the message and its associated data are empty.
+	 */
+	if (likely(req->assoclen != 0 || ctx->textlen != 0))
+		dd->flags |= AES_FLAGS_GTAGEN;
+
+	atmel_aes_write_ctrl(dd, false, NULL);
+	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
+}
+
+static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	struct aead_request *req = aead_request_cast(dd->areq);
+	u32 j0_lsw, *j0 = ctx->j0;
+	size_t padlen;
+
+	/* Write incr32(J0) into IV. */
+	j0_lsw = j0[3];
+	j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1);
+	atmel_aes_write_block(dd, AES_IVR(0), j0);
+	j0[3] = j0_lsw;
+
+	/* Set aad and text lengths. */
+	atmel_aes_write(dd, AES_AADLENR, req->assoclen);
+	atmel_aes_write(dd, AES_CLENR, ctx->textlen);
+
+	/* Check whether any AAD is present. */
+	if (unlikely(req->assoclen == 0)) {
+		dd->datalen = 0;
+		return atmel_aes_gcm_data(dd);
+	}
+
+	/* Copy assoc data and add padding. */
+	padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
+	if (unlikely(req->assoclen + padlen > dd->buflen))
+		return atmel_aes_complete(dd, -EINVAL);
+	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);
+
+	/* Write assoc data into the Input Data register. */
+	dd->data = (u32 *)dd->buf;
+	dd->datalen = req->assoclen + padlen;
+	return atmel_aes_gcm_data(dd);
+}
+
+static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	struct aead_request *req = aead_request_cast(dd->areq);
+	bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
+	struct scatterlist *src, *dst;
+	u32 isr, mr;
+
+	/* Write AAD first. */
+	while (dd->datalen > 0) {
+		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
+		dd->data += 4;
+		dd->datalen -= AES_BLOCK_SIZE;
+
+		isr = atmel_aes_read(dd, AES_ISR);
+		if (!(isr & AES_INT_DATARDY)) {
+			dd->resume = atmel_aes_gcm_data;
+			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
+			return -EINPROGRESS;
+		}
+	}
+
+	/* GMAC only. */
+	if (unlikely(ctx->textlen == 0))
+		return atmel_aes_gcm_tag_init(dd);
+
+	/* Prepare src and dst scatter lists to transfer cipher/plain texts */
+	src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
+	dst = ((req->src == req->dst) ? src :
+	       scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));
+
+	if (use_dma) {
+		/* Update the Mode Register for DMA transfers. */
+		mr = atmel_aes_read(dd, AES_MR);
+		mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
+		mr |= AES_MR_SMOD_IDATAR0;
+		if (dd->caps.has_dualbuff)
+			mr |= AES_MR_DUALBUFF;
+		atmel_aes_write(dd, AES_MR, mr);
+
+		return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
+					   atmel_aes_gcm_tag_init);
+	}
+
+	return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
+				   atmel_aes_gcm_tag_init);
+}
+
+static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	struct aead_request *req = aead_request_cast(dd->areq);
+	u64 *data = dd->buf;
+
+	if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
+		if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
+			dd->resume = atmel_aes_gcm_tag_init;
+			atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
+			return -EINPROGRESS;
+		}
+
+		return atmel_aes_gcm_finalize(dd);
+	}
+
+	/* Read the GCM Intermediate Hash Word Registers. */
+	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);
+
+	data[0] = cpu_to_be64(req->assoclen * 8);
+	data[1] = cpu_to_be64(ctx->textlen * 8);
+
+	return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
+				   ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
+}
+
+static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	unsigned long flags;
+
+	/*
+	 * Change mode to CTR to complete the tag generation.
+	 * Use J0 as Initialization Vector.
+	 */
+	flags = dd->flags;
+	dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
+	dd->flags |= AES_FLAGS_CTR;
+	atmel_aes_write_ctrl(dd, false, ctx->j0);
+	dd->flags = flags;
+
+	atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
+	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
+}
+
+static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
+	struct aead_request *req = aead_request_cast(dd->areq);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	bool enc = atmel_aes_is_encrypt(dd);
+	u32 offset, authsize, itag[4], *otag = ctx->tag;
+	int err;
+
+	/* Read the computed tag. */
+	if (likely(dd->flags & AES_FLAGS_GTAGEN))
+		atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
+	else
+		atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);
+
+	offset = req->assoclen + ctx->textlen;
+	authsize = crypto_aead_authsize(tfm);
+	if (enc) {
+		scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
+		err = 0;
+	} else {
+		scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
+		err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
+	}
+
+	return atmel_aes_complete(dd, err);
+}
+
+static int atmel_aes_gcm_crypt(struct aead_request *req,
+			       unsigned long mode)
+{
+	struct atmel_aes_base_ctx *ctx;
+	struct atmel_aes_reqctx *rctx;
+	struct atmel_aes_dev *dd;
+
+	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	ctx->block_size = AES_BLOCK_SIZE;
+	ctx->is_aead = true;
+
+	dd = atmel_aes_find_dev(ctx);
+	if (!dd)
+		return -ENODEV;
+
+	rctx = aead_request_ctx(req);
+	rctx->mode = AES_FLAGS_GCM | mode;
+
+	return atmel_aes_handle_queue(dd, &req->base);
+}
+
+static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+				unsigned int keylen)
+{
+	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (keylen != AES_KEYSIZE_256 &&
+	    keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_128) {
+		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
+				     unsigned int authsize)
+{
+	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
+	switch (authsize) {
+	case 4:
+	case 8:
+	case 12:
+	case 13:
+	case 14:
+	case 15:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int atmel_aes_gcm_encrypt(struct aead_request *req)
+{
+	return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_gcm_decrypt(struct aead_request *req)
+{
+	return atmel_aes_gcm_crypt(req, 0);
+}
+
+static int atmel_aes_gcm_init(struct crypto_aead *tfm)
+{
+	struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+	ctx->base.start = atmel_aes_gcm_start;
+
+	return 0;
+}
+
+static struct aead_alg aes_gcm_alg = {
+	.setkey		= atmel_aes_gcm_setkey,
+	.setauthsize	= atmel_aes_gcm_setauthsize,
+	.encrypt	= atmel_aes_gcm_encrypt,
+	.decrypt	= atmel_aes_gcm_decrypt,
+	.init		= atmel_aes_gcm_init,
+	.ivsize		= GCM_AES_IV_SIZE,
+	.maxauthsize	= AES_BLOCK_SIZE,
+
+	.base = {
+		.cra_name		= "gcm(aes)",
+		.cra_driver_name	= "atmel-gcm-aes",
+		.cra_priority		= ATMEL_AES_PRIORITY,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct atmel_aes_gcm_ctx),
+		.cra_alignmask		= 0xf,
+		.cra_module		= THIS_MODULE,
+	},
+};
+
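+/*
+ * Illustrative sketch (not part of this driver): once aes_gcm_alg above is
+ * registered, other kernel code reaches it through the generic AEAD API,
+ * e.g. (error handling and scatterlist setup omitted):
+ *
+ *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+ *
+ *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
+ *	crypto_aead_setauthsize(tfm, AES_BLOCK_SIZE);
+ *	req = aead_request_alloc(tfm, GFP_KERNEL);
+ *	aead_request_set_ad(req, assoclen);
+ *	aead_request_set_crypt(req, src, dst, cryptlen, iv);
+ *	crypto_aead_encrypt(req);	// may return -EINPROGRESS (CRYPTO_ALG_ASYNC)
+ */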
+
+/* xts functions */
+
+static inline struct atmel_aes_xts_ctx *
+atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+	return container_of(ctx, struct atmel_aes_xts_ctx, base);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
+
+static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	unsigned long flags;
+	int err;
+
+	atmel_aes_set_mode(dd, rctx);
+
+	err = atmel_aes_hw_init(dd);
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	/* Compute the tweak value from req->info with ecb(aes). */
+	flags = dd->flags;
+	dd->flags &= ~AES_FLAGS_MODE_MASK;
+	dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
+	atmel_aes_write_ctrl_key(dd, false, NULL,
+				 ctx->key2, ctx->base.keylen);
+	dd->flags = flags;
+
+	atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
+	return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
+	u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
+	static const u32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
+	u8 *tweak_bytes = (u8 *)tweak;
+	int i;
+
+	/* Read the computed ciphered tweak value. */
+	atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
+	/*
+	 * Hardware quirk:
+	 * the order of the ciphered tweak bytes needs to be reversed before
+	 * writing them into the TWR(x) registers.
+	 */
+	for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
+		u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];
+
+		tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
+		tweak_bytes[i] = tmp;
+	}
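+	/*
+	 * i.e. tweak bytes b[0..15] are now b[15..0] and will be loaded into
+	 * the TWR(x) registers below.
+	 */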
+
+	/* Process the data. */
+	atmel_aes_write_ctrl(dd, use_dma, NULL);
+	atmel_aes_write_block(dd, AES_TWR(0), tweak);
+	atmel_aes_write_block(dd, AES_ALPHAR(0), one);
+	if (use_dma)
+		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+					   atmel_aes_transfer_complete);
+
+	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+				   atmel_aes_transfer_complete);
+}
+
+static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+				unsigned int keylen)
+{
+	struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	int err;
+
+	err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen);
+	if (err)
+		return err;
+
+	memcpy(ctx->base.key, key, keylen/2);
+	memcpy(ctx->key2, key + keylen/2, keylen/2);
+	ctx->base.keylen = keylen/2;
+
+	return 0;
+}
+
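+/*
+ * Note: xts(aes) keys are twice the AES key length: atmel_aes_xts_setkey()
+ * above programs the first half as the data key and the second half (key2)
+ * as the tweak key, which is why min_keysize/max_keysize in aes_xts_alg
+ * below are 2 * AES_MIN_KEY_SIZE and 2 * AES_MAX_KEY_SIZE.
+ */
+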
+static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_XTS);
+}
+
+static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm)
+{
+	struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+	ctx->base.start = atmel_aes_xts_start;
+
+	return 0;
+}
+
+static struct crypto_alg aes_xts_alg = {
+	.cra_name		= "xts(aes)",
+	.cra_driver_name	= "atmel-xts-aes",
+	.cra_priority		= ATMEL_AES_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_xts_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_xts_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_xts_setkey,
+		.encrypt	= atmel_aes_xts_encrypt,
+		.decrypt	= atmel_aes_xts_decrypt,
+	}
+};
+
+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+/* authenc aead functions */
+
+static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
+static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
+				  bool is_async);
+static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
+				      bool is_async);
+static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd);
+static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
+				   bool is_async);
+
+static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err)
+{
+	struct aead_request *req = aead_request_cast(dd->areq);
+	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+
+	if (err && (dd->flags & AES_FLAGS_OWN_SHA))
+		atmel_sha_authenc_abort(&rctx->auth_req);
+	dd->flags &= ~AES_FLAGS_OWN_SHA;
+}
+
+static int atmel_aes_authenc_start(struct atmel_aes_dev *dd)
+{
+	struct aead_request *req = aead_request_cast(dd->areq);
+	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
+	int err;
+
+	atmel_aes_set_mode(dd, &rctx->base);
+
+	err = atmel_aes_hw_init(dd);
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	return atmel_sha_authenc_schedule(&rctx->auth_req, ctx->auth,
+					  atmel_aes_authenc_init, dd);
+}
+
+static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
+				  bool is_async)
+{
+	struct aead_request *req = aead_request_cast(dd->areq);
+	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+
+	if (is_async)
+		dd->is_async = true;
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	/* If here, we've got the ownership of the SHA device. */
+	dd->flags |= AES_FLAGS_OWN_SHA;
+
+	/* Configure the SHA device. */
+	return atmel_sha_authenc_init(&rctx->auth_req,
+				      req->src, req->assoclen,
+				      rctx->textlen,
+				      atmel_aes_authenc_transfer, dd);
+}
+
+static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
+				      bool is_async)
+{
+	struct aead_request *req = aead_request_cast(dd->areq);
+	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+	bool enc = atmel_aes_is_encrypt(dd);
+	struct scatterlist *src, *dst;
+	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
+	u32 emr;
+
+	if (is_async)
+		dd->is_async = true;
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	/* Prepare src and dst scatter-lists to transfer cipher/plain texts. */
+	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
+	dst = src;
+
+	if (req->src != req->dst)
+		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+
+	/* Configure the AES device. */
+	memcpy(iv, req->iv, sizeof(iv));
+
+	/*
+	 * Here we always set the 2nd parameter of atmel_aes_write_ctrl() to
+	 * 'true' even if the data transfer is actually performed by the CPU
+	 * (rather than by the DMA) because we must force the AES_MR_SMOD
+	 * bitfield to the value AES_MR_SMOD_IDATAR0. Indeed, both AES_MR_SMOD
+	 * and SHA_MR_SMOD must be set to *_MR_SMOD_IDATAR0.
+	 */
+	atmel_aes_write_ctrl(dd, true, iv);
+	emr = AES_EMR_PLIPEN;
+	if (!enc)
+		emr |= AES_EMR_PLIPD;
+	atmel_aes_write(dd, AES_EMR, emr);
+
+	/* Transfer data. */
+	return atmel_aes_dma_start(dd, src, dst, rctx->textlen,
+				   atmel_aes_authenc_digest);
+}
+
+static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd)
+{
+	struct aead_request *req = aead_request_cast(dd->areq);
+	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+
+	/* atmel_sha_authenc_final() releases the SHA device. */
+	dd->flags &= ~AES_FLAGS_OWN_SHA;
+	return atmel_sha_authenc_final(&rctx->auth_req,
+				       rctx->digest, sizeof(rctx->digest),
+				       atmel_aes_authenc_final, dd);
+}
+
+static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
+				   bool is_async)
+{
+	struct aead_request *req = aead_request_cast(dd->areq);
+	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	bool enc = atmel_aes_is_encrypt(dd);
+	u32 idigest[SHA512_DIGEST_SIZE / sizeof(u32)], *odigest = rctx->digest;
+	u32 offs, authsize;
+
+	if (is_async)
+		dd->is_async = true;
+	if (err)
+		goto complete;
+
+	offs = req->assoclen + rctx->textlen;
+	authsize = crypto_aead_authsize(tfm);
+	if (enc) {
+		scatterwalk_map_and_copy(odigest, req->dst, offs, authsize, 1);
+	} else {
+		scatterwalk_map_and_copy(idigest, req->src, offs, authsize, 0);
+		if (crypto_memneq(idigest, odigest, authsize))
+			err = -EBADMSG;
+	}
+
+complete:
+	return atmel_aes_complete(dd, err);
+}
+
+static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
+				    unsigned int keylen)
+{
+	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_authenc_keys keys;
+	u32 flags;
+	int err;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+		goto badkey;
+
+	if (keys.enckeylen > sizeof(ctx->base.key))
+		goto badkey;
+
+	/* Save auth key. */
+	flags = crypto_aead_get_flags(tfm);
+	err = atmel_sha_authenc_setkey(ctx->auth,
+				       keys.authkey, keys.authkeylen,
+				       &flags);
+	crypto_aead_set_flags(tfm, flags & CRYPTO_TFM_RES_MASK);
+	if (err) {
+		memzero_explicit(&keys, sizeof(keys));
+		return err;
+	}
+
+	/* Save enc key. */
+	ctx->base.keylen = keys.enckeylen;
+	memcpy(ctx->base.key, keys.enckey, keys.enckeylen);
+
+	memzero_explicit(&keys, sizeof(keys));
+	return 0;
+
+badkey:
+	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	memzero_explicit(&keys, sizeof(keys));
+	return -EINVAL;
+}
+
+static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
+				      unsigned long auth_mode)
+{
+	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
+	unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();
+
+	ctx->auth = atmel_sha_authenc_spawn(auth_mode);
+	if (IS_ERR(ctx->auth))
+		return PTR_ERR(ctx->auth);
+
+	crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
+				      auth_reqsize));
+	ctx->base.start = atmel_aes_authenc_start;
+
+	return 0;
+}
+
+static int atmel_aes_authenc_hmac_sha1_init_tfm(struct crypto_aead *tfm)
+{
+	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA1);
+}
+
+static int atmel_aes_authenc_hmac_sha224_init_tfm(struct crypto_aead *tfm)
+{
+	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA224);
+}
+
+static int atmel_aes_authenc_hmac_sha256_init_tfm(struct crypto_aead *tfm)
+{
+	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA256);
+}
+
+static int atmel_aes_authenc_hmac_sha384_init_tfm(struct crypto_aead *tfm)
+{
+	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA384);
+}
+
+static int atmel_aes_authenc_hmac_sha512_init_tfm(struct crypto_aead *tfm)
+{
+	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA512);
+}
+
+static void atmel_aes_authenc_exit_tfm(struct crypto_aead *tfm)
+{
+	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
+
+	atmel_sha_authenc_free(ctx->auth);
+}
+
+static int atmel_aes_authenc_crypt(struct aead_request *req,
+				   unsigned long mode)
+{
+	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
+	u32 authsize = crypto_aead_authsize(tfm);
+	bool enc = (mode & AES_FLAGS_ENCRYPT);
+	struct atmel_aes_dev *dd;
+
+	/* Compute text length. */
+	if (!enc && req->cryptlen < authsize)
+		return -EINVAL;
+	rctx->textlen = req->cryptlen - (enc ? 0 : authsize);
+
+	/*
+	 * Empty messages are not supported yet: the SHA auto-padding can be
+	 * used only on non-empty messages, so a special case would need to be
+	 * implemented to handle them.
+	 */
+	if (!rctx->textlen && !req->assoclen)
+		return -EINVAL;
+
+	rctx->base.mode = mode;
+	ctx->block_size = AES_BLOCK_SIZE;
+	ctx->is_aead = true;
+
+	dd = atmel_aes_find_dev(ctx);
+	if (!dd)
+		return -ENODEV;
+
+	return atmel_aes_handle_queue(dd, &req->base);
+}
+
+static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
+{
+	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_authenc_cbc_aes_decrypt(struct aead_request *req)
+{
+	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC);
+}
+
+static struct aead_alg aes_authenc_algs[] = {
+{
+	.setkey		= atmel_aes_authenc_setkey,
+	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
+	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
+	.init		= atmel_aes_authenc_hmac_sha1_init_tfm,
+	.exit		= atmel_aes_authenc_exit_tfm,
+	.ivsize		= AES_BLOCK_SIZE,
+	.maxauthsize	= SHA1_DIGEST_SIZE,
+
+	.base = {
+		.cra_name		= "authenc(hmac(sha1),cbc(aes))",
+		.cra_driver_name	= "atmel-authenc-hmac-sha1-cbc-aes",
+		.cra_priority		= ATMEL_AES_PRIORITY,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
+		.cra_alignmask		= 0xf,
+		.cra_module		= THIS_MODULE,
+	},
+},
+{
+	.setkey		= atmel_aes_authenc_setkey,
+	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
+	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
+	.init		= atmel_aes_authenc_hmac_sha224_init_tfm,
+	.exit		= atmel_aes_authenc_exit_tfm,
+	.ivsize		= AES_BLOCK_SIZE,
+	.maxauthsize	= SHA224_DIGEST_SIZE,
+
+	.base = {
+		.cra_name		= "authenc(hmac(sha224),cbc(aes))",
+		.cra_driver_name	= "atmel-authenc-hmac-sha224-cbc-aes",
+		.cra_priority		= ATMEL_AES_PRIORITY,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
+		.cra_alignmask		= 0xf,
+		.cra_module		= THIS_MODULE,
+	},
+},
+{
+	.setkey		= atmel_aes_authenc_setkey,
+	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
+	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
+	.init		= atmel_aes_authenc_hmac_sha256_init_tfm,
+	.exit		= atmel_aes_authenc_exit_tfm,
+	.ivsize		= AES_BLOCK_SIZE,
+	.maxauthsize	= SHA256_DIGEST_SIZE,
+
+	.base = {
+		.cra_name		= "authenc(hmac(sha256),cbc(aes))",
+		.cra_driver_name	= "atmel-authenc-hmac-sha256-cbc-aes",
+		.cra_priority		= ATMEL_AES_PRIORITY,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
+		.cra_alignmask		= 0xf,
+		.cra_module		= THIS_MODULE,
+	},
+},
+{
+	.setkey		= atmel_aes_authenc_setkey,
+	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
+	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
+	.init		= atmel_aes_authenc_hmac_sha384_init_tfm,
+	.exit		= atmel_aes_authenc_exit_tfm,
+	.ivsize		= AES_BLOCK_SIZE,
+	.maxauthsize	= SHA384_DIGEST_SIZE,
+
+	.base = {
+		.cra_name		= "authenc(hmac(sha384),cbc(aes))",
+		.cra_driver_name	= "atmel-authenc-hmac-sha384-cbc-aes",
+		.cra_priority		= ATMEL_AES_PRIORITY,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
+		.cra_alignmask		= 0xf,
+		.cra_module		= THIS_MODULE,
+	},
+},
+{
+	.setkey		= atmel_aes_authenc_setkey,
+	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
+	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
+	.init		= atmel_aes_authenc_hmac_sha512_init_tfm,
+	.exit		= atmel_aes_authenc_exit_tfm,
+	.ivsize		= AES_BLOCK_SIZE,
+	.maxauthsize	= SHA512_DIGEST_SIZE,
+
+	.base = {
+		.cra_name		= "authenc(hmac(sha512),cbc(aes))",
+		.cra_driver_name	= "atmel-authenc-hmac-sha512-cbc-aes",
+		.cra_priority		= ATMEL_AES_PRIORITY,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
+		.cra_alignmask		= 0xf,
+		.cra_module		= THIS_MODULE,
+	},
+},
+};
+#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */
+
+/* Probe functions */
+
+static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
+{
+	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
+	dd->buflen = ATMEL_AES_BUFFER_SIZE;
+	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
+
+	if (!dd->buf) {
+		dev_err(dd->dev, "unable to alloc pages.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
+{
+	free_page((unsigned long)dd->buf);
+}
+
+static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
+{
+	struct at_dma_slave	*sl = slave;
+
+	if (sl && sl->dma_dev == chan->device->dev) {
+		chan->private = sl;
+		return true;
+	} else {
+		return false;
+	}
+}
+
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
+			      struct crypto_platform_data *pdata)
+{
+	struct at_dma_slave *slave;
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* Try to grab 2 DMA channels */
+	slave = &pdata->dma_slave->rxdata;
+	dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
+							slave, dd->dev, "tx");
+	if (!dd->src.chan)
+		goto err_dma_in;
+
+	slave = &pdata->dma_slave->txdata;
+	dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
+							slave, dd->dev, "rx");
+	if (!dd->dst.chan)
+		goto err_dma_out;
+
+	return 0;
+
+err_dma_out:
+	dma_release_channel(dd->src.chan);
+err_dma_in:
+	dev_warn(dd->dev, "no DMA channel available\n");
+	return -ENODEV;
+}
+
+static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
+{
+	dma_release_channel(dd->dst.chan);
+	dma_release_channel(dd->src.chan);
+}
+
+static void atmel_aes_queue_task(unsigned long data)
+{
+	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
+
+	atmel_aes_handle_queue(dd, NULL);
+}
+
+static void atmel_aes_done_task(unsigned long data)
+{
+	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
+
+	dd->is_async = true;
+	(void)dd->resume(dd);
+}
+
+static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
+{
+	struct atmel_aes_dev *aes_dd = dev_id;
+	u32 reg;
+
+	reg = atmel_aes_read(aes_dd, AES_ISR);
+	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
+		atmel_aes_write(aes_dd, AES_IDR, reg);
+		if (AES_FLAGS_BUSY & aes_dd->flags)
+			tasklet_schedule(&aes_dd->done_task);
+		else
+			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
+{
+	int i;
+
+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+	if (dd->caps.has_authenc)
+		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
+			crypto_unregister_aead(&aes_authenc_algs[i]);
+#endif
+
+	if (dd->caps.has_xts)
+		crypto_unregister_alg(&aes_xts_alg);
+
+	if (dd->caps.has_gcm)
+		crypto_unregister_aead(&aes_gcm_alg);
+
+	if (dd->caps.has_cfb64)
+		crypto_unregister_alg(&aes_cfb64_alg);
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
+		crypto_unregister_alg(&aes_algs[i]);
+}
+
+static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
+{
+	int err, i, j;
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+		err = crypto_register_alg(&aes_algs[i]);
+		if (err)
+			goto err_aes_algs;
+	}
+
+	if (dd->caps.has_cfb64) {
+		err = crypto_register_alg(&aes_cfb64_alg);
+		if (err)
+			goto err_aes_cfb64_alg;
+	}
+
+	if (dd->caps.has_gcm) {
+		err = crypto_register_aead(&aes_gcm_alg);
+		if (err)
+			goto err_aes_gcm_alg;
+	}
+
+	if (dd->caps.has_xts) {
+		err = crypto_register_alg(&aes_xts_alg);
+		if (err)
+			goto err_aes_xts_alg;
+	}
+
+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+	if (dd->caps.has_authenc) {
+		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
+			err = crypto_register_aead(&aes_authenc_algs[i]);
+			if (err)
+				goto err_aes_authenc_alg;
+		}
+	}
+#endif
+
+	return 0;
+
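+	/*
+	 * Error path: each label below undoes the registration performed just
+	 * before the failing one and then falls through to the next label, so
+	 * everything registered so far is unregistered in reverse order.
+	 */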
+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+	/* i = ARRAY_SIZE(aes_authenc_algs); */
+err_aes_authenc_alg:
+	for (j = 0; j < i; j++)
+		crypto_unregister_aead(&aes_authenc_algs[j]);
+	crypto_unregister_alg(&aes_xts_alg);
+#endif
+err_aes_xts_alg:
+	crypto_unregister_aead(&aes_gcm_alg);
+err_aes_gcm_alg:
+	crypto_unregister_alg(&aes_cfb64_alg);
+err_aes_cfb64_alg:
+	i = ARRAY_SIZE(aes_algs);
+err_aes_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_alg(&aes_algs[j]);
+
+	return err;
+}
+
+static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
+{
+	dd->caps.has_dualbuff = 0;
+	dd->caps.has_cfb64 = 0;
+	dd->caps.has_ctr32 = 0;
+	dd->caps.has_gcm = 0;
+	dd->caps.has_xts = 0;
+	dd->caps.has_authenc = 0;
+	dd->caps.max_burst_size = 1;
+
+	/* keep only major version number */
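+	/* e.g. a hw_version of 0x135 is handled as 0x130 */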
+	switch (dd->hw_version & 0xff0) {
+	case 0x500:
+		dd->caps.has_dualbuff = 1;
+		dd->caps.has_cfb64 = 1;
+		dd->caps.has_ctr32 = 1;
+		dd->caps.has_gcm = 1;
+		dd->caps.has_xts = 1;
+		dd->caps.has_authenc = 1;
+		dd->caps.max_burst_size = 4;
+		break;
+	case 0x200:
+		dd->caps.has_dualbuff = 1;
+		dd->caps.has_cfb64 = 1;
+		dd->caps.has_ctr32 = 1;
+		dd->caps.has_gcm = 1;
+		dd->caps.max_burst_size = 4;
+		break;
+	case 0x130:
+		dd->caps.has_dualbuff = 1;
+		dd->caps.has_cfb64 = 1;
+		dd->caps.max_burst_size = 4;
+		break;
+	case 0x120:
+		break;
+	default:
+		dev_warn(dd->dev,
+			 "Unmanaged AES version, using minimum capabilities\n");
+		break;
+	}
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_aes_dt_ids[] = {
+	{ .compatible = "atmel,at91sam9g46-aes" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
+
+static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct crypto_platform_data *pdata;
+
+	if (!np) {
+		dev_err(&pdev->dev, "device node not found\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->dma_slave = devm_kzalloc(&pdev->dev,
+					sizeof(*(pdata->dma_slave)),
+					GFP_KERNEL);
+	if (!pdata->dma_slave) {
+		devm_kfree(&pdev->dev, pdata);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return pdata;
+}
+#else
+static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
+{
+	return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int atmel_aes_probe(struct platform_device *pdev)
+{
+	struct atmel_aes_dev *aes_dd;
+	struct crypto_platform_data *pdata;
+	struct device *dev = &pdev->dev;
+	struct resource *aes_res;
+	int err;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		pdata = atmel_aes_of_init(pdev);
+		if (IS_ERR(pdata)) {
+			err = PTR_ERR(pdata);
+			goto aes_dd_err;
+		}
+	}
+
+	if (!pdata->dma_slave) {
+		err = -ENXIO;
+		goto aes_dd_err;
+	}
+
+	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
+	if (aes_dd == NULL) {
+		err = -ENOMEM;
+		goto aes_dd_err;
+	}
+
+	aes_dd->dev = dev;
+
+	platform_set_drvdata(pdev, aes_dd);
+
+	INIT_LIST_HEAD(&aes_dd->list);
+	spin_lock_init(&aes_dd->lock);
+
+	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
+					(unsigned long)aes_dd);
+	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
+					(unsigned long)aes_dd);
+
+	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
+
+	/* Get the base address */
+	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!aes_res) {
+		dev_err(dev, "no MEM resource info\n");
+		err = -ENODEV;
+		goto res_err;
+	}
+	aes_dd->phys_base = aes_res->start;
+
+	/* Get the IRQ */
+	aes_dd->irq = platform_get_irq(pdev,  0);
+	if (aes_dd->irq < 0) {
+		dev_err(dev, "no IRQ resource info\n");
+		err = aes_dd->irq;
+		goto res_err;
+	}
+
+	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
+			       IRQF_SHARED, "atmel-aes", aes_dd);
+	if (err) {
+		dev_err(dev, "unable to request aes irq.\n");
+		goto res_err;
+	}
+
+	/* Initializing the clock */
+	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
+	if (IS_ERR(aes_dd->iclk)) {
+		dev_err(dev, "clock initialization failed.\n");
+		err = PTR_ERR(aes_dd->iclk);
+		goto res_err;
+	}
+
+	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
+	if (IS_ERR(aes_dd->io_base)) {
+		dev_err(dev, "can't ioremap\n");
+		err = PTR_ERR(aes_dd->io_base);
+		goto res_err;
+	}
+
+	err = clk_prepare(aes_dd->iclk);
+	if (err)
+		goto res_err;
+
+	err = atmel_aes_hw_version_init(aes_dd);
+	if (err)
+		goto iclk_unprepare;
+
+	atmel_aes_get_cap(aes_dd);
+
+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+	if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
+		err = -EPROBE_DEFER;
+		goto iclk_unprepare;
+	}
+#endif
+
+	err = atmel_aes_buff_init(aes_dd);
+	if (err)
+		goto err_aes_buff;
+
+	err = atmel_aes_dma_init(aes_dd, pdata);
+	if (err)
+		goto err_aes_dma;
+
+	spin_lock(&atmel_aes.lock);
+	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
+	spin_unlock(&atmel_aes.lock);
+
+	err = atmel_aes_register_algs(aes_dd);
+	if (err)
+		goto err_algs;
+
+	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
+			dma_chan_name(aes_dd->src.chan),
+			dma_chan_name(aes_dd->dst.chan));
+
+	return 0;
+
+err_algs:
+	spin_lock(&atmel_aes.lock);
+	list_del(&aes_dd->list);
+	spin_unlock(&atmel_aes.lock);
+	atmel_aes_dma_cleanup(aes_dd);
+err_aes_dma:
+	atmel_aes_buff_cleanup(aes_dd);
+err_aes_buff:
+iclk_unprepare:
+	clk_unprepare(aes_dd->iclk);
+res_err:
+	tasklet_kill(&aes_dd->done_task);
+	tasklet_kill(&aes_dd->queue_task);
+aes_dd_err:
+	if (err != -EPROBE_DEFER)
+		dev_err(dev, "initialization failed.\n");
+
+	return err;
+}
+
+static int atmel_aes_remove(struct platform_device *pdev)
+{
+	struct atmel_aes_dev *aes_dd;
+
+	aes_dd = platform_get_drvdata(pdev);
+	if (!aes_dd)
+		return -ENODEV;
+	spin_lock(&atmel_aes.lock);
+	list_del(&aes_dd->list);
+	spin_unlock(&atmel_aes.lock);
+
+	atmel_aes_unregister_algs(aes_dd);
+
+	tasklet_kill(&aes_dd->done_task);
+	tasklet_kill(&aes_dd->queue_task);
+
+	atmel_aes_dma_cleanup(aes_dd);
+	atmel_aes_buff_cleanup(aes_dd);
+
+	clk_unprepare(aes_dd->iclk);
+
+	return 0;
+}
+
+static struct platform_driver atmel_aes_driver = {
+	.probe		= atmel_aes_probe,
+	.remove		= atmel_aes_remove,
+	.driver		= {
+		.name	= "atmel_aes",
+		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
+	},
+};
+
+module_platform_driver(atmel_aes_driver);
+
+MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
new file mode 100644
index 0000000..2a60d12
--- /dev/null
+++ b/drivers/crypto/atmel-authenc.h
@@ -0,0 +1,64 @@
+/*
+ * API for Atmel Secure Protocol Layers Improved Performances (SPLIP)
+ *
+ * Copyright (C) 2016 Atmel Corporation
+ *
+ * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
+ */
+
+#ifndef __ATMEL_AUTHENC_H__
+#define __ATMEL_AUTHENC_H__
+
+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+
+#include <crypto/authenc.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include "atmel-sha-regs.h"
+
+struct atmel_aes_dev;
+typedef int (*atmel_aes_authenc_fn_t)(struct atmel_aes_dev *, int, bool);
+
+struct atmel_sha_authenc_ctx;
+
+bool atmel_sha_authenc_is_ready(void);
+unsigned int atmel_sha_authenc_get_reqsize(void);
+
+struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode);
+void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth);
+int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
+			     const u8 *key, unsigned int keylen,
+			     u32 *flags);
+
+int atmel_sha_authenc_schedule(struct ahash_request *req,
+			       struct atmel_sha_authenc_ctx *auth,
+			       atmel_aes_authenc_fn_t cb,
+			       struct atmel_aes_dev *dd);
+int atmel_sha_authenc_init(struct ahash_request *req,
+			   struct scatterlist *assoc, unsigned int assoclen,
+			   unsigned int textlen,
+			   atmel_aes_authenc_fn_t cb,
+			   struct atmel_aes_dev *dd);
+int atmel_sha_authenc_final(struct ahash_request *req,
+			    u32 *digest, unsigned int digestlen,
+			    atmel_aes_authenc_fn_t cb,
+			    struct atmel_aes_dev *dd);
+void  atmel_sha_authenc_abort(struct ahash_request *req);
+
+#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */
+
+#endif /* __ATMEL_AUTHENC_H__ */
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
new file mode 100644
index 0000000..74f083f
--- /dev/null
+++ b/drivers/crypto/atmel-ecc.c
@@ -0,0 +1,790 @@
+/*
+ * Microchip / Atmel ECC (I2C) driver.
+ *
+ * Copyright (c) 2017, Microchip Technology Inc.
+ * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitrev.h>
+#include <linux/crc16.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/ecdh.h>
+#include <crypto/kpp.h>
+#include "atmel-ecc.h"
+
+/* Used for binding tfm objects to i2c clients. */
+struct atmel_ecc_driver_data {
+	struct list_head i2c_client_list;
+	spinlock_t i2c_list_lock;
+} ____cacheline_aligned;
+
+static struct atmel_ecc_driver_data driver_data;
+
+/**
+ * atmel_ecc_i2c_client_priv - i2c_client private data
+ * @client              : pointer to i2c client device
+ * @i2c_client_list_node: part of i2c_client_list
+ * @lock                : lock for sending i2c commands
+ * @wake_token          : wake token array of zeros
+ * @wake_token_sz       : size in bytes of the wake_token
+ * @tfm_count           : number of active crypto transformations on i2c client
+ *
+ * Reads and writes from/to the i2c client are sequential. The first byte
+ * transmitted to the device is treated as the byte size. Any attempt to send
+ * more than this number of bytes will cause the device to not ACK those bytes.
+ * After the host writes a single command byte to the input buffer, reads are
+ * prohibited until after the device completes command execution. Use a mutex
+ * when sending i2c commands.
+ */
+struct atmel_ecc_i2c_client_priv {
+	struct i2c_client *client;
+	struct list_head i2c_client_list_node;
+	struct mutex lock;
+	u8 wake_token[WAKE_TOKEN_MAX_SIZE];
+	size_t wake_token_sz;
+	atomic_t tfm_count ____cacheline_aligned;
+};
+
+/**
+ * struct atmel_ecdh_ctx - transformation context
+ * @client     : pointer to i2c client device
+ * @fallback   : used for unsupported curves or when the user wants to use
+ *               their own private key.
+ * @public_key : generated when calling set_secret(). It's the responsibility
+ *               of the user to not call set_secret() while
+ *               generate_public_key() or compute_shared_secret() are in flight.
+ * @curve_id   : elliptic curve id
+ * @n_sz       : size in bytes of the n prime
+ * @do_fallback: true when the device doesn't support the curve or when the
+ *               user wants to use their own private key.
+ */
+struct atmel_ecdh_ctx {
+	struct i2c_client *client;
+	struct crypto_kpp *fallback;
+	const u8 *public_key;
+	unsigned int curve_id;
+	size_t n_sz;
+	bool do_fallback;
+};
+
+/**
+ * struct atmel_ecc_work_data - data structure representing the work
+ * @ctx : transformation context.
+ * @cbk : pointer to a callback function to be invoked upon completion of this
+ *        request. This has the form:
+ *        callback(struct atmel_ecc_work_data *work_data, void *areq, int status)
+ *        where:
+ *        @work_data: data structure representing the work
+ *        @areq     : optional pointer to an argument passed with the original
+ *                    request.
+ *        @status   : status returned from the i2c client device or i2c error.
+ * @areq: optional pointer to a user argument for use at callback time.
+ * @work: describes the task to be executed.
+ * @cmd : structure used for communicating with the device.
+ */
+struct atmel_ecc_work_data {
+	struct atmel_ecdh_ctx *ctx;
+	void (*cbk)(struct atmel_ecc_work_data *work_data, void *areq,
+		    int status);
+	void *areq;
+	struct work_struct work;
+	struct atmel_ecc_cmd cmd;
+};
+
+static u16 atmel_ecc_crc16(u16 crc, const u8 *buffer, size_t len)
+{
+	return cpu_to_le16(bitrev16(crc16(crc, buffer, len)));
+}
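+
+/*
+ * Note (informative): the device expects a CRC-16 with polynomial 0x8005,
+ * initial value 0, transmitted least significant byte first (see
+ * atmel_ecc_checksum() below). The kernel's crc16() implements the
+ * bit-reflected form of that polynomial, so bitrev16() restores the bit
+ * order the device uses and cpu_to_le16() handles the byte order.
+ */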
+
+/**
+ * atmel_ecc_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
+ * CRC16 verification of the count, opcode, param1, param2 and data bytes.
+ * The checksum is saved in little-endian format in the least significant
+ * two bytes of the command. CRC polynomial is 0x8005 and the initial register
+ * value should be zero.
+ *
+ * @cmd : structure used for communicating with the device.
+ */
+static void atmel_ecc_checksum(struct atmel_ecc_cmd *cmd)
+{
+	u8 *data = &cmd->count;
+	size_t len = cmd->count - CRC_SIZE;
+	u16 *crc16 = (u16 *)(data + len);
+
+	*crc16 = atmel_ecc_crc16(0, data, len);
+}
+
+static void atmel_ecc_init_read_cmd(struct atmel_ecc_cmd *cmd)
+{
+	cmd->word_addr = COMMAND;
+	cmd->opcode = OPCODE_READ;
+	/*
+	 * Read the word from Configuration zone that contains the lock bytes
+	 * (UserExtra, Selector, LockValue, LockConfig).
+	 */
+	cmd->param1 = CONFIG_ZONE;
+	cmd->param2 = DEVICE_LOCK_ADDR;
+	cmd->count = READ_COUNT;
+
+	atmel_ecc_checksum(cmd);
+
+	cmd->msecs = MAX_EXEC_TIME_READ;
+	cmd->rxsize = READ_RSP_SIZE;
+}
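+
+/*
+ * For reference, READ_COUNT is 7: 1 count byte + 1 opcode + 1 param1 +
+ * 2 param2 + 2 CRC bytes, so the CRC computed by atmel_ecc_checksum()
+ * covers the 5 bytes starting at 'count'.
+ */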
+
+static void atmel_ecc_init_genkey_cmd(struct atmel_ecc_cmd *cmd, u16 keyid)
+{
+	cmd->word_addr = COMMAND;
+	cmd->count = GENKEY_COUNT;
+	cmd->opcode = OPCODE_GENKEY;
+	cmd->param1 = GENKEY_MODE_PRIVATE;
+	/* a random private key will be generated and stored in slot keyID */
+	cmd->param2 = cpu_to_le16(keyid);
+
+	atmel_ecc_checksum(cmd);
+
+	cmd->msecs = MAX_EXEC_TIME_GENKEY;
+	cmd->rxsize = GENKEY_RSP_SIZE;
+}
+
+static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
+				   struct scatterlist *pubkey)
+{
+	size_t copied;
+
+	cmd->word_addr = COMMAND;
+	cmd->count = ECDH_COUNT;
+	cmd->opcode = OPCODE_ECDH;
+	cmd->param1 = ECDH_PREFIX_MODE;
+	/* private key slot */
+	cmd->param2 = cpu_to_le16(DATA_SLOT_2);
+
+	/*
+	 * The device only supports NIST P256 ECC keys. The public key size will
+	 * always be the same. Use a macro for the key size to avoid unnecessary
+	 * computations.
+	 */
+	copied = sg_copy_to_buffer(pubkey,
+				   sg_nents_for_len(pubkey,
+						    ATMEL_ECC_PUBKEY_SIZE),
+				   cmd->data, ATMEL_ECC_PUBKEY_SIZE);
+	if (copied != ATMEL_ECC_PUBKEY_SIZE)
+		return -EINVAL;
+
+	atmel_ecc_checksum(cmd);
+
+	cmd->msecs = MAX_EXEC_TIME_ECDH;
+	cmd->rxsize = ECDH_RSP_SIZE;
+
+	return 0;
+}
+
+/*
+ * After wake and after execution of a command, there will be error, status, or
+ * result bytes in the device's output register that can be retrieved by the
+ * system. When the length of that group is four bytes, the codes returned are
+ * detailed in error_list.
+ */
+static int atmel_ecc_status(struct device *dev, u8 *status)
+{
+	size_t err_list_len = ARRAY_SIZE(error_list);
+	int i;
+	u8 err_id = status[1];
+
+	if (*status != STATUS_SIZE)
+		return 0;
+
+	if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
+		return 0;
+
+	for (i = 0; i < err_list_len; i++)
+		if (error_list[i].value == err_id)
+			break;
+
+	/* report err_id if it is in error_list; unknown error ids are ignored */
+	if (i != err_list_len) {
+		dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
+		return err_id;
+	}
+
+	return 0;
+}
+
+static int atmel_ecc_wakeup(struct i2c_client *client)
+{
+	struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+	u8 status[STATUS_RSP_SIZE];
+	int ret;
+
+	/*
+	 * The device ignores any levels or transitions on the SCL pin when the
+	 * device is idle, asleep or during waking up. Don't check for error
+	 * when waking up the device.
+	 */
+	i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
+
+	/*
+	 * Wait to wake the device. Typical execution times for ecdh and genkey
+	 * are around tens of milliseconds; the 50 microsecond delta of the
+	 * sleep range is negligible by comparison.
+	 */
+	usleep_range(TWHI_MIN, TWHI_MAX);
+
+	ret = i2c_master_recv(client, status, STATUS_SIZE);
+	if (ret < 0)
+		return ret;
+
+	return atmel_ecc_status(&client->dev, status);
+}
+
+static int atmel_ecc_sleep(struct i2c_client *client)
+{
+	u8 sleep = SLEEP_TOKEN;
+
+	return i2c_master_send(client, &sleep, 1);
+}
+
+static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
+			    int status)
+{
+	struct kpp_request *req = areq;
+	struct atmel_ecdh_ctx *ctx = work_data->ctx;
+	struct atmel_ecc_cmd *cmd = &work_data->cmd;
+	size_t copied, n_sz;
+
+	if (status)
+		goto free_work_data;
+
+	/* might want less than we've got */
+	n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
+
+	/* copy the shared secret */
+	copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
+				     &cmd->data[RSP_DATA_IDX], n_sz);
+	if (copied != n_sz)
+		status = -EINVAL;
+
+	/* fall through */
+free_work_data:
+	kzfree(work_data);
+	kpp_request_complete(req, status);
+}
+
+/*
+ * atmel_ecc_send_receive() - send a command to the device and receive its
+ *                            response.
+ * @client: i2c client device
+ * @cmd   : structure used to communicate with the device
+ *
+ * After the device receives a Wake token, a watchdog counter starts within the
+ * device. After the watchdog timer expires, the device enters sleep mode
+ * regardless of whether some I/O transmission or command execution is in
+ * progress. If a command is attempted when insufficient time remains prior to
+ * watchdog timer execution, the device will return the watchdog timeout error
+ * code without attempting to execute the command. There is no way to reset the
+ * counter other than to put the device into sleep or idle mode and then
+ * wake it up again.
+ */
+static int atmel_ecc_send_receive(struct i2c_client *client,
+				  struct atmel_ecc_cmd *cmd)
+{
+	struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+	int ret;
+
+	mutex_lock(&i2c_priv->lock);
+
+	ret = atmel_ecc_wakeup(client);
+	if (ret)
+		goto err;
+
+	/* send the command */
+	ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
+	if (ret < 0)
+		goto err;
+
+	/* delay the appropriate amount of time for command to execute */
+	msleep(cmd->msecs);
+
+	/* receive the response */
+	ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
+	if (ret < 0)
+		goto err;
+
+	/* put the device into low-power mode */
+	ret = atmel_ecc_sleep(client);
+	if (ret < 0)
+		goto err;
+
+	mutex_unlock(&i2c_priv->lock);
+	return atmel_ecc_status(&client->dev, cmd->data);
+err:
+	mutex_unlock(&i2c_priv->lock);
+	return ret;
+}
+
+static void atmel_ecc_work_handler(struct work_struct *work)
+{
+	struct atmel_ecc_work_data *work_data =
+			container_of(work, struct atmel_ecc_work_data, work);
+	struct atmel_ecc_cmd *cmd = &work_data->cmd;
+	struct i2c_client *client = work_data->ctx->client;
+	int status;
+
+	status = atmel_ecc_send_receive(client, cmd);
+	work_data->cbk(work_data, work_data->areq, status);
+}
+
+static void atmel_ecc_enqueue(struct atmel_ecc_work_data *work_data,
+			      void (*cbk)(struct atmel_ecc_work_data *work_data,
+					  void *areq, int status),
+			      void *areq)
+{
+	work_data->cbk = (void *)cbk;
+	work_data->areq = areq;
+
+	INIT_WORK(&work_data->work, atmel_ecc_work_handler);
+	schedule_work(&work_data->work);
+}
+
+static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
+{
+	if (curve_id == ECC_CURVE_NIST_P256)
+		return ATMEL_ECC_NIST_P256_N_SIZE;
+
+	return 0;
+}
+
+/*
+ * A random private key is generated and stored in the device. The device
+ * returns the pair public key.
+ */
+static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+				 unsigned int len)
+{
+	struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct atmel_ecc_cmd *cmd;
+	void *public_key;
+	struct ecdh params;
+	int ret = -ENOMEM;
+
+	/* free the old public key, if any */
+	kfree(ctx->public_key);
+	/* make sure you don't free the old public key twice */
+	ctx->public_key = NULL;
+
+	if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
+		dev_err(&ctx->client->dev, "crypto_ecdh_decode_key failed\n");
+		return -EINVAL;
+	}
+
+	ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id);
+	if (!ctx->n_sz || params.key_size) {
+		/* fallback to ecdh software implementation */
+		ctx->do_fallback = true;
+		return crypto_kpp_set_secret(ctx->fallback, buf, len);
+	}
+
+	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	/*
+	 * The device only supports NIST P256 ECC keys. The public key size will
+	 * always be the same. Use a macro for the key size to avoid unnecessary
+	 * computations.
+	 */
+	public_key = kmalloc(ATMEL_ECC_PUBKEY_SIZE, GFP_KERNEL);
+	if (!public_key)
+		goto free_cmd;
+
+	ctx->do_fallback = false;
+	ctx->curve_id = params.curve_id;
+
+	atmel_ecc_init_genkey_cmd(cmd, DATA_SLOT_2);
+
+	ret = atmel_ecc_send_receive(ctx->client, cmd);
+	if (ret)
+		goto free_public_key;
+
+	/* save the public key */
+	memcpy(public_key, &cmd->data[RSP_DATA_IDX], ATMEL_ECC_PUBKEY_SIZE);
+	ctx->public_key = public_key;
+
+	kfree(cmd);
+	return 0;
+
+free_public_key:
+	kfree(public_key);
+free_cmd:
+	kfree(cmd);
+	return ret;
+}
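+
+/*
+ * Illustrative sketch (not part of this driver): a caller that wants the
+ * device to generate the private key passes an empty key through the
+ * generic kpp API, which lands in atmel_ecdh_set_secret() above
+ * (error handling omitted):
+ *
+ *	struct ecdh p = { .curve_id = ECC_CURVE_NIST_P256, .key_size = 0 };
+ *	struct crypto_kpp *tfm = crypto_alloc_kpp("ecdh", 0, 0);
+ *	unsigned int len = crypto_ecdh_key_len(&p);
+ *
+ *	crypto_ecdh_encode_key(buf, len, &p);
+ *	crypto_kpp_set_secret(tfm, buf, len);	// device generates the key pair
+ */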
+
+static int atmel_ecdh_generate_public_key(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+	size_t copied, nbytes;
+	int ret = 0;
+
+	if (ctx->do_fallback) {
+		kpp_request_set_tfm(req, ctx->fallback);
+		return crypto_kpp_generate_public_key(req);
+	}
+
+	/* might want less than we've got */
+	nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);
+
+	/* public key was saved at private key generation */
+	copied = sg_copy_from_buffer(req->dst,
+				     sg_nents_for_len(req->dst, nbytes),
+				     ctx->public_key, nbytes);
+	if (copied != nbytes)
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct atmel_ecc_work_data *work_data;
+	gfp_t gfp;
+	int ret;
+
+	if (ctx->do_fallback) {
+		kpp_request_set_tfm(req, ctx->fallback);
+		return crypto_kpp_compute_shared_secret(req);
+	}
+
+	/* must have exactly two points to be on the curve */
+	if (req->src_len != ATMEL_ECC_PUBKEY_SIZE)
+		return -EINVAL;
+
+	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+							     GFP_ATOMIC;
+
+	work_data = kmalloc(sizeof(*work_data), gfp);
+	if (!work_data)
+		return -ENOMEM;
+
+	work_data->ctx = ctx;
+
+	ret = atmel_ecc_init_ecdh_cmd(&work_data->cmd, req->src);
+	if (ret)
+		goto free_work_data;
+
+	atmel_ecc_enqueue(work_data, atmel_ecdh_done, req);
+
+	return -EINPROGRESS;
+
+free_work_data:
+	kfree(work_data);
+	return ret;
+}
+
+static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
+{
+	struct atmel_ecc_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
+	struct i2c_client *client = ERR_PTR(-ENODEV);
+	int min_tfm_cnt = INT_MAX;
+	int tfm_cnt;
+
+	spin_lock(&driver_data.i2c_list_lock);
+
+	if (list_empty(&driver_data.i2c_client_list)) {
+		spin_unlock(&driver_data.i2c_list_lock);
+		return ERR_PTR(-ENODEV);
+	}
+
+	list_for_each_entry(i2c_priv, &driver_data.i2c_client_list,
+			    i2c_client_list_node) {
+		tfm_cnt = atomic_read(&i2c_priv->tfm_count);
+		if (tfm_cnt < min_tfm_cnt) {
+			min_tfm_cnt = tfm_cnt;
+			min_i2c_priv = i2c_priv;
+		}
+		if (!min_tfm_cnt)
+			break;
+	}
+
+	if (min_i2c_priv) {
+		atomic_inc(&min_i2c_priv->tfm_count);
+		client = min_i2c_priv->client;
+	}
+
+	spin_unlock(&driver_data.i2c_list_lock);
+
+	return client;
+}
+
+static void atmel_ecc_i2c_client_free(struct i2c_client *client)
+{
+	struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+
+	atomic_dec(&i2c_priv->tfm_count);
+}
+
+static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
+{
+	const char *alg = kpp_alg_name(tfm);
+	struct crypto_kpp *fallback;
+	struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+	ctx->client = atmel_ecc_i2c_client_alloc();
+	if (IS_ERR(ctx->client)) {
+		pr_err("tfm - i2c_client binding failed\n");
+		return PTR_ERR(ctx->client);
+	}
+
+	fallback = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback)) {
+		dev_err(&ctx->client->dev, "Failed to allocate transformation for '%s': %ld\n",
+			alg, PTR_ERR(fallback));
+		return PTR_ERR(fallback);
+	}
+
+	crypto_kpp_set_flags(fallback, crypto_kpp_get_flags(tfm));
+	ctx->fallback = fallback;
+
+	return 0;
+}
+
+static void atmel_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+	struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+	kfree(ctx->public_key);
+	crypto_free_kpp(ctx->fallback);
+	atmel_ecc_i2c_client_free(ctx->client);
+}
+
+static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm)
+{
+	struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+	if (ctx->fallback)
+		return crypto_kpp_maxsize(ctx->fallback);
+
+	/*
+	 * The device only supports NIST P256 ECC keys. The public key size will
+	 * always be the same. Use a macro for the key size to avoid unnecessary
+	 * computations.
+	 */
+	return ATMEL_ECC_PUBKEY_SIZE;
+}
+
+static struct kpp_alg atmel_ecdh = {
+	.set_secret = atmel_ecdh_set_secret,
+	.generate_public_key = atmel_ecdh_generate_public_key,
+	.compute_shared_secret = atmel_ecdh_compute_shared_secret,
+	.init = atmel_ecdh_init_tfm,
+	.exit = atmel_ecdh_exit_tfm,
+	.max_size = atmel_ecdh_max_size,
+	.base = {
+		.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+		.cra_name = "ecdh",
+		.cra_driver_name = "atmel-ecdh",
+		.cra_priority = ATMEL_ECC_PRIORITY,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct atmel_ecdh_ctx),
+	},
+};
+
+static inline size_t atmel_ecc_wake_token_sz(u32 bus_clk_rate)
+{
+	u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
+
+	/* return the size of the wake_token in bytes */
+	return DIV_ROUND_UP(no_of_bits, 8);
+}
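+
+/*
+ * Example: with TWLO_USEC = 60, a 100 kHz bus needs DIV_ROUND_UP(60 *
+ * 100000, USEC_PER_SEC) = 6 bit times of zeros, i.e. a 1-byte wake token;
+ * at the 1 MHz maximum it needs 60 bits, i.e. 8 bytes = WAKE_TOKEN_MAX_SIZE.
+ */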
+
+static int device_sanity_check(struct i2c_client *client)
+{
+	struct atmel_ecc_cmd *cmd;
+	int ret;
+
+	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	atmel_ecc_init_read_cmd(cmd);
+
+	ret = atmel_ecc_send_receive(client, cmd);
+	if (ret)
+		goto free_cmd;
+
+	/*
+	 * It is vital that the Configuration, Data and OTP zones be locked
+	 * prior to release into the field of the system containing the device.
+	 * Failure to lock these zones may permit modification of any secret
+	 * keys and may lead to other security problems.
+	 */
+	if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
+		dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
+		ret = -ENOTSUPP;
+	}
+
+	/* fall through */
+free_cmd:
+	kfree(cmd);
+	return ret;
+}
+
+static int atmel_ecc_probe(struct i2c_client *client,
+			   const struct i2c_device_id *id)
+{
+	struct atmel_ecc_i2c_client_priv *i2c_priv;
+	struct device *dev = &client->dev;
+	int ret;
+	u32 bus_clk_rate;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(dev, "I2C_FUNC_I2C not supported\n");
+		return -ENODEV;
+	}
+
+	ret = of_property_read_u32(client->adapter->dev.of_node,
+				   "clock-frequency", &bus_clk_rate);
+	if (ret) {
+		dev_err(dev, "of: failed to read clock-frequency property\n");
+		return ret;
+	}
+
+	if (bus_clk_rate > 1000000L) {
+		dev_err(dev, "%u exceeds maximum supported clock frequency (1MHz)\n",
+			bus_clk_rate);
+		return -EINVAL;
+	}
+
+	i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
+	if (!i2c_priv)
+		return -ENOMEM;
+
+	i2c_priv->client = client;
+	mutex_init(&i2c_priv->lock);
+
+	/*
+	 * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate of
+	 * 1 MHz. The bus_clk_rate check above ensures that wake_token_sz will
+	 * always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
+	 */
+	i2c_priv->wake_token_sz = atmel_ecc_wake_token_sz(bus_clk_rate);
+
+	memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));
+
+	atomic_set(&i2c_priv->tfm_count, 0);
+
+	i2c_set_clientdata(client, i2c_priv);
+
+	ret = device_sanity_check(client);
+	if (ret)
+		return ret;
+
+	spin_lock(&driver_data.i2c_list_lock);
+	list_add_tail(&i2c_priv->i2c_client_list_node,
+		      &driver_data.i2c_client_list);
+	spin_unlock(&driver_data.i2c_list_lock);
+
+	ret = crypto_register_kpp(&atmel_ecdh);
+	if (ret) {
+		spin_lock(&driver_data.i2c_list_lock);
+		list_del(&i2c_priv->i2c_client_list_node);
+		spin_unlock(&driver_data.i2c_list_lock);
+
+		dev_err(dev, "%s alg registration failed\n",
+			atmel_ecdh.base.cra_driver_name);
+	} else {
+		dev_info(dev, "atmel ecc algorithms registered in /proc/crypto\n");
+	}
+
+	return ret;
+}
+
+static int atmel_ecc_remove(struct i2c_client *client)
+{
+	struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+
+	/* Return EBUSY if i2c client already allocated. */
+	if (atomic_read(&i2c_priv->tfm_count)) {
+		dev_err(&client->dev, "Device is busy\n");
+		return -EBUSY;
+	}
+
+	crypto_unregister_kpp(&atmel_ecdh);
+
+	spin_lock(&driver_data.i2c_list_lock);
+	list_del(&i2c_priv->i2c_client_list_node);
+	spin_unlock(&driver_data.i2c_list_lock);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_ecc_dt_ids[] = {
+	{
+		.compatible = "atmel,atecc508a",
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(of, atmel_ecc_dt_ids);
+#endif
+
+static const struct i2c_device_id atmel_ecc_id[] = {
+	{ "atecc508a", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, atmel_ecc_id);
+
+static struct i2c_driver atmel_ecc_driver = {
+	.driver = {
+		.name	= "atmel-ecc",
+		.of_match_table = of_match_ptr(atmel_ecc_dt_ids),
+	},
+	.probe		= atmel_ecc_probe,
+	.remove		= atmel_ecc_remove,
+	.id_table	= atmel_ecc_id,
+};
+
+static int __init atmel_ecc_init(void)
+{
+	spin_lock_init(&driver_data.i2c_list_lock);
+	INIT_LIST_HEAD(&driver_data.i2c_client_list);
+	return i2c_add_driver(&atmel_ecc_driver);
+}
+
+static void __exit atmel_ecc_exit(void)
+{
+	flush_scheduled_work();
+	i2c_del_driver(&atmel_ecc_driver);
+}
+
+module_init(atmel_ecc_init);
+module_exit(atmel_ecc_exit);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h
new file mode 100644
index 0000000..25232c8
--- /dev/null
+++ b/drivers/crypto/atmel-ecc.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017, Microchip Technology Inc.
+ * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef __ATMEL_ECC_H__
+#define __ATMEL_ECC_H__
+
+#define ATMEL_ECC_PRIORITY		300
+
+#define COMMAND				0x03 /* packet function */
+#define SLEEP_TOKEN			0x01
+#define WAKE_TOKEN_MAX_SIZE		8
+
+/* Definitions of Data and Command sizes */
+#define WORD_ADDR_SIZE			1
+#define COUNT_SIZE			1
+#define CRC_SIZE			2
+#define CMD_OVERHEAD_SIZE		(COUNT_SIZE + CRC_SIZE)
+
+/* size in bytes of the n prime */
+#define ATMEL_ECC_NIST_P256_N_SIZE	32
+#define ATMEL_ECC_PUBKEY_SIZE		(2 * ATMEL_ECC_NIST_P256_N_SIZE)
+
+#define STATUS_RSP_SIZE			4
+#define ECDH_RSP_SIZE			(32 + CMD_OVERHEAD_SIZE)
+#define GENKEY_RSP_SIZE			(ATMEL_ECC_PUBKEY_SIZE + \
+					 CMD_OVERHEAD_SIZE)
+#define READ_RSP_SIZE			(4 + CMD_OVERHEAD_SIZE)
+#define MAX_RSP_SIZE			GENKEY_RSP_SIZE
+
+/**
+ * struct atmel_ecc_cmd - structure used for communicating with the device.
+ * @word_addr: indicates the function of the packet sent to the device. This
+ *             byte should have a value of COMMAND for normal operation.
+ * @count    : number of bytes to be transferred to (or from) the device.
+ * @opcode   : the command code.
+ * @param1   : the first parameter; always present.
+ * @param2   : the second parameter; always present.
+ * @data     : optional remaining input data. Includes a 2-byte CRC.
+ * @msecs    : command execution time in milliseconds.
+ * @rxsize   : size of the data received from the i2c client.
+ */
+struct atmel_ecc_cmd {
+	u8 word_addr;
+	u8 count;
+	u8 opcode;
+	u8 param1;
+	u16 param2;
+	u8 data[MAX_RSP_SIZE];
+	u8 msecs;
+	u16 rxsize;
+} __packed;
+
+/* Status/Error codes */
+#define STATUS_SIZE			0x04
+#define STATUS_NOERR			0x00
+#define STATUS_WAKE_SUCCESSFUL		0x11
+
+static const struct {
+	u8 value;
+	const char *error_text;
+} error_list[] = {
+	{ 0x01, "CheckMac or Verify miscompare" },
+	{ 0x03, "Parse Error" },
+	{ 0x05, "ECC Fault" },
+	{ 0x0F, "Execution Error" },
+	{ 0xEE, "Watchdog about to expire" },
+	{ 0xFF, "CRC or other communication error" },
+};
+
+/* Definitions for eeprom organization */
+#define CONFIG_ZONE			0
+
+/* Definitions for Indexes common to all commands */
+#define RSP_DATA_IDX			1 /* buffer index of data in response */
+#define DATA_SLOT_2			2 /* used for ECDH private key */
+
+/* Definitions for the device lock state */
+#define DEVICE_LOCK_ADDR		0x15
+#define LOCK_VALUE_IDX			(RSP_DATA_IDX + 2)
+#define LOCK_CONFIG_IDX			(RSP_DATA_IDX + 3)
+
+/*
+ * Wake High delay to data communication (microseconds). SDA should be stable
+ * high for this entire duration.
+ */
+#define TWHI_MIN			1500
+#define TWHI_MAX			1550
+
+/* Wake Low duration */
+#define TWLO_USEC			60
+
+/* Command execution time (milliseconds) */
+#define MAX_EXEC_TIME_ECDH		58
+#define MAX_EXEC_TIME_GENKEY		115
+#define MAX_EXEC_TIME_READ		1
+
+/* Command opcode */
+#define OPCODE_ECDH			0x43
+#define OPCODE_GENKEY			0x40
+#define OPCODE_READ			0x02
+
+/* Definitions for the READ Command */
+#define READ_COUNT			7
+
+/* Definitions for the GenKey Command */
+#define GENKEY_COUNT			7
+#define GENKEY_MODE_PRIVATE		0x04
+
+/* Definitions for the ECDH Command */
+#define ECDH_COUNT			71
+#define ECDH_PREFIX_MODE		0x00
+
+#endif /* __ATMEL_ECC_H__ */
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h
new file mode 100644
index 0000000..b2b5e63
--- /dev/null
+++ b/drivers/crypto/atmel-sha-regs.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ATMEL_SHA_REGS_H__
+#define __ATMEL_SHA_REGS_H__
+
+#define SHA_REG_DIGEST(x)		(0x80 + ((x) * 0x04))
+#define SHA_REG_DIN(x)			(0x40 + ((x) * 0x04))
+
+#define SHA_CR				0x00
+#define SHA_CR_START			(1 << 0)
+#define SHA_CR_FIRST			(1 << 4)
+#define SHA_CR_SWRST			(1 << 8)
+#define SHA_CR_WUIHV			(1 << 12)
+#define SHA_CR_WUIEHV			(1 << 13)
+
+#define SHA_MR				0x04
+#define SHA_MR_MODE_MASK		(0x3 << 0)
+#define SHA_MR_MODE_MANUAL		0x0
+#define SHA_MR_MODE_AUTO		0x1
+#define SHA_MR_MODE_PDC			0x2
+#define SHA_MR_MODE_IDATAR0		0x2
+#define SHA_MR_PROCDLY			(1 << 4)
+#define SHA_MR_UIHV			(1 << 5)
+#define SHA_MR_UIEHV			(1 << 6)
+#define SHA_MR_ALGO_MASK		GENMASK(10, 8)
+#define SHA_MR_ALGO_SHA1		(0 << 8)
+#define SHA_MR_ALGO_SHA256		(1 << 8)
+#define SHA_MR_ALGO_SHA384		(2 << 8)
+#define SHA_MR_ALGO_SHA512		(3 << 8)
+#define SHA_MR_ALGO_SHA224		(4 << 8)
+#define SHA_MR_HMAC			(1 << 11)
+#define	SHA_MR_DUALBUFF			(1 << 16)
+
+#define SHA_FLAGS_ALGO_MASK	SHA_MR_ALGO_MASK
+#define SHA_FLAGS_SHA1		SHA_MR_ALGO_SHA1
+#define SHA_FLAGS_SHA256	SHA_MR_ALGO_SHA256
+#define SHA_FLAGS_SHA384	SHA_MR_ALGO_SHA384
+#define SHA_FLAGS_SHA512	SHA_MR_ALGO_SHA512
+#define SHA_FLAGS_SHA224	SHA_MR_ALGO_SHA224
+#define SHA_FLAGS_HMAC		SHA_MR_HMAC
+#define SHA_FLAGS_HMAC_SHA1	(SHA_FLAGS_HMAC | SHA_FLAGS_SHA1)
+#define SHA_FLAGS_HMAC_SHA256	(SHA_FLAGS_HMAC | SHA_FLAGS_SHA256)
+#define SHA_FLAGS_HMAC_SHA384	(SHA_FLAGS_HMAC | SHA_FLAGS_SHA384)
+#define SHA_FLAGS_HMAC_SHA512	(SHA_FLAGS_HMAC | SHA_FLAGS_SHA512)
+#define SHA_FLAGS_HMAC_SHA224	(SHA_FLAGS_HMAC | SHA_FLAGS_SHA224)
+#define SHA_FLAGS_MODE_MASK	(SHA_FLAGS_HMAC | SHA_FLAGS_ALGO_MASK)
+
+#define SHA_IER				0x10
+#define SHA_IDR				0x14
+#define SHA_IMR				0x18
+#define SHA_ISR				0x1C
+#define SHA_INT_DATARDY			(1 << 0)
+#define SHA_INT_ENDTX			(1 << 1)
+#define SHA_INT_TXBUFE			(1 << 2)
+#define SHA_INT_URAD			(1 << 8)
+#define SHA_ISR_URAT_MASK		(0x7 << 12)
+#define SHA_ISR_URAT_IDR		(0x0 << 12)
+#define SHA_ISR_URAT_ODR		(0x1 << 12)
+#define SHA_ISR_URAT_MR			(0x2 << 12)
+#define SHA_ISR_URAT_WO			(0x5 << 12)
+
+#define SHA_MSR				0x20
+#define SHA_BCR				0x30
+
+#define	SHA_HW_VERSION		0xFC
+
+#define SHA_TPR				0x108
+#define SHA_TCR				0x10C
+#define SHA_TNPR			0x118
+#define SHA_TNCR			0x11C
+#define SHA_PTCR			0x120
+#define SHA_PTCR_TXTEN		(1 << 8)
+#define SHA_PTCR_TXTDIS		(1 << 9)
+#define SHA_PTSR			0x124
+#define SHA_PTSR_TXTEN		(1 << 8)
+
+#endif /* __ATMEL_SHA_REGS_H__ */
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
new file mode 100644
index 0000000..8a19df2
--- /dev/null
+++ b/drivers/crypto/atmel-sha.c
@@ -0,0 +1,2911 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for ATMEL SHA1/SHA256 HW acceleration.
+ *
+ * Copyright (c) 2012 Eukréa Electromatique - ATMEL
+ * Author: Nicolas Royer <nicolas@eukrea.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from omap-sham.c drivers.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/platform_data/crypto-atmel.h>
+#include "atmel-sha-regs.h"
+#include "atmel-authenc.h"
+
+/* SHA flags */
+#define SHA_FLAGS_BUSY			BIT(0)
+#define	SHA_FLAGS_FINAL			BIT(1)
+#define SHA_FLAGS_DMA_ACTIVE	BIT(2)
+#define SHA_FLAGS_OUTPUT_READY	BIT(3)
+#define SHA_FLAGS_INIT			BIT(4)
+#define SHA_FLAGS_CPU			BIT(5)
+#define SHA_FLAGS_DMA_READY		BIT(6)
+#define SHA_FLAGS_DUMP_REG	BIT(7)
+
+/* bits[11:8] are reserved. */
+
+#define SHA_FLAGS_FINUP		BIT(16)
+#define SHA_FLAGS_SG		BIT(17)
+#define SHA_FLAGS_ERROR		BIT(23)
+#define SHA_FLAGS_PAD		BIT(24)
+#define SHA_FLAGS_RESTORE	BIT(25)
+#define SHA_FLAGS_IDATAR0	BIT(26)
+#define SHA_FLAGS_WAIT_DATARDY	BIT(27)
+
+#define SHA_OP_INIT	0
+#define SHA_OP_UPDATE	1
+#define SHA_OP_FINAL	2
+#define SHA_OP_DIGEST	3
+
+#define SHA_BUFFER_LEN		(PAGE_SIZE / 16)
+
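+/*
+ * Requests shorter than this many bytes are fed to the hardware by the
+ * CPU: see atmel_sha_update(), where DMA is skipped for such short
+ * transfers because setting it up costs more than it saves.
+ */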
+#define ATMEL_SHA_DMA_THRESHOLD		56
+
+struct atmel_sha_caps {
+	bool	has_dma;
+	bool	has_dualbuff;
+	bool	has_sha224;
+	bool	has_sha_384_512;
+	bool	has_uihv;
+	bool	has_hmac;
+};
+
+struct atmel_sha_dev;
+
+/*
+ * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
+ * tested by the ahash_prepare_alg() function.
+ */
+struct atmel_sha_reqctx {
+	struct atmel_sha_dev	*dd;
+	unsigned long	flags;
+	unsigned long	op;
+
+	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
+	u64	digcnt[2];
+	size_t	bufcnt;
+	size_t	buflen;
+	dma_addr_t	dma_addr;
+
+	/* walk state */
+	struct scatterlist	*sg;
+	unsigned int	offset;	/* offset in current sg */
+	unsigned int	total;	/* total request */
+
+	size_t block_size;
+	size_t hash_size;
+
+	u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+};
+
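+/*
+ * Driver state-machine step: handlers of this type either finish their
+ * work directly or store the next step in dd->resume and return
+ * -EINPROGRESS, so that the done_task tasklet can resume processing once
+ * the hardware raises its interrupt.
+ */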
+typedef int (*atmel_sha_fn_t)(struct atmel_sha_dev *);
+
+struct atmel_sha_ctx {
+	struct atmel_sha_dev	*dd;
+	atmel_sha_fn_t		start;
+
+	unsigned long		flags;
+};
+
+#define ATMEL_SHA_QUEUE_LENGTH	50
+
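+/*
+ * DMA channel state. @last_sg_length keeps the original length of the
+ * final scatterlist entry, which atmel_sha_dma_check_aligned() rounds up
+ * to a 32-bit boundary and atmel_sha_dma_callback2() restores afterwards.
+ */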
+struct atmel_sha_dma {
+	struct dma_chan			*chan;
+	struct dma_slave_config dma_conf;
+	struct scatterlist	*sg;
+	int			nents;
+	unsigned int		last_sg_length;
+};
+
+struct atmel_sha_dev {
+	struct list_head	list;
+	unsigned long		phys_base;
+	struct device		*dev;
+	struct clk			*iclk;
+	int					irq;
+	void __iomem		*io_base;
+
+	spinlock_t		lock;
+	int			err;
+	struct tasklet_struct	done_task;
+	struct tasklet_struct	queue_task;
+
+	unsigned long		flags;
+	struct crypto_queue	queue;
+	struct ahash_request	*req;
+	bool			is_async;
+	bool			force_complete;
+	atmel_sha_fn_t		resume;
+	atmel_sha_fn_t		cpu_transfer_complete;
+
+	struct atmel_sha_dma	dma_lch_in;
+
+	struct atmel_sha_caps	caps;
+
+	struct scatterlist	tmp;
+
+	u32	hw_version;
+};
+
+struct atmel_sha_drv {
+	struct list_head	dev_list;
+	spinlock_t		lock;
+};
+
+static struct atmel_sha_drv atmel_sha = {
+	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
+};
+
+#ifdef VERBOSE_DEBUG
+static const char *atmel_sha_reg_name(u32 offset, char *tmp, size_t sz, bool wr)
+{
+	switch (offset) {
+	case SHA_CR:
+		return "CR";
+
+	case SHA_MR:
+		return "MR";
+
+	case SHA_IER:
+		return "IER";
+
+	case SHA_IDR:
+		return "IDR";
+
+	case SHA_IMR:
+		return "IMR";
+
+	case SHA_ISR:
+		return "ISR";
+
+	case SHA_MSR:
+		return "MSR";
+
+	case SHA_BCR:
+		return "BCR";
+
+	case SHA_REG_DIN(0):
+	case SHA_REG_DIN(1):
+	case SHA_REG_DIN(2):
+	case SHA_REG_DIN(3):
+	case SHA_REG_DIN(4):
+	case SHA_REG_DIN(5):
+	case SHA_REG_DIN(6):
+	case SHA_REG_DIN(7):
+	case SHA_REG_DIN(8):
+	case SHA_REG_DIN(9):
+	case SHA_REG_DIN(10):
+	case SHA_REG_DIN(11):
+	case SHA_REG_DIN(12):
+	case SHA_REG_DIN(13):
+	case SHA_REG_DIN(14):
+	case SHA_REG_DIN(15):
+		snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2);
+		break;
+
+	case SHA_REG_DIGEST(0):
+	case SHA_REG_DIGEST(1):
+	case SHA_REG_DIGEST(2):
+	case SHA_REG_DIGEST(3):
+	case SHA_REG_DIGEST(4):
+	case SHA_REG_DIGEST(5):
+	case SHA_REG_DIGEST(6):
+	case SHA_REG_DIGEST(7):
+	case SHA_REG_DIGEST(8):
+	case SHA_REG_DIGEST(9):
+	case SHA_REG_DIGEST(10):
+	case SHA_REG_DIGEST(11):
+	case SHA_REG_DIGEST(12):
+	case SHA_REG_DIGEST(13):
+	case SHA_REG_DIGEST(14):
+	case SHA_REG_DIGEST(15):
+		if (wr)
+			snprintf(tmp, sz, "IDATAR[%u]",
+				 16u + ((offset - SHA_REG_DIGEST(0)) >> 2));
+		else
+			snprintf(tmp, sz, "ODATAR[%u]",
+				 (offset - SHA_REG_DIGEST(0)) >> 2);
+		break;
+
+	case SHA_HW_VERSION:
+		return "HWVER";
+
+	default:
+		snprintf(tmp, sz, "0x%02x", offset);
+		break;
+	}
+
+	return tmp;
+}
+
+#endif /* VERBOSE_DEBUG */
+
+static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
+{
+	u32 value = readl_relaxed(dd->io_base + offset);
+
+#ifdef VERBOSE_DEBUG
+	if (dd->flags & SHA_FLAGS_DUMP_REG) {
+		char tmp[16];
+
+		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
+			 atmel_sha_reg_name(offset, tmp, sizeof(tmp), false));
+	}
+#endif /* VERBOSE_DEBUG */
+
+	return value;
+}
+
+static inline void atmel_sha_write(struct atmel_sha_dev *dd,
+					u32 offset, u32 value)
+{
+#ifdef VERBOSE_DEBUG
+	if (dd->flags & SHA_FLAGS_DUMP_REG) {
+		char tmp[16];
+
+		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
+			 atmel_sha_reg_name(offset, tmp, sizeof(tmp), true));
+	}
+#endif /* VERBOSE_DEBUG */
+
+	writel_relaxed(value, dd->io_base + offset);
+}
+
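+/*
+ * Release the hardware: clear the busy/progress flags, gate the clock,
+ * complete the current request when it was handled asynchronously, and
+ * schedule the queue tasklet so that any pending request can start.
+ */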
+static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err)
+{
+	struct ahash_request *req = dd->req;
+
+	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
+		       SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY |
+		       SHA_FLAGS_DUMP_REG);
+
+	clk_disable(dd->iclk);
+
+	if ((dd->is_async || dd->force_complete) && req->base.complete)
+		req->base.complete(&req->base, err);
+
+	/* handle new request */
+	tasklet_schedule(&dd->queue_task);
+
+	return err;
+}
+
+static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
+{
+	size_t count;
+
+	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
+		count = min(ctx->sg->length - ctx->offset, ctx->total);
+		count = min(count, ctx->buflen - ctx->bufcnt);
+
+		if (count <= 0) {
+			/*
+			 * count can be zero either because the buffer is full
+			 * or because the current sg length is 0. In the latter
+			 * case, check whether there is another sg in the list:
+			 * a zero-length sg does not necessarily mean the end
+			 * of the sg list.
+			 */
+			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
+				ctx->sg = sg_next(ctx->sg);
+				continue;
+			} else {
+				break;
+			}
+		}
+
+		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
+			ctx->offset, count, 0);
+
+		ctx->bufcnt += count;
+		ctx->offset += count;
+		ctx->total -= count;
+
+		if (ctx->offset == ctx->sg->length) {
+			ctx->sg = sg_next(ctx->sg);
+			if (ctx->sg)
+				ctx->offset = 0;
+			else
+				ctx->total = 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * The purpose of this padding is to ensure that the padded message is a
+ * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
+ * The bit "1" is appended at the end of the message, followed by
+ * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
+ * 128-bit block (SHA384/SHA512) holding the message length in bits is
+ * appended.
+ *
+ * For SHA1/SHA224/SHA256, padlen is calculated as follows, with the
+ * message length taken modulo the 64-byte block size:
+ *  - if message length < 56 bytes then padlen = 56 - message length
+ *  - else padlen = 64 + 56 - message length
+ *
+ * For SHA384/SHA512, padlen is calculated as follows, with the message
+ * length taken modulo the 128-byte block size:
+ *  - if message length < 112 bytes then padlen = 112 - message length
+ *  - else padlen = 128 + 112 - message length
+ */
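+/*
+ * For example, with SHA256 and the 3-byte message "abc": index = 3,
+ * padlen = 56 - 3 = 53, and the padded message is 3 + 53 + 8 = 64 bytes,
+ * i.e. exactly one 512-bit block.
+ */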
+static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
+{
+	unsigned int index, padlen;
+	u64 bits[2];
+	u64 size[2];
+
+	size[0] = ctx->digcnt[0];
+	size[1] = ctx->digcnt[1];
+
+	size[0] += ctx->bufcnt;
+	if (size[0] < ctx->bufcnt)
+		size[1]++;
+
+	size[0] += length;
+	if (size[0]  < length)
+		size[1]++;
+
+	bits[1] = cpu_to_be64(size[0] << 3);
+	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);
+
+	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+	case SHA_FLAGS_SHA384:
+	case SHA_FLAGS_SHA512:
+		index = ctx->bufcnt & 0x7f;
+		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
+		*(ctx->buffer + ctx->bufcnt) = 0x80;
+		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
+		ctx->bufcnt += padlen + 16;
+		ctx->flags |= SHA_FLAGS_PAD;
+		break;
+
+	default:
+		index = ctx->bufcnt & 0x3f;
+		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+		*(ctx->buffer + ctx->bufcnt) = 0x80;
+		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
+		ctx->bufcnt += padlen + 8;
+		ctx->flags |= SHA_FLAGS_PAD;
+		break;
+	}
+}
+
+static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx)
+{
+	struct atmel_sha_dev *dd = NULL;
+	struct atmel_sha_dev *tmp;
+
+	spin_lock_bh(&atmel_sha.lock);
+	if (!tctx->dd) {
+		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
+			dd = tmp;
+			break;
+		}
+		tctx->dd = dd;
+	} else {
+		dd = tctx->dd;
+	}
+
+	spin_unlock_bh(&atmel_sha.lock);
+
+	return dd;
+}
+
+static int atmel_sha_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct atmel_sha_dev *dd = atmel_sha_find_dev(tctx);
+
+	ctx->dd = dd;
+
+	ctx->flags = 0;
+
+	dev_dbg(dd->dev, "init: digest size: %d\n",
+		crypto_ahash_digestsize(tfm));
+
+	switch (crypto_ahash_digestsize(tfm)) {
+	case SHA1_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA1;
+		ctx->block_size = SHA1_BLOCK_SIZE;
+		break;
+	case SHA224_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA224;
+		ctx->block_size = SHA224_BLOCK_SIZE;
+		break;
+	case SHA256_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA256;
+		ctx->block_size = SHA256_BLOCK_SIZE;
+		break;
+	case SHA384_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA384;
+		ctx->block_size = SHA384_BLOCK_SIZE;
+		break;
+	case SHA512_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA512;
+		ctx->block_size = SHA512_BLOCK_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ctx->bufcnt = 0;
+	ctx->digcnt[0] = 0;
+	ctx->digcnt[1] = 0;
+	ctx->buflen = SHA_BUFFER_LEN;
+
+	return 0;
+}
+
+static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	u32 valmr = SHA_MR_MODE_AUTO;
+	unsigned int i, hashsize = 0;
+
+	if (likely(dma)) {
+		if (!dd->caps.has_dma)
+			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
+		valmr = SHA_MR_MODE_PDC;
+		if (dd->caps.has_dualbuff)
+			valmr |= SHA_MR_DUALBUFF;
+	} else {
+		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
+	}
+
+	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+	case SHA_FLAGS_SHA1:
+		valmr |= SHA_MR_ALGO_SHA1;
+		hashsize = SHA1_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA224:
+		valmr |= SHA_MR_ALGO_SHA224;
+		hashsize = SHA256_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA256:
+		valmr |= SHA_MR_ALGO_SHA256;
+		hashsize = SHA256_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA384:
+		valmr |= SHA_MR_ALGO_SHA384;
+		hashsize = SHA512_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA512:
+		valmr |= SHA_MR_ALGO_SHA512;
+		hashsize = SHA512_DIGEST_SIZE;
+		break;
+
+	default:
+		break;
+	}
+
+	/* Setting CR_FIRST only for the first iteration */
+	if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
+		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+	} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
+		const u32 *hash = (const u32 *)ctx->digest;
+
+		/*
+		 * Restore the hardware context: update the User Initialize
+		 * Hash Value (UIHV) with the value saved when the latest
+		 * 'update' operation completed on this very same crypto
+		 * request.
+		 */
+		ctx->flags &= ~SHA_FLAGS_RESTORE;
+		atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
+		for (i = 0; i < hashsize / sizeof(u32); ++i)
+			atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
+		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+		valmr |= SHA_MR_UIHV;
+	}
+	/*
+	 * WARNING: If the UIHV feature is not available, the hardware CANNOT
+	 * process concurrent requests: the internal registers used to store
+	 * the hash/digest are still set to the partial digest output values
+	 * computed during the latest round.
+	 */
+
+	atmel_sha_write(dd, SHA_MR, valmr);
+}
+
+static inline int atmel_sha_wait_for_data_ready(struct atmel_sha_dev *dd,
+						atmel_sha_fn_t resume)
+{
+	u32 isr = atmel_sha_read(dd, SHA_ISR);
+
+	if (unlikely(isr & SHA_INT_DATARDY))
+		return resume(dd);
+
+	dd->resume = resume;
+	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
+	return -EINPROGRESS;
+}
+
+static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
+			      size_t length, int final)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	int count, len32;
+	const u32 *buffer = (const u32 *)buf;
+
+	dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
+		ctx->digcnt[1], ctx->digcnt[0], length, final);
+
+	atmel_sha_write_ctrl(dd, 0);
+
+	/* should be non-zero before next lines to disable clocks later */
+	ctx->digcnt[0] += length;
+	if (ctx->digcnt[0] < length)
+		ctx->digcnt[1]++;
+
+	if (final)
+		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
+
+	len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+	dd->flags |= SHA_FLAGS_CPU;
+
+	for (count = 0; count < len32; count++)
+		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
+
+	return -EINPROGRESS;
+}
+
+static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
+		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	int len32;
+
+	dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
+		ctx->digcnt[1], ctx->digcnt[0], length1, final);
+
+	len32 = DIV_ROUND_UP(length1, sizeof(u32));
+	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
+	atmel_sha_write(dd, SHA_TPR, dma_addr1);
+	atmel_sha_write(dd, SHA_TCR, len32);
+
+	len32 = DIV_ROUND_UP(length2, sizeof(u32));
+	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
+	atmel_sha_write(dd, SHA_TNCR, len32);
+
+	atmel_sha_write_ctrl(dd, 1);
+
+	/* should be non-zero before next lines to disable clocks later */
+	ctx->digcnt[0] += length1;
+	if (ctx->digcnt[0] < length1)
+		ctx->digcnt[1]++;
+
+	if (final)
+		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
+
+	dd->flags |=  SHA_FLAGS_DMA_ACTIVE;
+
+	/* Start DMA transfer */
+	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
+
+	return -EINPROGRESS;
+}
+
+static void atmel_sha_dma_callback(void *data)
+{
+	struct atmel_sha_dev *dd = data;
+
+	dd->is_async = true;
+
+	/* dma_lch_in - completed - wait DATRDY */
+	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
+}
+
+static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
+		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	struct dma_async_tx_descriptor	*in_desc;
+	struct scatterlist sg[2];
+
+	dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
+		ctx->digcnt[1], ctx->digcnt[0], length1, final);
+
+	dd->dma_lch_in.dma_conf.src_maxburst = 16;
+	dd->dma_lch_in.dma_conf.dst_maxburst = 16;
+
+	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+
+	if (length2) {
+		sg_init_table(sg, 2);
+		sg_dma_address(&sg[0]) = dma_addr1;
+		sg_dma_len(&sg[0]) = length1;
+		sg_dma_address(&sg[1]) = dma_addr2;
+		sg_dma_len(&sg[1]) = length2;
+		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
+			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	} else {
+		sg_init_table(sg, 1);
+		sg_dma_address(&sg[0]) = dma_addr1;
+		sg_dma_len(&sg[0]) = length1;
+		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
+			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	}
+	if (!in_desc)
+		return atmel_sha_complete(dd, -EINVAL);
+
+	in_desc->callback = atmel_sha_dma_callback;
+	in_desc->callback_param = dd;
+
+	atmel_sha_write_ctrl(dd, 1);
+
+	/* should be non-zero before next lines to disable clocks later */
+	ctx->digcnt[0] += length1;
+	if (ctx->digcnt[0] < length1)
+		ctx->digcnt[1]++;
+
+	if (final)
+		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
+
+	dd->flags |=  SHA_FLAGS_DMA_ACTIVE;
+
+	/* Start DMA transfer */
+	dmaengine_submit(in_desc);
+	dma_async_issue_pending(dd->dma_lch_in.chan);
+
+	return -EINPROGRESS;
+}
+
+static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
+		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
+{
+	if (dd->caps.has_dma)
+		return atmel_sha_xmit_dma(dd, dma_addr1, length1,
+				dma_addr2, length2, final);
+	else
+		return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
+				dma_addr2, length2, final);
+}
+
+static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	int bufcnt;
+
+	atmel_sha_append_sg(ctx);
+	atmel_sha_fill_padding(ctx, 0);
+	bufcnt = ctx->bufcnt;
+	ctx->bufcnt = 0;
+
+	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
+}
+
+static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
+					struct atmel_sha_reqctx *ctx,
+					size_t length, int final)
+{
+	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
+				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+		dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
+				ctx->block_size);
+		return atmel_sha_complete(dd, -EINVAL);
+	}
+
+	ctx->flags &= ~SHA_FLAGS_SG;
+
+	/* the next call does not fail, so no unmap is needed on error */
+	return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
+}
+
+static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	unsigned int final;
+	size_t count;
+
+	atmel_sha_append_sg(ctx);
+
+	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
+
+	dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n",
+		 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
+
+	if (final)
+		atmel_sha_fill_padding(ctx, 0);
+
+	if (final || (ctx->bufcnt == ctx->buflen)) {
+		count = ctx->bufcnt;
+		ctx->bufcnt = 0;
+		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
+	}
+
+	return 0;
+}
+
+static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+	unsigned int length, final, tail;
+	struct scatterlist *sg;
+	unsigned int count;
+
+	if (!ctx->total)
+		return 0;
+
+	if (ctx->bufcnt || ctx->offset)
+		return atmel_sha_update_dma_slow(dd);
+
+	dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n",
+		ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
+
+	sg = ctx->sg;
+
+	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+		return atmel_sha_update_dma_slow(dd);
+
+	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
+		/* size is not ctx->block_size aligned */
+		return atmel_sha_update_dma_slow(dd);
+
+	length = min(ctx->total, sg->length);
+
+	if (sg_is_last(sg)) {
+		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
+			/* not last sg must be ctx->block_size aligned */
+			tail = length & (ctx->block_size - 1);
+			length -= tail;
+		}
+	}
+
+	ctx->total -= length;
+	ctx->offset = length; /* offset where to start slow */
+
+	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
+
+	/* Add padding */
+	if (final) {
+		tail = length & (ctx->block_size - 1);
+		length -= tail;
+		ctx->total += tail;
+		ctx->offset = length; /* offset where to start slow */
+
+		sg = ctx->sg;
+		atmel_sha_append_sg(ctx);
+
+		atmel_sha_fill_padding(ctx, length);
+
+		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
+			ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
+		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+			dev_err(dd->dev, "dma %zu bytes error\n",
+				ctx->buflen + ctx->block_size);
+			return atmel_sha_complete(dd, -EINVAL);
+		}
+
+		if (length == 0) {
+			ctx->flags &= ~SHA_FLAGS_SG;
+			count = ctx->bufcnt;
+			ctx->bufcnt = 0;
+			return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
+					0, final);
+		} else {
+			ctx->sg = sg;
+			if (!dma_map_sg(dd->dev, ctx->sg, 1,
+					DMA_TO_DEVICE)) {
+				dev_err(dd->dev, "dma_map_sg error\n");
+				return atmel_sha_complete(dd, -EINVAL);
+			}
+
+			ctx->flags |= SHA_FLAGS_SG;
+
+			count = ctx->bufcnt;
+			ctx->bufcnt = 0;
+			return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
+					length, ctx->dma_addr, count, final);
+		}
+	}
+
+	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
+		dev_err(dd->dev, "dma_map_sg error\n");
+		return atmel_sha_complete(dd, -EINVAL);
+	}
+
+	ctx->flags |= SHA_FLAGS_SG;
+
+	/* the next call does not fail, so no unmap is needed on error */
+	return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
+								0, final);
+}
+
+static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+
+	if (ctx->flags & SHA_FLAGS_SG) {
+		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+		if (ctx->sg->length == ctx->offset) {
+			ctx->sg = sg_next(ctx->sg);
+			if (ctx->sg)
+				ctx->offset = 0;
+		}
+		if (ctx->flags & SHA_FLAGS_PAD) {
+			dma_unmap_single(dd->dev, ctx->dma_addr,
+				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
+		}
+	} else {
+		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
+						ctx->block_size, DMA_TO_DEVICE);
+	}
+
+	return 0;
+}
+
+static int atmel_sha_update_req(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	int err;
+
+	dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
+		ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
+
+	if (ctx->flags & SHA_FLAGS_CPU)
+		err = atmel_sha_update_cpu(dd);
+	else
+		err = atmel_sha_update_dma_start(dd);
+
+	/* wait for dma completion before we can take more data */
+	dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
+			err, ctx->digcnt[1], ctx->digcnt[0]);
+
+	return err;
+}
+
+static int atmel_sha_final_req(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	int err = 0;
+	int count;
+
+	if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
+		atmel_sha_fill_padding(ctx, 0);
+		count = ctx->bufcnt;
+		ctx->bufcnt = 0;
+		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
+	} else {
+		/* faster to handle last block with cpu */
+		atmel_sha_fill_padding(ctx, 0);
+		count = ctx->bufcnt;
+		ctx->bufcnt = 0;
+		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
+	}
+
+	dev_dbg(dd->dev, "final_req: err: %d\n", err);
+
+	return err;
+}
+
+static void atmel_sha_copy_hash(struct ahash_request *req)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	u32 *hash = (u32 *)ctx->digest;
+	unsigned int i, hashsize;
+
+	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+	case SHA_FLAGS_SHA1:
+		hashsize = SHA1_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA224:
+	case SHA_FLAGS_SHA256:
+		hashsize = SHA256_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA384:
+	case SHA_FLAGS_SHA512:
+		hashsize = SHA512_DIGEST_SIZE;
+		break;
+
+	default:
+		/* Should not happen... */
+		return;
+	}
+
+	for (i = 0; i < hashsize / sizeof(u32); ++i)
+		hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+	ctx->flags |= SHA_FLAGS_RESTORE;
+}
+
+static void atmel_sha_copy_ready_hash(struct ahash_request *req)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	if (!req->result)
+		return;
+
+	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+	default:
+	case SHA_FLAGS_SHA1:
+		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
+		break;
+
+	case SHA_FLAGS_SHA224:
+		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
+		break;
+
+	case SHA_FLAGS_SHA256:
+		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
+		break;
+
+	case SHA_FLAGS_SHA384:
+		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
+		break;
+
+	case SHA_FLAGS_SHA512:
+		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
+		break;
+	}
+}
+
+static int atmel_sha_finish(struct ahash_request *req)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct atmel_sha_dev *dd = ctx->dd;
+
+	if (ctx->digcnt[0] || ctx->digcnt[1])
+		atmel_sha_copy_ready_hash(req);
+
+	dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1],
+		ctx->digcnt[0], ctx->bufcnt);
+
+	return 0;
+}
+
+static void atmel_sha_finish_req(struct ahash_request *req, int err)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct atmel_sha_dev *dd = ctx->dd;
+
+	if (!err) {
+		atmel_sha_copy_hash(req);
+		if (SHA_FLAGS_FINAL & dd->flags)
+			err = atmel_sha_finish(req);
+	} else {
+		ctx->flags |= SHA_FLAGS_ERROR;
+	}
+
+	/* atomic operation is not needed here */
+	(void)atmel_sha_complete(dd, err);
+}
+
+static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
+{
+	int err;
+
+	err = clk_enable(dd->iclk);
+	if (err)
+		return err;
+
+	if (!(SHA_FLAGS_INIT & dd->flags)) {
+		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
+		dd->flags |= SHA_FLAGS_INIT;
+		dd->err = 0;
+	}
+
+	return 0;
+}
+
+static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
+{
+	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
+}
+
+static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
+{
+	atmel_sha_hw_init(dd);
+
+	dd->hw_version = atmel_sha_get_version(dd);
+
+	dev_info(dd->dev,
+			"version: 0x%x\n", dd->hw_version);
+
+	clk_disable(dd->iclk);
+}
+
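+/*
+ * Enqueue @req (when non-NULL) and, if the hardware is idle, start the
+ * next queued request. When the request being started is not @req itself,
+ * the enqueue status of @req is returned; otherwise the synchronous
+ * result of ctx->start() is returned.
+ */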
+static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
+				  struct ahash_request *req)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct atmel_sha_ctx *ctx;
+	unsigned long flags;
+	bool start_async;
+	int err = 0, ret = 0;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ahash_enqueue_request(&dd->queue, req);
+
+	if (SHA_FLAGS_BUSY & dd->flags) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
+
+	backlog = crypto_get_backlog(&dd->queue);
+	async_req = crypto_dequeue_request(&dd->queue);
+	if (async_req)
+		dd->flags |= SHA_FLAGS_BUSY;
+
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!async_req)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	ctx = crypto_tfm_ctx(async_req->tfm);
+
+	dd->req = ahash_request_cast(async_req);
+	start_async = (dd->req != req);
+	dd->is_async = start_async;
+	dd->force_complete = false;
+
+	/* WARNING: ctx->start() MAY change dd->is_async. */
+	err = ctx->start(dd);
+	return (start_async) ? ret : err;
+}
+
+static int atmel_sha_done(struct atmel_sha_dev *dd);
+
+static int atmel_sha_start(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	int err;
+
+	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+						ctx->op, req->nbytes);
+
+	err = atmel_sha_hw_init(dd);
+	if (err)
+		return atmel_sha_complete(dd, err);
+
+	/*
+	 * atmel_sha_update_req() and atmel_sha_final_req() can return either:
+	 *  -EINPROGRESS: the hardware is busy and the SHA driver will resume
+	 *                its job later in the done_task.
+	 *                This is the main path.
+	 *
+	 * 0: the SHA driver can continue its job then release the hardware
+	 *    later, if needed, with atmel_sha_finish_req().
+	 *    This is the alternate path.
+	 *
+	 * < 0: an error has occurred so atmel_sha_complete(dd, err) has already
+	 *      been called, hence the hardware has been released.
+	 *      The SHA driver must stop its job without calling
+	 *      atmel_sha_finish_req(), otherwise atmel_sha_complete() would be
+	 *      called a second time.
+	 *
+	 * Please note that currently, atmel_sha_final_req() never returns 0.
+	 */
+
+	dd->resume = atmel_sha_done;
+	if (ctx->op == SHA_OP_UPDATE) {
+		err = atmel_sha_update_req(dd);
+		if (!err && (ctx->flags & SHA_FLAGS_FINUP))
+			/* no final() after finup() */
+			err = atmel_sha_final_req(dd);
+	} else if (ctx->op == SHA_OP_FINAL) {
+		err = atmel_sha_final_req(dd);
+	}
+
+	if (!err)
+		/* done_task will not finish it, so do it here */
+		atmel_sha_finish_req(req, err);
+
+	dev_dbg(dd->dev, "exit, err: %d\n", err);
+
+	return err;
+}
+
+static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct atmel_sha_dev *dd = tctx->dd;
+
+	ctx->op = op;
+
+	return atmel_sha_handle_queue(dd, req);
+}
+
+static int atmel_sha_update(struct ahash_request *req)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	if (!req->nbytes)
+		return 0;
+
+	ctx->total = req->nbytes;
+	ctx->sg = req->src;
+	ctx->offset = 0;
+
+	if (ctx->flags & SHA_FLAGS_FINUP) {
+		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
+			/* faster to use CPU for short transfers */
+			ctx->flags |= SHA_FLAGS_CPU;
+	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
+		atmel_sha_append_sg(ctx);
+		return 0;
+	}
+	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
+}
+
+static int atmel_sha_final(struct ahash_request *req)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	ctx->flags |= SHA_FLAGS_FINUP;
+
+	if (ctx->flags & SHA_FLAGS_ERROR)
+		return 0; /* uncompleted hash is not needed */
+
+	if (ctx->flags & SHA_FLAGS_PAD)
+		/* copy ready hash (+ finalize hmac) */
+		return atmel_sha_finish(req);
+
+	return atmel_sha_enqueue(req, SHA_OP_FINAL);
+}
+
+static int atmel_sha_finup(struct ahash_request *req)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	int err1, err2;
+
+	ctx->flags |= SHA_FLAGS_FINUP;
+
+	err1 = atmel_sha_update(req);
+	if (err1 == -EINPROGRESS ||
+	    (err1 == -EBUSY && (ahash_request_flags(req) &
+				CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return err1;
+
+	/*
+	 * final() always has to be called to clean up resources, even if
+	 * update() failed, except when update() returned -EINPROGRESS (or
+	 * -EBUSY for a backlogged request).
+	 */
+	err2 = atmel_sha_final(req);
+
+	return err1 ?: err2;
+}
+
+static int atmel_sha_digest(struct ahash_request *req)
+{
+	return atmel_sha_init(req) ?: atmel_sha_finup(req);
+}
+
+
+static int atmel_sha_export(struct ahash_request *req, void *out)
+{
+	const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	memcpy(out, ctx, sizeof(*ctx));
+	return 0;
+}
+
+static int atmel_sha_import(struct ahash_request *req, const void *in)
+{
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	memcpy(ctx, in, sizeof(*ctx));
+	return 0;
+}
+
+static int atmel_sha_cra_init(struct crypto_tfm *tfm)
+{
+	struct atmel_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct atmel_sha_reqctx));
+	ctx->start = atmel_sha_start;
+
+	return 0;
+}
+
+static struct ahash_alg sha_1_256_algs[] = {
+{
+	.init		= atmel_sha_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.finup		= atmel_sha_finup,
+	.digest		= atmel_sha_digest,
+	.export		= atmel_sha_export,
+	.import		= atmel_sha_import,
+	.halg = {
+		.digestsize	= SHA1_DIGEST_SIZE,
+		.statesize	= sizeof(struct atmel_sha_reqctx),
+		.base	= {
+			.cra_name		= "sha1",
+			.cra_driver_name	= "atmel-sha1",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC,
+			.cra_blocksize		= SHA1_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
+			.cra_alignmask		= 0,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_cra_init,
+		}
+	}
+},
+{
+	.init		= atmel_sha_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.finup		= atmel_sha_finup,
+	.digest		= atmel_sha_digest,
+	.export		= atmel_sha_export,
+	.import		= atmel_sha_import,
+	.halg = {
+		.digestsize	= SHA256_DIGEST_SIZE,
+		.statesize	= sizeof(struct atmel_sha_reqctx),
+		.base	= {
+			.cra_name		= "sha256",
+			.cra_driver_name	= "atmel-sha256",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC,
+			.cra_blocksize		= SHA256_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
+			.cra_alignmask		= 0,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_cra_init,
+		}
+	}
+},
+};
+
+static struct ahash_alg sha_224_alg = {
+	.init		= atmel_sha_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.finup		= atmel_sha_finup,
+	.digest		= atmel_sha_digest,
+	.export		= atmel_sha_export,
+	.import		= atmel_sha_import,
+	.halg = {
+		.digestsize	= SHA224_DIGEST_SIZE,
+		.statesize	= sizeof(struct atmel_sha_reqctx),
+		.base	= {
+			.cra_name		= "sha224",
+			.cra_driver_name	= "atmel-sha224",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC,
+			.cra_blocksize		= SHA224_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
+			.cra_alignmask		= 0,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_cra_init,
+		}
+	}
+};
+
+static struct ahash_alg sha_384_512_algs[] = {
+{
+	.init		= atmel_sha_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.finup		= atmel_sha_finup,
+	.digest		= atmel_sha_digest,
+	.export		= atmel_sha_export,
+	.import		= atmel_sha_import,
+	.halg = {
+		.digestsize	= SHA384_DIGEST_SIZE,
+		.statesize	= sizeof(struct atmel_sha_reqctx),
+		.base	= {
+			.cra_name		= "sha384",
+			.cra_driver_name	= "atmel-sha384",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC,
+			.cra_blocksize		= SHA384_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
+			.cra_alignmask		= 0x3,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_cra_init,
+		}
+	}
+},
+{
+	.init		= atmel_sha_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.finup		= atmel_sha_finup,
+	.digest		= atmel_sha_digest,
+	.export		= atmel_sha_export,
+	.import		= atmel_sha_import,
+	.halg = {
+		.digestsize	= SHA512_DIGEST_SIZE,
+		.statesize	= sizeof(struct atmel_sha_reqctx),
+		.base	= {
+			.cra_name		= "sha512",
+			.cra_driver_name	= "atmel-sha512",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC,
+			.cra_blocksize		= SHA512_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
+			.cra_alignmask		= 0x3,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_cra_init,
+		}
+	}
+},
+};
+
+static void atmel_sha_queue_task(unsigned long data)
+{
+	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
+
+	atmel_sha_handle_queue(dd, NULL);
+}
+
+static int atmel_sha_done(struct atmel_sha_dev *dd)
+{
+	int err = 0;
+
+	if (SHA_FLAGS_CPU & dd->flags) {
+		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
+			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
+			goto finish;
+		}
+	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
+		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
+			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
+			atmel_sha_update_dma_stop(dd);
+			if (dd->err) {
+				err = dd->err;
+				goto finish;
+			}
+		}
+		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
+			/* hash or semi-hash ready */
+			dd->flags &= ~(SHA_FLAGS_DMA_READY |
+						SHA_FLAGS_OUTPUT_READY);
+			err = atmel_sha_update_dma_start(dd);
+			if (err != -EINPROGRESS)
+				goto finish;
+		}
+	}
+	return err;
+
+finish:
+	/* finish the current request */
+	atmel_sha_finish_req(dd->req, err);
+
+	return err;
+}
+
+static void atmel_sha_done_task(unsigned long data)
+{
+	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
+
+	dd->is_async = true;
+	(void)dd->resume(dd);
+}
+
+static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
+{
+	struct atmel_sha_dev *sha_dd = dev_id;
+	u32 reg;
+
+	reg = atmel_sha_read(sha_dd, SHA_ISR);
+	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
+		atmel_sha_write(sha_dd, SHA_IDR, reg);
+		if (SHA_FLAGS_BUSY & sha_dd->flags) {
+			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
+			if (!(SHA_FLAGS_CPU & sha_dd->flags))
+				sha_dd->flags |= SHA_FLAGS_DMA_READY;
+			tasklet_schedule(&sha_dd->done_task);
+		} else {
+			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
+		}
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+
+/* DMA transfer functions */
+
+static bool atmel_sha_dma_check_aligned(struct atmel_sha_dev *dd,
+					struct scatterlist *sg,
+					size_t len)
+{
+	struct atmel_sha_dma *dma = &dd->dma_lch_in;
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	size_t bs = ctx->block_size;
+	int nents;
+
+	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
+		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+			return false;
+
+		/*
+		 * This is the last sg, the only one that is allowed to
+		 * have an unaligned length.
+		 */
+		if (len <= sg->length) {
+			dma->nents = nents + 1;
+			dma->last_sg_length = sg->length;
+			sg->length = ALIGN(len, sizeof(u32));
+			return true;
+		}
+
+		/* All other sg lengths MUST be aligned to the block size. */
+		if (!IS_ALIGNED(sg->length, bs))
+			return false;
+
+		len -= sg->length;
+	}
+
+	return false;
+}
+
+static void atmel_sha_dma_callback2(void *data)
+{
+	struct atmel_sha_dev *dd = data;
+	struct atmel_sha_dma *dma = &dd->dma_lch_in;
+	struct scatterlist *sg;
+	int nents;
+
+	dmaengine_terminate_all(dma->chan);
+	dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
+
+	sg = dma->sg;
+	for (nents = 0; nents < dma->nents - 1; ++nents)
+		sg = sg_next(sg);
+	sg->length = dma->last_sg_length;
+
+	dd->is_async = true;
+	(void)atmel_sha_wait_for_data_ready(dd, dd->resume);
+}
+
+static int atmel_sha_dma_start(struct atmel_sha_dev *dd,
+			       struct scatterlist *src,
+			       size_t len,
+			       atmel_sha_fn_t resume)
+{
+	struct atmel_sha_dma *dma = &dd->dma_lch_in;
+	struct dma_slave_config *config = &dma->dma_conf;
+	struct dma_chan *chan = dma->chan;
+	struct dma_async_tx_descriptor *desc;
+	dma_cookie_t cookie;
+	unsigned int sg_len;
+	int err;
+
+	dd->resume = resume;
+
+	/*
+	 * dma->nents has already been initialized by
+	 * atmel_sha_dma_check_aligned().
+	 */
+	dma->sg = src;
+	sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
+	if (!sg_len) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	config->src_maxburst = 16;
+	config->dst_maxburst = 16;
+	err = dmaengine_slave_config(chan, config);
+	if (err)
+		goto unmap_sg;
+
+	desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc) {
+		err = -ENOMEM;
+		goto unmap_sg;
+	}
+
+	desc->callback = atmel_sha_dma_callback2;
+	desc->callback_param = dd;
+	cookie = dmaengine_submit(desc);
+	err = dma_submit_error(cookie);
+	if (err)
+		goto unmap_sg;
+
+	dma_async_issue_pending(chan);
+
+	return -EINPROGRESS;
+
+unmap_sg:
+	dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
+exit:
+	return atmel_sha_complete(dd, err);
+}
+
+
+/* CPU transfer functions */
+
+static int atmel_sha_cpu_transfer(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	const u32 *words = (const u32 *)ctx->buffer;
+	size_t i, num_words;
+	u32 isr, din, din_inc;
+
+	din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1;
+	for (;;) {
+		/* Write data into the Input Data Registers. */
+		num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32));
+		for (i = 0, din = 0; i < num_words; ++i, din += din_inc)
+			atmel_sha_write(dd, SHA_REG_DIN(din), words[i]);
+
+		ctx->offset += ctx->bufcnt;
+		ctx->total -= ctx->bufcnt;
+
+		if (!ctx->total)
+			break;
+
+		/*
+		 * Prepare next block:
+		 * Fill ctx->buffer now with the next data to be written into
+		 * IDATARx: it gives time for the SHA hardware to process
+		 * the current data so the SHA_INT_DATARDY flag might be set
+		 * in SHA_ISR when polling this register at the beginning of
+		 * the next loop.
+		 */
+		ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
+		scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
+					 ctx->offset, ctx->bufcnt, 0);
+
+		/* Wait for hardware to be ready again. */
+		isr = atmel_sha_read(dd, SHA_ISR);
+		if (!(isr & SHA_INT_DATARDY)) {
+			/* Not ready yet. */
+			dd->resume = atmel_sha_cpu_transfer;
+			atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
+			return -EINPROGRESS;
+		}
+	}
+
+	if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY)))
+		return dd->cpu_transfer_complete(dd);
+
+	return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete);
+}
+
+static int atmel_sha_cpu_start(struct atmel_sha_dev *dd,
+			       struct scatterlist *sg,
+			       unsigned int len,
+			       bool idatar0_only,
+			       bool wait_data_ready,
+			       atmel_sha_fn_t resume)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	if (!len)
+		return resume(dd);
+
+	ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY);
+
+	if (idatar0_only)
+		ctx->flags |= SHA_FLAGS_IDATAR0;
+
+	if (wait_data_ready)
+		ctx->flags |= SHA_FLAGS_WAIT_DATARDY;
+
+	ctx->sg = sg;
+	ctx->total = len;
+	ctx->offset = 0;
+
+	/* Prepare the first block to be written. */
+	ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
+	scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
+				 ctx->offset, ctx->bufcnt, 0);
+
+	dd->cpu_transfer_complete = resume;
+	return atmel_sha_cpu_transfer(dd);
+}
+
+static int atmel_sha_cpu_hash(struct atmel_sha_dev *dd,
+			      const void *data, unsigned int datalen,
+			      bool auto_padding,
+			      atmel_sha_fn_t resume)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	u32 msglen = (auto_padding) ? datalen : 0;
+	u32 mr = SHA_MR_MODE_AUTO;
+
+	if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding))
+		return atmel_sha_complete(dd, -EINVAL);
+
+	mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
+	atmel_sha_write(dd, SHA_MR, mr);
+	atmel_sha_write(dd, SHA_MSR, msglen);
+	atmel_sha_write(dd, SHA_BCR, msglen);
+	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+
+	sg_init_one(&dd->tmp, data, datalen);
+	return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume);
+}
+
+
+/* hmac functions */
+
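+/*
+ * HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)), where K' is the key
+ * padded with zeros to the block size (or the hash of the key when it is
+ * longer than one block), and ipad/opad are the 0x36/0x5c byte constants
+ * repeated over a full block (RFC 2104).
+ */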
+struct atmel_sha_hmac_key {
+	bool			valid;
+	unsigned int		keylen;
+	u8			buffer[SHA512_BLOCK_SIZE];
+	u8			*keydup;
+};
+
+static inline void atmel_sha_hmac_key_init(struct atmel_sha_hmac_key *hkey)
+{
+	memset(hkey, 0, sizeof(*hkey));
+}
+
+static inline void atmel_sha_hmac_key_release(struct atmel_sha_hmac_key *hkey)
+{
+	kfree(hkey->keydup);
+	memset(hkey, 0, sizeof(*hkey));
+}
+
+static inline int atmel_sha_hmac_key_set(struct atmel_sha_hmac_key *hkey,
+					 const u8 *key,
+					 unsigned int keylen)
+{
+	atmel_sha_hmac_key_release(hkey);
+
+	if (keylen > sizeof(hkey->buffer)) {
+		hkey->keydup = kmemdup(key, keylen, GFP_KERNEL);
+		if (!hkey->keydup)
+			return -ENOMEM;
+
+	} else {
+		memcpy(hkey->buffer, key, keylen);
+	}
+
+	hkey->valid = true;
+	hkey->keylen = keylen;
+	return 0;
+}
+
+static inline bool atmel_sha_hmac_key_get(const struct atmel_sha_hmac_key *hkey,
+					  const u8 **key,
+					  unsigned int *keylen)
+{
+	if (!hkey->valid)
+		return false;
+
+	*keylen = hkey->keylen;
+	*key = (hkey->keydup) ? hkey->keydup : hkey->buffer;
+	return true;
+}
+
+
+struct atmel_sha_hmac_ctx {
+	struct atmel_sha_ctx	base;
+
+	struct atmel_sha_hmac_key	hkey;
+	u32			ipad[SHA512_BLOCK_SIZE / sizeof(u32)];
+	u32			opad[SHA512_BLOCK_SIZE / sizeof(u32)];
+	atmel_sha_fn_t		resume;
+};
+
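+/*
+ * atmel_sha_hmac_setup() derives K' from the key set by setkey(), then
+ * hashes the (K' ^ ipad) and (K' ^ opad) blocks and caches the resulting
+ * intermediate digests in hmac->ipad and hmac->opad; the raw key material
+ * is released once these values are available.
+ */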
+static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
+				atmel_sha_fn_t resume);
+static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
+				      const u8 *key, unsigned int keylen);
+static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd);
+static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd);
+static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd);
+static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd);
+
+static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd);
+static int atmel_sha_hmac_final(struct atmel_sha_dev *dd);
+static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd);
+static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd);
+
+static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
+				atmel_sha_fn_t resume)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+	unsigned int keylen;
+	const u8 *key;
+	size_t bs;
+
+	hmac->resume = resume;
+	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+	case SHA_FLAGS_SHA1:
+		ctx->block_size = SHA1_BLOCK_SIZE;
+		ctx->hash_size = SHA1_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA224:
+		ctx->block_size = SHA224_BLOCK_SIZE;
+		ctx->hash_size = SHA256_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA256:
+		ctx->block_size = SHA256_BLOCK_SIZE;
+		ctx->hash_size = SHA256_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA384:
+		ctx->block_size = SHA384_BLOCK_SIZE;
+		ctx->hash_size = SHA512_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA512:
+		ctx->block_size = SHA512_BLOCK_SIZE;
+		ctx->hash_size = SHA512_DIGEST_SIZE;
+		break;
+
+	default:
+		return atmel_sha_complete(dd, -EINVAL);
+	}
+	bs = ctx->block_size;
+
+	if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen)))
+		return resume(dd);
+
+	/* Compute K' from K. */
+	if (unlikely(keylen > bs))
+		return atmel_sha_hmac_prehash_key(dd, key, keylen);
+
+	/* Prepare ipad. */
+	memcpy((u8 *)hmac->ipad, key, keylen);
+	memset((u8 *)hmac->ipad + keylen, 0, bs - keylen);
+	return atmel_sha_hmac_compute_ipad_hash(dd);
+}
+
+static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
+				      const u8 *key, unsigned int keylen)
+{
+	return atmel_sha_cpu_hash(dd, key, keylen, true,
+				  atmel_sha_hmac_prehash_key_done);
+}
+
+static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	size_t ds = crypto_ahash_digestsize(tfm);
+	size_t bs = ctx->block_size;
+	size_t i, num_words = ds / sizeof(u32);
+
+	/* Prepare ipad. */
+	for (i = 0; i < num_words; ++i)
+		hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
+	memset((u8 *)hmac->ipad + ds, 0, bs - ds);
+	return atmel_sha_hmac_compute_ipad_hash(dd);
+}
+
+static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	size_t bs = ctx->block_size;
+	size_t i, num_words = bs / sizeof(u32);
+
+	memcpy(hmac->opad, hmac->ipad, bs);
+	for (i = 0; i < num_words; ++i) {
+		hmac->ipad[i] ^= 0x36363636;
+		hmac->opad[i] ^= 0x5c5c5c5c;
+	}
+
+	return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false,
+				  atmel_sha_hmac_compute_opad_hash);
+}
+
+static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	size_t bs = ctx->block_size;
+	size_t hs = ctx->hash_size;
+	size_t i, num_words = hs / sizeof(u32);
+
+	for (i = 0; i < num_words; ++i)
+		hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
+	return atmel_sha_cpu_hash(dd, hmac->opad, bs, false,
+				  atmel_sha_hmac_setup_done);
+}
+
+static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	size_t hs = ctx->hash_size;
+	size_t i, num_words = hs / sizeof(u32);
+
+	for (i = 0; i < num_words; ++i)
+		hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
+	atmel_sha_hmac_key_release(&hmac->hkey);
+	return hmac->resume(dd);
+}
+
+static int atmel_sha_hmac_start(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	int err;
+
+	err = atmel_sha_hw_init(dd);
+	if (err)
+		return atmel_sha_complete(dd, err);
+
+	switch (ctx->op) {
+	case SHA_OP_INIT:
+		err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_init_done);
+		break;
+
+	case SHA_OP_UPDATE:
+		dd->resume = atmel_sha_done;
+		err = atmel_sha_update_req(dd);
+		break;
+
+	case SHA_OP_FINAL:
+		dd->resume = atmel_sha_hmac_final;
+		err = atmel_sha_final_req(dd);
+		break;
+
+	case SHA_OP_DIGEST:
+		err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_digest2);
+		break;
+
+	default:
+		return atmel_sha_complete(dd, -EINVAL);
+	}
+
+	return err;
+}
+
+static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+				 unsigned int keylen)
+{
+	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+
+	if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
+		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int atmel_sha_hmac_init(struct ahash_request *req)
+{
+	int err;
+
+	err = atmel_sha_init(req);
+	if (err)
+		return err;
+
+	return atmel_sha_enqueue(req, SHA_OP_INIT);
+}
+
+static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+	size_t bs = ctx->block_size;
+	size_t hs = ctx->hash_size;
+
+	ctx->bufcnt = 0;
+	ctx->digcnt[0] = bs;
+	ctx->digcnt[1] = 0;
+	ctx->flags |= SHA_FLAGS_RESTORE;
+	memcpy(ctx->digest, hmac->ipad, hs);
+	return atmel_sha_complete(dd, 0);
+}
+
+static int atmel_sha_hmac_final(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+	u32 *digest = (u32 *)ctx->digest;
+	size_t ds = crypto_ahash_digestsize(tfm);
+	size_t bs = ctx->block_size;
+	size_t hs = ctx->hash_size;
+	size_t i, num_words;
+	u32 mr;
+
+	/* Save d = SHA((K' + ipad) | msg). */
+	num_words = ds / sizeof(u32);
+	for (i = 0; i < num_words; ++i)
+		digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
+
+	/* Restore context to finish computing SHA((K' + opad) | d). */
+	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
+	num_words = hs / sizeof(u32);
+	for (i = 0; i < num_words; ++i)
+		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
+
+	mr = SHA_MR_MODE_AUTO | SHA_MR_UIHV;
+	mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
+	atmel_sha_write(dd, SHA_MR, mr);
+	atmel_sha_write(dd, SHA_MSR, bs + ds);
+	atmel_sha_write(dd, SHA_BCR, ds);
+	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+
+	sg_init_one(&dd->tmp, digest, ds);
+	return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true,
+				   atmel_sha_hmac_final_done);
+}
+
+static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd)
+{
+	/*
+	 * req->result might not be sizeof(u32) aligned, so copy the
+	 * digest into ctx->digest[] first, then memcpy() the data into
+	 * req->result.
+	 */
+	atmel_sha_copy_hash(dd->req);
+	atmel_sha_copy_ready_hash(dd->req);
+	return atmel_sha_complete(dd, 0);
+}
+
+static int atmel_sha_hmac_digest(struct ahash_request *req)
+{
+	int err;
+
+	err = atmel_sha_init(req);
+	if (err)
+		return err;
+
+	return atmel_sha_enqueue(req, SHA_OP_DIGEST);
+}
+
+static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+	size_t hs = ctx->hash_size;
+	size_t i, num_words = hs / sizeof(u32);
+	bool use_dma = false;
+	u32 mr;
+
+	/* Special case for empty message. */
+	if (!req->nbytes)
+		return atmel_sha_complete(dd, -EINVAL); // TODO:
+
+	/* Check DMA threshold and alignment. */
+	if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
+	    atmel_sha_dma_check_aligned(dd, req->src, req->nbytes))
+		use_dma = true;
+
+	/* Write both initial hash values to compute a HMAC. */
+	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
+	for (i = 0; i < num_words; ++i)
+		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
+
+	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
+	for (i = 0; i < num_words; ++i)
+		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
+
+	/* Write the Mode, Message Size, Bytes Count then Control Registers. */
+	mr = (SHA_MR_HMAC | SHA_MR_DUALBUFF);
+	mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
+	if (use_dma)
+		mr |= SHA_MR_MODE_IDATAR0;
+	else
+		mr |= SHA_MR_MODE_AUTO;
+	atmel_sha_write(dd, SHA_MR, mr);
+
+	atmel_sha_write(dd, SHA_MSR, req->nbytes);
+	atmel_sha_write(dd, SHA_BCR, req->nbytes);
+
+	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+
+	/* Process data. */
+	if (use_dma)
+		return atmel_sha_dma_start(dd, req->src, req->nbytes,
+					   atmel_sha_hmac_final_done);
+
+	return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
+				   atmel_sha_hmac_final_done);
+}
+
+static int atmel_sha_hmac_cra_init(struct crypto_tfm *tfm)
+{
+	struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct atmel_sha_reqctx));
+	hmac->base.start = atmel_sha_hmac_start;
+	atmel_sha_hmac_key_init(&hmac->hkey);
+
+	return 0;
+}
+
+static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+	struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);
+
+	atmel_sha_hmac_key_release(&hmac->hkey);
+}
+
+static struct ahash_alg sha_hmac_algs[] = {
+{
+	.init		= atmel_sha_hmac_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.digest		= atmel_sha_hmac_digest,
+	.setkey		= atmel_sha_hmac_setkey,
+	.export		= atmel_sha_export,
+	.import		= atmel_sha_import,
+	.halg = {
+		.digestsize	= SHA1_DIGEST_SIZE,
+		.statesize	= sizeof(struct atmel_sha_reqctx),
+		.base	= {
+			.cra_name		= "hmac(sha1)",
+			.cra_driver_name	= "atmel-hmac-sha1",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC,
+			.cra_blocksize		= SHA1_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_hmac_ctx),
+			.cra_alignmask		= 0,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_hmac_cra_init,
+			.cra_exit		= atmel_sha_hmac_cra_exit,
+		}
+	}
+},
+{
+	.init		= atmel_sha_hmac_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.digest		= atmel_sha_hmac_digest,
+	.setkey		= atmel_sha_hmac_setkey,
+	.export		= atmel_sha_export,
+	.import		= atmel_sha_import,
+	.halg = {
+		.digestsize	= SHA224_DIGEST_SIZE,
+		.statesize	= sizeof(struct atmel_sha_reqctx),
+		.base	= {
+			.cra_name		= "hmac(sha224)",
+			.cra_driver_name	= "atmel-hmac-sha224",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC,
+			.cra_blocksize		= SHA224_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_hmac_ctx),
+			.cra_alignmask		= 0,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_hmac_cra_init,
+			.cra_exit		= atmel_sha_hmac_cra_exit,
+		}
+	}
+},
+{
+	.init		= atmel_sha_hmac_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.digest		= atmel_sha_hmac_digest,
+	.setkey		= atmel_sha_hmac_setkey,
+	.export		= atmel_sha_export,
+	.import		= atmel_sha_import,
+	.halg = {
+		.digestsize	= SHA256_DIGEST_SIZE,
+		.statesize	= sizeof(struct atmel_sha_reqctx),
+		.base	= {
+			.cra_name		= "hmac(sha256)",
+			.cra_driver_name	= "atmel-hmac-sha256",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC,
+			.cra_blocksize		= SHA256_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_hmac_ctx),
+			.cra_alignmask		= 0,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_hmac_cra_init,
+			.cra_exit		= atmel_sha_hmac_cra_exit,
+		}
+	}
+},
+{
+	.init		= atmel_sha_hmac_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.digest		= atmel_sha_hmac_digest,
+	.setkey		= atmel_sha_hmac_setkey,
+	.export		= atmel_sha_export,
+	.import		= atmel_sha_import,
+	.halg = {
+		.digestsize	= SHA384_DIGEST_SIZE,
+		.statesize	= sizeof(struct atmel_sha_reqctx),
+		.base	= {
+			.cra_name		= "hmac(sha384)",
+			.cra_driver_name	= "atmel-hmac-sha384",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC,
+			.cra_blocksize		= SHA384_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_hmac_ctx),
+			.cra_alignmask		= 0,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_hmac_cra_init,
+			.cra_exit		= atmel_sha_hmac_cra_exit,
+		}
+	}
+},
+{
+	.init		= atmel_sha_hmac_init,
+	.update		= atmel_sha_update,
+	.final		= atmel_sha_final,
+	.digest		= atmel_sha_hmac_digest,
+	.setkey		= atmel_sha_hmac_setkey,
+	.export		= atmel_sha_export,
+	.import		= atmel_sha_import,
+	.halg = {
+		.digestsize	= SHA512_DIGEST_SIZE,
+		.statesize	= sizeof(struct atmel_sha_reqctx),
+		.base	= {
+			.cra_name		= "hmac(sha512)",
+			.cra_driver_name	= "atmel-hmac-sha512",
+			.cra_priority		= 100,
+			.cra_flags		= CRYPTO_ALG_ASYNC,
+			.cra_blocksize		= SHA512_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct atmel_sha_hmac_ctx),
+			.cra_alignmask		= 0,
+			.cra_module		= THIS_MODULE,
+			.cra_init		= atmel_sha_hmac_cra_init,
+			.cra_exit		= atmel_sha_hmac_cra_exit,
+		}
+	}
+},
+};
+
+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+/* authenc functions */
+
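+/*
+ * Helper interface used by the Atmel AES driver for authenc requests:
+ * _spawn() allocates the underlying "atmel-hmac-sha*" transform,
+ * _schedule() enqueues the request on the SHA device, _init() loads the
+ * HMAC state and hashes the associated data, _final() reads back the
+ * digest, and _abort() completes the transfer without invoking the
+ * request callback.
+ */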
+static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
+static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd);
+static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd);
+
+
+struct atmel_sha_authenc_ctx {
+	struct crypto_ahash	*tfm;
+};
+
+struct atmel_sha_authenc_reqctx {
+	struct atmel_sha_reqctx	base;
+
+	atmel_aes_authenc_fn_t	cb;
+	struct atmel_aes_dev	*aes_dev;
+
+	/* _init() parameters. */
+	struct scatterlist	*assoc;
+	u32			assoclen;
+	u32			textlen;
+
+	/* _final() parameters. */
+	u32			*digest;
+	unsigned int		digestlen;
+};
+
+static void atmel_sha_authenc_complete(struct crypto_async_request *areq,
+				       int err)
+{
+	struct ahash_request *req = areq->data;
+	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
+
+	authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async);
+}
+
+static int atmel_sha_authenc_start(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
+	int err;
+
+	/*
+	 * Force atmel_sha_complete() to call req->base.complete(), ie
+	 * atmel_sha_authenc_complete(), which in turn calls authctx->cb().
+	 */
+	dd->force_complete = true;
+
+	err = atmel_sha_hw_init(dd);
+	return authctx->cb(authctx->aes_dev, err, dd->is_async);
+}
+
+bool atmel_sha_authenc_is_ready(void)
+{
+	struct atmel_sha_ctx dummy;
+
+	dummy.dd = NULL;
+	return (atmel_sha_find_dev(&dummy) != NULL);
+}
+EXPORT_SYMBOL_GPL(atmel_sha_authenc_is_ready);
+
+unsigned int atmel_sha_authenc_get_reqsize(void)
+{
+	return sizeof(struct atmel_sha_authenc_reqctx);
+}
+EXPORT_SYMBOL_GPL(atmel_sha_authenc_get_reqsize);
+
+struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode)
+{
+	struct atmel_sha_authenc_ctx *auth;
+	struct crypto_ahash *tfm;
+	struct atmel_sha_ctx *tctx;
+	const char *name;
+	int err = -EINVAL;
+
+	switch (mode & SHA_FLAGS_MODE_MASK) {
+	case SHA_FLAGS_HMAC_SHA1:
+		name = "atmel-hmac-sha1";
+		break;
+
+	case SHA_FLAGS_HMAC_SHA224:
+		name = "atmel-hmac-sha224";
+		break;
+
+	case SHA_FLAGS_HMAC_SHA256:
+		name = "atmel-hmac-sha256";
+		break;
+
+	case SHA_FLAGS_HMAC_SHA384:
+		name = "atmel-hmac-sha384";
+		break;
+
+	case SHA_FLAGS_HMAC_SHA512:
+		name = "atmel-hmac-sha512";
+		break;
+
+	default:
+		goto error;
+	}
+
+	tfm = crypto_alloc_ahash(name, 0, 0);
+	if (IS_ERR(tfm)) {
+		err = PTR_ERR(tfm);
+		goto error;
+	}
+	tctx = crypto_ahash_ctx(tfm);
+	tctx->start = atmel_sha_authenc_start;
+	tctx->flags = mode;
+
+	auth = kzalloc(sizeof(*auth), GFP_KERNEL);
+	if (!auth) {
+		err = -ENOMEM;
+		goto err_free_ahash;
+	}
+	auth->tfm = tfm;
+
+	return auth;
+
+err_free_ahash:
+	crypto_free_ahash(tfm);
+error:
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(atmel_sha_authenc_spawn);
+
+void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth)
+{
+	if (auth)
+		crypto_free_ahash(auth->tfm);
+	kfree(auth);
+}
+EXPORT_SYMBOL_GPL(atmel_sha_authenc_free);
+
+int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
+			     const u8 *key, unsigned int keylen,
+			     u32 *flags)
+{
+	struct crypto_ahash *tfm = auth->tfm;
+	int err;
+
+	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
+	crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK);
+	err = crypto_ahash_setkey(tfm, key, keylen);
+	*flags = crypto_ahash_get_flags(tfm);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey);
+
+int atmel_sha_authenc_schedule(struct ahash_request *req,
+			       struct atmel_sha_authenc_ctx *auth,
+			       atmel_aes_authenc_fn_t cb,
+			       struct atmel_aes_dev *aes_dev)
+{
+	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
+	struct atmel_sha_reqctx *ctx = &authctx->base;
+	struct crypto_ahash *tfm = auth->tfm;
+	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct atmel_sha_dev *dd;
+
+	/* Reset request context (MUST be done first). */
+	memset(authctx, 0, sizeof(*authctx));
+
+	/* Get SHA device. */
+	dd = atmel_sha_find_dev(tctx);
+	if (!dd)
+		return cb(aes_dev, -ENODEV, false);
+
+	/* Init request context. */
+	ctx->dd = dd;
+	ctx->buflen = SHA_BUFFER_LEN;
+	authctx->cb = cb;
+	authctx->aes_dev = aes_dev;
+	ahash_request_set_tfm(req, tfm);
+	ahash_request_set_callback(req, 0, atmel_sha_authenc_complete, req);
+
+	return atmel_sha_handle_queue(dd, req);
+}
+EXPORT_SYMBOL_GPL(atmel_sha_authenc_schedule);
+
+int atmel_sha_authenc_init(struct ahash_request *req,
+			   struct scatterlist *assoc, unsigned int assoclen,
+			   unsigned int textlen,
+			   atmel_aes_authenc_fn_t cb,
+			   struct atmel_aes_dev *aes_dev)
+{
+	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
+	struct atmel_sha_reqctx *ctx = &authctx->base;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+	struct atmel_sha_dev *dd = ctx->dd;
+
+	if (unlikely(!IS_ALIGNED(assoclen, sizeof(u32))))
+		return atmel_sha_complete(dd, -EINVAL);
+
+	authctx->cb = cb;
+	authctx->aes_dev = aes_dev;
+	authctx->assoc = assoc;
+	authctx->assoclen = assoclen;
+	authctx->textlen = textlen;
+
+	ctx->flags = hmac->base.flags;
+	return atmel_sha_hmac_setup(dd, atmel_sha_authenc_init2);
+}
+EXPORT_SYMBOL_GPL(atmel_sha_authenc_init);
+
+static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
+	struct atmel_sha_reqctx *ctx = &authctx->base;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+	size_t hs = ctx->hash_size;
+	size_t i, num_words = hs / sizeof(u32);
+	u32 mr, msg_size;
+
+	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
+	for (i = 0; i < num_words; ++i)
+		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
+
+	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
+	for (i = 0; i < num_words; ++i)
+		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
+
+	mr = (SHA_MR_MODE_IDATAR0 |
+	      SHA_MR_HMAC |
+	      SHA_MR_DUALBUFF);
+	mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
+	atmel_sha_write(dd, SHA_MR, mr);
+
+	msg_size = authctx->assoclen + authctx->textlen;
+	atmel_sha_write(dd, SHA_MSR, msg_size);
+	atmel_sha_write(dd, SHA_BCR, msg_size);
+
+	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+
+	/* Process assoc data. */
+	return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen,
+				   true, false,
+				   atmel_sha_authenc_init_done);
+}
+
+static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
+
+	return authctx->cb(authctx->aes_dev, 0, dd->is_async);
+}
+
+int atmel_sha_authenc_final(struct ahash_request *req,
+			    u32 *digest, unsigned int digestlen,
+			    atmel_aes_authenc_fn_t cb,
+			    struct atmel_aes_dev *aes_dev)
+{
+	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
+	struct atmel_sha_reqctx *ctx = &authctx->base;
+	struct atmel_sha_dev *dd = ctx->dd;
+
+	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+	case SHA_FLAGS_SHA1:
+		authctx->digestlen = SHA1_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA224:
+		authctx->digestlen = SHA224_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA256:
+		authctx->digestlen = SHA256_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA384:
+		authctx->digestlen = SHA384_DIGEST_SIZE;
+		break;
+
+	case SHA_FLAGS_SHA512:
+		authctx->digestlen = SHA512_DIGEST_SIZE;
+		break;
+
+	default:
+		return atmel_sha_complete(dd, -EINVAL);
+	}
+	if (authctx->digestlen > digestlen)
+		authctx->digestlen = digestlen;
+
+	authctx->cb = cb;
+	authctx->aes_dev = aes_dev;
+	authctx->digest = digest;
+	return atmel_sha_wait_for_data_ready(dd,
+					     atmel_sha_authenc_final_done);
+}
+EXPORT_SYMBOL_GPL(atmel_sha_authenc_final);
+
+static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
+	size_t i, num_words = authctx->digestlen / sizeof(u32);
+
+	for (i = 0; i < num_words; ++i)
+		authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
+
+	return atmel_sha_complete(dd, 0);
+}
+
+void atmel_sha_authenc_abort(struct ahash_request *req)
+{
+	struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
+	struct atmel_sha_reqctx *ctx = &authctx->base;
+	struct atmel_sha_dev *dd = ctx->dd;
+
+	/* Prevent atmel_sha_complete() from calling req->base.complete(). */
+	dd->is_async = false;
+	dd->force_complete = false;
+	(void)atmel_sha_complete(dd, 0);
+}
+EXPORT_SYMBOL_GPL(atmel_sha_authenc_abort);
+
+#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */
+
+static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
+{
+	int i;
+
+	if (dd->caps.has_hmac)
+		for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++)
+			crypto_unregister_ahash(&sha_hmac_algs[i]);
+
+	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
+		crypto_unregister_ahash(&sha_1_256_algs[i]);
+
+	if (dd->caps.has_sha224)
+		crypto_unregister_ahash(&sha_224_alg);
+
+	if (dd->caps.has_sha_384_512) {
+		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
+			crypto_unregister_ahash(&sha_384_512_algs[i]);
+	}
+}
+
+static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
+{
+	int err, i, j;
+
+	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
+		err = crypto_register_ahash(&sha_1_256_algs[i]);
+		if (err)
+			goto err_sha_1_256_algs;
+	}
+
+	if (dd->caps.has_sha224) {
+		err = crypto_register_ahash(&sha_224_alg);
+		if (err)
+			goto err_sha_224_algs;
+	}
+
+	if (dd->caps.has_sha_384_512) {
+		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
+			err = crypto_register_ahash(&sha_384_512_algs[i]);
+			if (err)
+				goto err_sha_384_512_algs;
+		}
+	}
+
+	if (dd->caps.has_hmac) {
+		for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) {
+			err = crypto_register_ahash(&sha_hmac_algs[i]);
+			if (err)
+				goto err_sha_hmac_algs;
+		}
+	}
+
+	return 0;
+
+	/*i = ARRAY_SIZE(sha_hmac_algs);*/
+err_sha_hmac_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_ahash(&sha_hmac_algs[j]);
+	i = ARRAY_SIZE(sha_384_512_algs);
+err_sha_384_512_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_ahash(&sha_384_512_algs[j]);
+	crypto_unregister_ahash(&sha_224_alg);
+err_sha_224_algs:
+	i = ARRAY_SIZE(sha_1_256_algs);
+err_sha_1_256_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_ahash(&sha_1_256_algs[j]);
+
+	return err;
+}
+
+static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
+{
+	struct at_dma_slave	*sl = slave;
+
+	if (sl && sl->dma_dev == chan->device->dev) {
+		chan->private = sl;
+		return true;
+	} else {
+		return false;
+	}
+}
+
+static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
+				struct crypto_platform_data *pdata)
+{
+	dma_cap_mask_t mask_in;
+
+	/* Try to grab DMA channel */
+	dma_cap_zero(mask_in);
+	dma_cap_set(DMA_SLAVE, mask_in);
+
+	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
+			atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
+	if (!dd->dma_lch_in.chan) {
+		dev_warn(dd->dev, "no DMA channel available\n");
+		return -ENODEV;
+	}
+
+	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+		SHA_REG_DIN(0);
+	dd->dma_lch_in.dma_conf.src_maxburst = 1;
+	dd->dma_lch_in.dma_conf.src_addr_width =
+		DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+	dd->dma_lch_in.dma_conf.dst_addr_width =
+		DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dd->dma_lch_in.dma_conf.device_fc = false;
+
+	return 0;
+}
+
+static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
+{
+	dma_release_channel(dd->dma_lch_in.chan);
+}
+
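+/*
+ * Capabilities follow the major IP version: 0x400 adds DMA, dual buffer
+ * and SHA-224 over the 0x320 baseline, 0x410 adds SHA-384/512, 0x420
+ * adds UIHV support and 0x510 adds HMAC.
+ */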
+static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
+{
+
+	dd->caps.has_dma = 0;
+	dd->caps.has_dualbuff = 0;
+	dd->caps.has_sha224 = 0;
+	dd->caps.has_sha_384_512 = 0;
+	dd->caps.has_uihv = 0;
+	dd->caps.has_hmac = 0;
+
+	/* keep only major version number */
+	switch (dd->hw_version & 0xff0) {
+	case 0x510:
+		dd->caps.has_dma = 1;
+		dd->caps.has_dualbuff = 1;
+		dd->caps.has_sha224 = 1;
+		dd->caps.has_sha_384_512 = 1;
+		dd->caps.has_uihv = 1;
+		dd->caps.has_hmac = 1;
+		break;
+	case 0x420:
+		dd->caps.has_dma = 1;
+		dd->caps.has_dualbuff = 1;
+		dd->caps.has_sha224 = 1;
+		dd->caps.has_sha_384_512 = 1;
+		dd->caps.has_uihv = 1;
+		break;
+	case 0x410:
+		dd->caps.has_dma = 1;
+		dd->caps.has_dualbuff = 1;
+		dd->caps.has_sha224 = 1;
+		dd->caps.has_sha_384_512 = 1;
+		break;
+	case 0x400:
+		dd->caps.has_dma = 1;
+		dd->caps.has_dualbuff = 1;
+		dd->caps.has_sha224 = 1;
+		break;
+	case 0x320:
+		break;
+	default:
+		dev_warn(dd->dev,
+				"Unmanaged sha version, set minimum capabilities\n");
+		break;
+	}
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_sha_dt_ids[] = {
+	{ .compatible = "atmel,at91sam9g46-sha" },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
+
+static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct crypto_platform_data *pdata;
+
+	if (!np) {
+		dev_err(&pdev->dev, "device node not found\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->dma_slave = devm_kzalloc(&pdev->dev,
+					sizeof(*(pdata->dma_slave)),
+					GFP_KERNEL);
+	if (!pdata->dma_slave)
+		return ERR_PTR(-ENOMEM);
+
+	return pdata;
+}
+#else /* CONFIG_OF */
+static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
+{
+	return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int atmel_sha_probe(struct platform_device *pdev)
+{
+	struct atmel_sha_dev *sha_dd;
+	struct crypto_platform_data	*pdata;
+	struct device *dev = &pdev->dev;
+	struct resource *sha_res;
+	int err;
+
+	sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
+	if (sha_dd == NULL) {
+		err = -ENOMEM;
+		goto sha_dd_err;
+	}
+
+	sha_dd->dev = dev;
+
+	platform_set_drvdata(pdev, sha_dd);
+
+	INIT_LIST_HEAD(&sha_dd->list);
+	spin_lock_init(&sha_dd->lock);
+
+	tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
+					(unsigned long)sha_dd);
+	tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
+					(unsigned long)sha_dd);
+
+	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
+
+	/* Get the base address */
+	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!sha_res) {
+		dev_err(dev, "no MEM resource info\n");
+		err = -ENODEV;
+		goto res_err;
+	}
+	sha_dd->phys_base = sha_res->start;
+
+	/* Get the IRQ */
+	sha_dd->irq = platform_get_irq(pdev, 0);
+	if (sha_dd->irq < 0) {
+		dev_err(dev, "no IRQ resource info\n");
+		err = sha_dd->irq;
+		goto res_err;
+	}
+
+	err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
+			       IRQF_SHARED, "atmel-sha", sha_dd);
+	if (err) {
+		dev_err(dev, "unable to request sha irq.\n");
+		goto res_err;
+	}
+
+	/* Initializing the clock */
+	sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
+	if (IS_ERR(sha_dd->iclk)) {
+		dev_err(dev, "clock initialization failed.\n");
+		err = PTR_ERR(sha_dd->iclk);
+		goto res_err;
+	}
+
+	sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
+	if (IS_ERR(sha_dd->io_base)) {
+		dev_err(dev, "can't ioremap\n");
+		err = PTR_ERR(sha_dd->io_base);
+		goto res_err;
+	}
+
+	err = clk_prepare(sha_dd->iclk);
+	if (err)
+		goto res_err;
+
+	atmel_sha_hw_version_init(sha_dd);
+
+	atmel_sha_get_cap(sha_dd);
+
+	if (sha_dd->caps.has_dma) {
+		pdata = pdev->dev.platform_data;
+		if (!pdata) {
+			pdata = atmel_sha_of_init(pdev);
+			if (IS_ERR(pdata)) {
+				dev_err(&pdev->dev, "platform data not available\n");
+				err = PTR_ERR(pdata);
+				goto iclk_unprepare;
+			}
+		}
+		if (!pdata->dma_slave) {
+			err = -ENXIO;
+			goto iclk_unprepare;
+		}
+		err = atmel_sha_dma_init(sha_dd, pdata);
+		if (err)
+			goto err_sha_dma;
+
+		dev_info(dev, "using %s for DMA transfers\n",
+				dma_chan_name(sha_dd->dma_lch_in.chan));
+	}
+
+	spin_lock(&atmel_sha.lock);
+	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
+	spin_unlock(&atmel_sha.lock);
+
+	err = atmel_sha_register_algs(sha_dd);
+	if (err)
+		goto err_algs;
+
+	dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
+			sha_dd->caps.has_sha224 ? "/SHA224" : "",
+			sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");
+
+	return 0;
+
+err_algs:
+	spin_lock(&atmel_sha.lock);
+	list_del(&sha_dd->list);
+	spin_unlock(&atmel_sha.lock);
+	if (sha_dd->caps.has_dma)
+		atmel_sha_dma_cleanup(sha_dd);
+err_sha_dma:
+iclk_unprepare:
+	clk_unprepare(sha_dd->iclk);
+res_err:
+	tasklet_kill(&sha_dd->queue_task);
+	tasklet_kill(&sha_dd->done_task);
+sha_dd_err:
+	dev_err(dev, "initialization failed.\n");
+
+	return err;
+}
+
+static int atmel_sha_remove(struct platform_device *pdev)
+{
+	struct atmel_sha_dev *sha_dd;
+
+	sha_dd = platform_get_drvdata(pdev);
+	if (!sha_dd)
+		return -ENODEV;
+	spin_lock(&atmel_sha.lock);
+	list_del(&sha_dd->list);
+	spin_unlock(&atmel_sha.lock);
+
+	atmel_sha_unregister_algs(sha_dd);
+
+	tasklet_kill(&sha_dd->queue_task);
+	tasklet_kill(&sha_dd->done_task);
+
+	if (sha_dd->caps.has_dma)
+		atmel_sha_dma_cleanup(sha_dd);
+
+	clk_unprepare(sha_dd->iclk);
+
+	return 0;
+}
+
+static struct platform_driver atmel_sha_driver = {
+	.probe		= atmel_sha_probe,
+	.remove		= atmel_sha_remove,
+	.driver		= {
+		.name	= "atmel_sha",
+		.of_match_table	= of_match_ptr(atmel_sha_dt_ids),
+	},
+};
+
+module_platform_driver(atmel_sha_driver);
+
+MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
diff --git a/drivers/crypto/atmel-tdes-regs.h b/drivers/crypto/atmel-tdes-regs.h
new file mode 100644
index 0000000..fbd9057
--- /dev/null
+++ b/drivers/crypto/atmel-tdes-regs.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ATMEL_TDES_REGS_H__
+#define __ATMEL_TDES_REGS_H__
+
+#define TDES_CR			0x00
+#define TDES_CR_START			(1 << 0)
+#define TDES_CR_SWRST			(1 << 8)
+#define TDES_CR_LOADSEED		(1 << 16)
+
+#define	TDES_MR			0x04
+#define TDES_MR_CYPHER_DEC		(0 << 0)
+#define TDES_MR_CYPHER_ENC		(1 << 0)
+#define TDES_MR_TDESMOD_MASK	(0x3 << 1)
+#define TDES_MR_TDESMOD_DES		(0x0 << 1)
+#define TDES_MR_TDESMOD_TDES	(0x1 << 1)
+#define TDES_MR_TDESMOD_XTEA	(0x2 << 1)
+#define TDES_MR_KEYMOD_3KEY		(0 << 4)
+#define TDES_MR_KEYMOD_2KEY		(1 << 4)
+#define TDES_MR_SMOD_MASK		(0x3 << 8)
+#define TDES_MR_SMOD_MANUAL		(0x0 << 8)
+#define TDES_MR_SMOD_AUTO		(0x1 << 8)
+#define TDES_MR_SMOD_PDC		(0x2 << 8)
+#define TDES_MR_OPMOD_MASK		(0x3 << 12)
+#define TDES_MR_OPMOD_ECB		(0x0 << 12)
+#define TDES_MR_OPMOD_CBC		(0x1 << 12)
+#define TDES_MR_OPMOD_OFB		(0x2 << 12)
+#define TDES_MR_OPMOD_CFB		(0x3 << 12)
+#define TDES_MR_LOD				(0x1 << 15)
+#define TDES_MR_CFBS_MASK		(0x3 << 16)
+#define TDES_MR_CFBS_64b		(0x0 << 16)
+#define TDES_MR_CFBS_32b		(0x1 << 16)
+#define TDES_MR_CFBS_16b		(0x2 << 16)
+#define TDES_MR_CFBS_8b			(0x3 << 16)
+#define TDES_MR_CKEY_MASK		(0xF << 20)
+#define TDES_MR_CKEY_OFFSET		20
+#define TDES_MR_CTYPE_MASK		(0x3F << 24)
+#define TDES_MR_CTYPE_OFFSET	24
+
+#define	TDES_IER		0x10
+#define	TDES_IDR		0x14
+#define	TDES_IMR		0x18
+#define	TDES_ISR		0x1C
+#define TDES_INT_DATARDY		(1 << 0)
+#define TDES_INT_ENDRX			(1 << 1)
+#define TDES_INT_ENDTX			(1 << 2)
+#define TDES_INT_RXBUFF			(1 << 3)
+#define TDES_INT_TXBUFE			(1 << 4)
+#define TDES_INT_URAD			(1 << 8)
+#define TDES_ISR_URAT_MASK		(0x3 << 12)
+#define TDES_ISR_URAT_IDR		(0x0 << 12)
+#define TDES_ISR_URAT_ODR		(0x1 << 12)
+#define TDES_ISR_URAT_MR		(0x2 << 12)
+#define TDES_ISR_URAT_WO		(0x3 << 12)
+
+
+#define	TDES_KEY1W1R	0x20
+#define	TDES_KEY1W2R	0x24
+#define	TDES_KEY2W1R	0x28
+#define	TDES_KEY2W2R	0x2C
+#define	TDES_KEY3W1R	0x30
+#define	TDES_KEY3W2R	0x34
+#define	TDES_IDATA1R	0x40
+#define	TDES_IDATA2R	0x44
+#define	TDES_ODATA1R	0x50
+#define	TDES_ODATA2R	0x54
+#define	TDES_IV1R		0x60
+#define	TDES_IV2R		0x64
+
+#define	TDES_XTEARNDR	0x70
+#define	TDES_XTEARNDR_XTEA_RNDS_MASK	(0x3F << 0)
+#define	TDES_XTEARNDR_XTEA_RNDS_OFFSET	0
+
+#define	TDES_HW_VERSION	0xFC
+
+#define TDES_RPR		0x100
+#define TDES_RCR		0x104
+#define TDES_TPR		0x108
+#define TDES_TCR		0x10C
+#define TDES_RNPR		0x110
+#define TDES_RNCR		0x114
+#define TDES_TNPR		0x118
+#define TDES_TNCR		0x11C
+#define TDES_PTCR		0x120
+#define TDES_PTCR_RXTEN			(1 << 0)
+#define TDES_PTCR_RXTDIS		(1 << 1)
+#define TDES_PTCR_TXTEN			(1 << 8)
+#define TDES_PTCR_TXTDIS		(1 << 9)
+#define TDES_PTSR		0x124
+#define TDES_PTSR_RXTEN			(1 << 0)
+#define TDES_PTSR_TXTEN			(1 << 8)
+
+#endif /* __ATMEL_TDES_REGS_H__ */
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
new file mode 100644
index 0000000..97b0423
--- /dev/null
+++ b/drivers/crypto/atmel-tdes.c
@@ -0,0 +1,1499 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for ATMEL DES/TDES HW acceleration.
+ *
+ * Copyright (c) 2012 Eukréa Electromatique - ATMEL
+ * Author: Nicolas Royer <nicolas@eukrea.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from omap-aes.c drivers.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/platform_data/crypto-atmel.h>
+#include "atmel-tdes-regs.h"
+
+/* TDES flags  */
+#define TDES_FLAGS_MODE_MASK		0x00ff
+#define TDES_FLAGS_ENCRYPT	BIT(0)
+#define TDES_FLAGS_CBC		BIT(1)
+#define TDES_FLAGS_CFB		BIT(2)
+#define TDES_FLAGS_CFB8		BIT(3)
+#define TDES_FLAGS_CFB16	BIT(4)
+#define TDES_FLAGS_CFB32	BIT(5)
+#define TDES_FLAGS_CFB64	BIT(6)
+#define TDES_FLAGS_OFB		BIT(7)
+
+#define TDES_FLAGS_INIT		BIT(16)
+#define TDES_FLAGS_FAST		BIT(17)
+#define TDES_FLAGS_BUSY		BIT(18)
+#define TDES_FLAGS_DMA		BIT(19)
+
+#define ATMEL_TDES_QUEUE_LENGTH	50
+
+#define CFB8_BLOCK_SIZE		1
+#define CFB16_BLOCK_SIZE	2
+#define CFB32_BLOCK_SIZE	4
+
+struct atmel_tdes_caps {
+	bool	has_dma;
+	u32		has_cfb_3keys;
+};
+
+struct atmel_tdes_dev;
+
+struct atmel_tdes_ctx {
+	struct atmel_tdes_dev *dd;
+
+	int		keylen;
+	u32		key[3*DES_KEY_SIZE / sizeof(u32)];
+	unsigned long	flags;
+
+	u16		block_size;
+};
+
+struct atmel_tdes_reqctx {
+	unsigned long mode;
+};
+
+struct atmel_tdes_dma {
+	struct dma_chan			*chan;
+	struct dma_slave_config dma_conf;
+};
+
+struct atmel_tdes_dev {
+	struct list_head	list;
+	unsigned long		phys_base;
+	void __iomem		*io_base;
+
+	struct atmel_tdes_ctx	*ctx;
+	struct device		*dev;
+	struct clk			*iclk;
+	int					irq;
+
+	unsigned long		flags;
+	int			err;
+
+	spinlock_t		lock;
+	struct crypto_queue	queue;
+
+	struct tasklet_struct	done_task;
+	struct tasklet_struct	queue_task;
+
+	struct ablkcipher_request	*req;
+	size_t				total;
+
+	struct scatterlist	*in_sg;
+	unsigned int		nb_in_sg;
+	size_t				in_offset;
+	struct scatterlist	*out_sg;
+	unsigned int		nb_out_sg;
+	size_t				out_offset;
+
+	size_t	buflen;
+	size_t	dma_size;
+
+	void	*buf_in;
+	int		dma_in;
+	dma_addr_t	dma_addr_in;
+	struct atmel_tdes_dma	dma_lch_in;
+
+	void	*buf_out;
+	int		dma_out;
+	dma_addr_t	dma_addr_out;
+	struct atmel_tdes_dma	dma_lch_out;
+
+	struct atmel_tdes_caps	caps;
+
+	u32	hw_version;
+};
+
+struct atmel_tdes_drv {
+	struct list_head	dev_list;
+	spinlock_t		lock;
+};
+
+static struct atmel_tdes_drv atmel_tdes = {
+	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
+};
+
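+/*
+ * Copy up to buflen bytes between a linear buffer and the scatterlist
+ * (out selects the direction), advancing *sg and *offset as entries are
+ * consumed; the number of bytes actually copied is returned.
+ */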
+static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
+			void *buf, size_t buflen, size_t total, int out)
+{
+	size_t count, off = 0;
+
+	while (buflen && total) {
+		count = min((*sg)->length - *offset, total);
+		count = min(count, buflen);
+
+		if (!count)
+			return off;
+
+		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
+
+		off += count;
+		buflen -= count;
+		*offset += count;
+		total -= count;
+
+		if (*offset == (*sg)->length) {
+			*sg = sg_next(*sg);
+			if (*sg)
+				*offset = 0;
+			else
+				total = 0;
+		}
+	}
+
+	return off;
+}
+
+static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
+{
+	return readl_relaxed(dd->io_base + offset);
+}
+
+static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
+					u32 offset, u32 value)
+{
+	writel_relaxed(value, dd->io_base + offset);
+}
+
+static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
+					u32 *value, int count)
+{
+	for (; count--; value++, offset += 4)
+		atmel_tdes_write(dd, offset, *value);
+}
+
+static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
+{
+	struct atmel_tdes_dev *tdes_dd = NULL;
+	struct atmel_tdes_dev *tmp;
+
+	spin_lock_bh(&atmel_tdes.lock);
+	if (!ctx->dd) {
+		list_for_each_entry(tmp, &atmel_tdes.dev_list, list) {
+			tdes_dd = tmp;
+			break;
+		}
+		ctx->dd = tdes_dd;
+	} else {
+		tdes_dd = ctx->dd;
+	}
+	spin_unlock_bh(&atmel_tdes.lock);
+
+	return tdes_dd;
+}
+
+static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
+{
+	int err;
+
+	err = clk_prepare_enable(dd->iclk);
+	if (err)
+		return err;
+
+	if (!(dd->flags & TDES_FLAGS_INIT)) {
+		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
+		dd->flags |= TDES_FLAGS_INIT;
+		dd->err = 0;
+	}
+
+	return 0;
+}
+
+static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
+{
+	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
+}
+
+static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
+{
+	atmel_tdes_hw_init(dd);
+
+	dd->hw_version = atmel_tdes_get_version(dd);
+
+	dev_info(dd->dev,
+			"version: 0x%x\n", dd->hw_version);
+
+	clk_disable_unprepare(dd->iclk);
+}
+
+static void atmel_tdes_dma_callback(void *data)
+{
+	struct atmel_tdes_dev *dd = data;
+
+	/* dma_lch_out - completed */
+	tasklet_schedule(&dd->done_task);
+}
+
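+/*
+ * Program the mode register for the current request: DES vs TDES and
+ * the 2/3-key mode are derived from the key length, the operating mode
+ * (ECB/CBC/CFB/OFB) and CFB size from the request flags.  The IV, when
+ * present, is written only after MR has been set.
+ */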
+static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
+{
+	int err;
+	u32 valcr = 0, valmr = TDES_MR_SMOD_PDC;
+
+	err = atmel_tdes_hw_init(dd);
+
+	if (err)
+		return err;
+
+	if (!dd->caps.has_dma)
+		atmel_tdes_write(dd, TDES_PTCR,
+			TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);
+
+	/* MR register must be set before IV registers */
+	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
+		valmr |= TDES_MR_KEYMOD_3KEY;
+		valmr |= TDES_MR_TDESMOD_TDES;
+	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
+		valmr |= TDES_MR_KEYMOD_2KEY;
+		valmr |= TDES_MR_TDESMOD_TDES;
+	} else {
+		valmr |= TDES_MR_TDESMOD_DES;
+	}
+
+	if (dd->flags & TDES_FLAGS_CBC) {
+		valmr |= TDES_MR_OPMOD_CBC;
+	} else if (dd->flags & TDES_FLAGS_CFB) {
+		valmr |= TDES_MR_OPMOD_CFB;
+
+		if (dd->flags & TDES_FLAGS_CFB8)
+			valmr |= TDES_MR_CFBS_8b;
+		else if (dd->flags & TDES_FLAGS_CFB16)
+			valmr |= TDES_MR_CFBS_16b;
+		else if (dd->flags & TDES_FLAGS_CFB32)
+			valmr |= TDES_MR_CFBS_32b;
+		else if (dd->flags & TDES_FLAGS_CFB64)
+			valmr |= TDES_MR_CFBS_64b;
+	} else if (dd->flags & TDES_FLAGS_OFB) {
+		valmr |= TDES_MR_OPMOD_OFB;
+	}
+
+	if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB))
+		valmr |= TDES_MR_CYPHER_ENC;
+
+	atmel_tdes_write(dd, TDES_CR, valcr);
+	atmel_tdes_write(dd, TDES_MR, valmr);
+
+	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
+						dd->ctx->keylen >> 2);
+
+	if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
+		(dd->flags & TDES_FLAGS_OFB)) && dd->req->info) {
+		atmel_tdes_write_n(dd, TDES_IV1R, dd->req->info, 2);
+	}
+
+	return 0;
+}
+
+static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
+{
+	int err = 0;
+	size_t count;
+
+	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
+
+	if (dd->flags & TDES_FLAGS_FAST) {
+		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+	} else {
+		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+					   dd->dma_size, DMA_FROM_DEVICE);
+
+		/* copy data */
+		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
+				dd->buf_out, dd->buflen, dd->dma_size, 1);
+		if (count != dd->dma_size) {
+			err = -EINVAL;
+			pr_err("not all data converted: %zu\n", count);
+		}
+	}
+
+	return err;
+}
+
+static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
+{
+	int err = -ENOMEM;
+
+	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
+	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
+	dd->buflen = PAGE_SIZE;
+	dd->buflen &= ~(DES_BLOCK_SIZE - 1);
+
+	if (!dd->buf_in || !dd->buf_out) {
+		dev_err(dd->dev, "unable to alloc pages.\n");
+		goto err_alloc;
+	}
+
+	/* MAP here */
+	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
+					dd->buflen, DMA_TO_DEVICE);
+	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
+		dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
+		err = -EINVAL;
+		goto err_map_in;
+	}
+
+	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
+					dd->buflen, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
+		dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
+		err = -EINVAL;
+		goto err_map_out;
+	}
+
+	return 0;
+
+err_map_out:
+	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+		DMA_TO_DEVICE);
+err_map_in:
+err_alloc:
+	free_page((unsigned long)dd->buf_out);
+	free_page((unsigned long)dd->buf_in);
+	if (err)
+		pr_err("error: %d\n", err);
+	return err;
+}
+
+static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
+{
+	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
+			 DMA_FROM_DEVICE);
+	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+		DMA_TO_DEVICE);
+	free_page((unsigned long)dd->buf_out);
+	free_page((unsigned long)dd->buf_in);
+}
+
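+/*
+ * PDC transfer path: program the transmit and receive pointer/counter
+ * registers, enable the ENDRX interrupt and start both PDC channels.
+ */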
+static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+			       dma_addr_t dma_addr_out, int length)
+{
+	struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct atmel_tdes_dev *dd = ctx->dd;
+	int len32;
+
+	dd->dma_size = length;
+
+	if (!(dd->flags & TDES_FLAGS_FAST)) {
+		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+					   DMA_TO_DEVICE);
+	}
+
+	if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8))
+		len32 = DIV_ROUND_UP(length, sizeof(u8));
+	else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16))
+		len32 = DIV_ROUND_UP(length, sizeof(u16));
+	else
+		len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
+	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
+	atmel_tdes_write(dd, TDES_TCR, len32);
+	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
+	atmel_tdes_write(dd, TDES_RCR, len32);
+
+	/* Enable Interrupt */
+	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);
+
+	/* Start DMA transfer */
+	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);
+
+	return 0;
+}
+
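+/*
+ * DMA engine transfer path: the slave bus width follows the CFB block
+ * size, then one mem-to-dev and one dev-to-mem descriptor are submitted;
+ * completion is signalled by the output channel callback.
+ */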
+static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+			       dma_addr_t dma_addr_out, int length)
+{
+	struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct atmel_tdes_dev *dd = ctx->dd;
+	struct scatterlist sg[2];
+	struct dma_async_tx_descriptor	*in_desc, *out_desc;
+
+	dd->dma_size = length;
+
+	if (!(dd->flags & TDES_FLAGS_FAST)) {
+		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+					   DMA_TO_DEVICE);
+	}
+
+	if (dd->flags & TDES_FLAGS_CFB8) {
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_1_BYTE;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_1_BYTE;
+	} else if (dd->flags & TDES_FLAGS_CFB16) {
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_2_BYTES;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_2_BYTES;
+	} else {
+		dd->dma_lch_in.dma_conf.dst_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+		dd->dma_lch_out.dma_conf.src_addr_width =
+			DMA_SLAVE_BUSWIDTH_4_BYTES;
+	}
+
+	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
+
+	dd->flags |= TDES_FLAGS_DMA;
+
+	sg_init_table(&sg[0], 1);
+	sg_dma_address(&sg[0]) = dma_addr_in;
+	sg_dma_len(&sg[0]) = length;
+
+	sg_init_table(&sg[1], 1);
+	sg_dma_address(&sg[1]) = dma_addr_out;
+	sg_dma_len(&sg[1]) = length;
+
+	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
+				1, DMA_MEM_TO_DEV,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!in_desc)
+		return -EINVAL;
+
+	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
+				1, DMA_DEV_TO_MEM,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!out_desc)
+		return -EINVAL;
+
+	out_desc->callback = atmel_tdes_dma_callback;
+	out_desc->callback_param = dd;
+
+	dmaengine_submit(out_desc);
+	dma_async_issue_pending(dd->dma_lch_out.chan);
+
+	dmaengine_submit(in_desc);
+	dma_async_issue_pending(dd->dma_lch_in.chan);
+
+	return 0;
+}
+
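+/*
+ * Start the next chunk: when both scatterlists are suitably aligned and
+ * of equal DMA length they are mapped directly (fast path), otherwise
+ * the data is bounced through the pre-mapped cache buffers.
+ */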
+static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
+					crypto_ablkcipher_reqtfm(dd->req));
+	int err, fast = 0, in, out;
+	size_t count;
+	dma_addr_t addr_in, addr_out;
+
+	if ((!dd->in_offset) && (!dd->out_offset)) {
+		/* check for alignment */
+		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
+			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
+		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
+			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
+		fast = in && out;
+
+		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
+			fast = 0;
+	}
+
+	if (fast) {
+		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
+		count = min_t(size_t, count, sg_dma_len(dd->out_sg));
+
+		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			return -EINVAL;
+		}
+
+		err = dma_map_sg(dd->dev, dd->out_sg, 1,
+				DMA_FROM_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			dma_unmap_sg(dd->dev, dd->in_sg, 1,
+				DMA_TO_DEVICE);
+			return -EINVAL;
+		}
+
+		addr_in = sg_dma_address(dd->in_sg);
+		addr_out = sg_dma_address(dd->out_sg);
+
+		dd->flags |= TDES_FLAGS_FAST;
+
+	} else {
+		/* use cache buffers */
+		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
+				dd->buf_in, dd->buflen, dd->total, 0);
+
+		addr_in = dd->dma_addr_in;
+		addr_out = dd->dma_addr_out;
+
+		dd->flags &= ~TDES_FLAGS_FAST;
+	}
+
+	dd->total -= count;
+
+	if (dd->caps.has_dma)
+		err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
+	else
+		err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count);
+
+	if (err && (dd->flags & TDES_FLAGS_FAST)) {
+		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+	}
+
+	return err;
+}
+
+static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
+{
+	struct ablkcipher_request *req = dd->req;
+
+	clk_disable_unprepare(dd->iclk);
+
+	dd->flags &= ~TDES_FLAGS_BUSY;
+
+	req->base.complete(&req->base, err);
+}
+
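+/*
+ * Queue handling: enqueue the new request (if any) and, when the device
+ * is idle, dequeue the next one, signal a backlogged request with
+ * -EINPROGRESS and start the transfer.
+ */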
+static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
+			       struct ablkcipher_request *req)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct atmel_tdes_ctx *ctx;
+	struct atmel_tdes_reqctx *rctx;
+	unsigned long flags;
+	int err, ret = 0;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ablkcipher_enqueue_request(&dd->queue, req);
+	if (dd->flags & TDES_FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
+	backlog = crypto_get_backlog(&dd->queue);
+	async_req = crypto_dequeue_request(&dd->queue);
+	if (async_req)
+		dd->flags |= TDES_FLAGS_BUSY;
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!async_req)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	req = ablkcipher_request_cast(async_req);
+
+	/* assign new request to device */
+	dd->req = req;
+	dd->total = req->nbytes;
+	dd->in_offset = 0;
+	dd->in_sg = req->src;
+	dd->out_offset = 0;
+	dd->out_sg = req->dst;
+
+	rctx = ablkcipher_request_ctx(req);
+	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+	rctx->mode &= TDES_FLAGS_MODE_MASK;
+	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
+	dd->ctx = ctx;
+	ctx->dd = dd;
+
+	err = atmel_tdes_write_ctrl(dd);
+	if (!err)
+		err = atmel_tdes_crypt_start(dd);
+	if (err) {
+		/* the done_task tasklet will not finish it, so do it here */
+		atmel_tdes_finish_req(dd, err);
+		tasklet_schedule(&dd->queue_task);
+	}
+
+	return ret;
+}
+
+static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
+{
+	int err = -EINVAL;
+	size_t count;
+
+	if (dd->flags & TDES_FLAGS_DMA) {
+		err = 0;
+		if (dd->flags & TDES_FLAGS_FAST) {
+			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		} else {
+			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+				dd->dma_size, DMA_FROM_DEVICE);
+
+			/* copy data */
+			count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
+				dd->buf_out, dd->buflen, dd->dma_size, 1);
+			if (count != dd->dma_size) {
+				err = -EINVAL;
+				pr_err("not all data converted: %zu\n", count);
+			}
+		}
+	}
+	return err;
+}
+
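+/*
+ * Common entry point for the encrypt/decrypt callbacks: the request size
+ * must be a multiple of the block size selected by the mode (1, 2, 4 or
+ * 8 bytes) before the request is queued to the device.
+ */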
+static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+	struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(
+			crypto_ablkcipher_reqtfm(req));
+	struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req);
+
+	if (mode & TDES_FLAGS_CFB8) {
+		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of CFB8 blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = CFB8_BLOCK_SIZE;
+	} else if (mode & TDES_FLAGS_CFB16) {
+		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of CFB16 blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = CFB16_BLOCK_SIZE;
+	} else if (mode & TDES_FLAGS_CFB32) {
+		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of CFB32 blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = CFB32_BLOCK_SIZE;
+	} else {
+		if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of DES blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = DES_BLOCK_SIZE;
+	}
+
+	rctx->mode = mode;
+
+	return atmel_tdes_handle_queue(ctx->dd, req);
+}
+
+static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
+{
+	struct at_dma_slave	*sl = slave;
+
+	if (sl && sl->dma_dev == chan->device->dev) {
+		chan->private = sl;
+		return true;
+	} else {
+		return false;
+	}
+}
+
+static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
+			struct crypto_platform_data *pdata)
+{
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* Try to grab 2 DMA channels */
+	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
+			atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
+	if (!dd->dma_lch_in.chan)
+		goto err_dma_in;
+
+	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+		TDES_IDATA1R;
+	dd->dma_lch_in.dma_conf.src_maxburst = 1;
+	dd->dma_lch_in.dma_conf.src_addr_width =
+		DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+	dd->dma_lch_in.dma_conf.dst_addr_width =
+		DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dd->dma_lch_in.dma_conf.device_fc = false;
+
+	dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
+			atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
+	if (!dd->dma_lch_out.chan)
+		goto err_dma_out;
+
+	dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
+	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
+		TDES_ODATA1R;
+	dd->dma_lch_out.dma_conf.src_maxburst = 1;
+	dd->dma_lch_out.dma_conf.src_addr_width =
+		DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+	dd->dma_lch_out.dma_conf.dst_addr_width =
+		DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dd->dma_lch_out.dma_conf.device_fc = false;
+
+	return 0;
+
+err_dma_out:
+	dma_release_channel(dd->dma_lch_in.chan);
+err_dma_in:
+	dev_warn(dd->dev, "no DMA channel available\n");
+	return -ENODEV;
+}
+
+static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
+{
+	dma_release_channel(dd->dma_lch_in.chan);
+	dma_release_channel(dd->dma_lch_out.chan);
+}
+
+static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	u32 tmp[DES_EXPKEY_WORDS];
+	int err;
+	struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
+
+	struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	if (keylen != DES_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	err = des_ekey(tmp, key);
+	if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	const char *alg_name;
+
+	alg_name = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
+
+	/*
+	 * HW bug in cfb 3-keys mode.
+	 */
+	if (!ctx->dd->caps.has_cfb_3keys && strstr(alg_name, "cfb")
+			&& (keylen != 2*DES_KEY_SIZE)) {
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	} else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) {
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int atmel_tdes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
+}
+
+static int atmel_tdes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, 0);
+}
+
+static int atmel_tdes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
+}
+
+static int atmel_tdes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
+}
+static int atmel_tdes_cfb_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
+}
+
+static int atmel_tdes_cfb_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
+}
+
+static int atmel_tdes_cfb8_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
+						TDES_FLAGS_CFB8);
+}
+
+static int atmel_tdes_cfb8_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
+}
+
+static int atmel_tdes_cfb16_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
+						TDES_FLAGS_CFB16);
+}
+
+static int atmel_tdes_cfb16_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
+}
+
+static int atmel_tdes_cfb32_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
+						TDES_FLAGS_CFB32);
+}
+
+static int atmel_tdes_cfb32_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
+}
+
+static int atmel_tdes_ofb_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
+}
+
+static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
+}
+
+static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
+{
+	struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct atmel_tdes_dev *dd;
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx);
+
+	dd = atmel_tdes_find_dev(ctx);
+	if (!dd)
+		return -ENODEV;
+
+	return 0;
+}
+
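+/*
+ * DES and Triple DES transformations exposed to the crypto API: ECB,
+ * CBC, CFB (64/32/16/8-bit) and OFB modes for both "des" and
+ * "des3_ede".
+ */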
+static struct crypto_alg tdes_algs[] = {
+{
+	.cra_name		= "ecb(des)",
+	.cra_driver_name	= "atmel-ecb-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0x7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_ecb_encrypt,
+		.decrypt	= atmel_tdes_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "cbc(des)",
+	.cra_driver_name	= "atmel-cbc-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0x7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_cbc_encrypt,
+		.decrypt	= atmel_tdes_cbc_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb(des)",
+	.cra_driver_name	= "atmel-cfb-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0x7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_cfb_encrypt,
+		.decrypt	= atmel_tdes_cfb_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb8(des)",
+	.cra_driver_name	= "atmel-cfb8-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB8_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_cfb8_encrypt,
+		.decrypt	= atmel_tdes_cfb8_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb16(des)",
+	.cra_driver_name	= "atmel-cfb16-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB16_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0x1,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_cfb16_encrypt,
+		.decrypt	= atmel_tdes_cfb16_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb32(des)",
+	.cra_driver_name	= "atmel-cfb32-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB32_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0x3,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_cfb32_encrypt,
+		.decrypt	= atmel_tdes_cfb32_decrypt,
+	}
+},
+{
+	.cra_name		= "ofb(des)",
+	.cra_driver_name	= "atmel-ofb-des",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0x7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_des_setkey,
+		.encrypt	= atmel_tdes_ofb_encrypt,
+		.decrypt	= atmel_tdes_ofb_decrypt,
+	}
+},
+{
+	.cra_name		= "ecb(des3_ede)",
+	.cra_driver_name	= "atmel-ecb-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0x7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2 * DES_KEY_SIZE,
+		.max_keysize	= 3 * DES_KEY_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_ecb_encrypt,
+		.decrypt	= atmel_tdes_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "cbc(des3_ede)",
+	.cra_driver_name	= "atmel-cbc-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0x7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2*DES_KEY_SIZE,
+		.max_keysize	= 3*DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_cbc_encrypt,
+		.decrypt	= atmel_tdes_cbc_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb(des3_ede)",
+	.cra_driver_name	= "atmel-cfb-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0x7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2*DES_KEY_SIZE,
+		.max_keysize	= 2*DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_cfb_encrypt,
+		.decrypt	= atmel_tdes_cfb_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb8(des3_ede)",
+	.cra_driver_name	= "atmel-cfb8-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB8_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2*DES_KEY_SIZE,
+		.max_keysize	= 2*DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_cfb8_encrypt,
+		.decrypt	= atmel_tdes_cfb8_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb16(des3_ede)",
+	.cra_driver_name	= "atmel-cfb16-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB16_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0x1,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2*DES_KEY_SIZE,
+		.max_keysize	= 2*DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_cfb16_encrypt,
+		.decrypt	= atmel_tdes_cfb16_decrypt,
+	}
+},
+{
+	.cra_name		= "cfb32(des3_ede)",
+	.cra_driver_name	= "atmel-cfb32-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= CFB32_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0x3,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2*DES_KEY_SIZE,
+		.max_keysize	= 2*DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_cfb32_encrypt,
+		.decrypt	= atmel_tdes_cfb32_decrypt,
+	}
+},
+{
+	.cra_name		= "ofb(des3_ede)",
+	.cra_driver_name	= "atmel-ofb-tdes",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
+	.cra_alignmask		= 0x7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_tdes_cra_init,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2*DES_KEY_SIZE,
+		.max_keysize	= 3*DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= atmel_tdes_setkey,
+		.encrypt	= atmel_tdes_ofb_encrypt,
+		.decrypt	= atmel_tdes_ofb_decrypt,
+	}
+},
+};
+
+static void atmel_tdes_queue_task(unsigned long data)
+{
+	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
+
+	atmel_tdes_handle_queue(dd, NULL);
+}
+
+static void atmel_tdes_done_task(unsigned long data)
+{
+	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
+	int err;
+
+	if (!(dd->flags & TDES_FLAGS_DMA))
+		err = atmel_tdes_crypt_pdc_stop(dd);
+	else
+		err = atmel_tdes_crypt_dma_stop(dd);
+
+	err = dd->err ? : err;
+
+	if (dd->total && !err) {
+		if (dd->flags & TDES_FLAGS_FAST) {
+			dd->in_sg = sg_next(dd->in_sg);
+			dd->out_sg = sg_next(dd->out_sg);
+			if (!dd->in_sg || !dd->out_sg)
+				err = -EINVAL;
+		}
+		if (!err)
+			err = atmel_tdes_crypt_start(dd);
+		if (!err)
+			return; /* DMA started. Not finishing. */
+	}
+
+	atmel_tdes_finish_req(dd, err);
+	atmel_tdes_handle_queue(dd, NULL);
+}
+
+static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
+{
+	struct atmel_tdes_dev *tdes_dd = dev_id;
+	u32 reg;
+
+	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
+	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
+		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
+		if (TDES_FLAGS_BUSY & tdes_dd->flags)
+			tasklet_schedule(&tdes_dd->done_task);
+		else
+			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
+		crypto_unregister_alg(&tdes_algs[i]);
+}
+
+static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
+{
+	int err, i, j;
+
+	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
+		err = crypto_register_alg(&tdes_algs[i]);
+		if (err)
+			goto err_tdes_algs;
+	}
+
+	return 0;
+
+err_tdes_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_alg(&tdes_algs[j]);
+
+	return err;
+}
+
+static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
+{
+
+	dd->caps.has_dma = 0;
+	dd->caps.has_cfb_3keys = 0;
+
+	/* keep only major version number */
+	switch (dd->hw_version & 0xf00) {
+	case 0x700:
+		dd->caps.has_dma = 1;
+		dd->caps.has_cfb_3keys = 1;
+		break;
+	case 0x600:
+		break;
+	default:
+		dev_warn(dd->dev,
+				"Unmanaged tdes version, set minimum capabilities\n");
+		break;
+	}
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_tdes_dt_ids[] = {
+	{ .compatible = "atmel,at91sam9g46-tdes" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
+
+static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct crypto_platform_data *pdata;
+
+	if (!np) {
+		dev_err(&pdev->dev, "device node not found\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->dma_slave = devm_kzalloc(&pdev->dev,
+					sizeof(*(pdata->dma_slave)),
+					GFP_KERNEL);
+	if (!pdata->dma_slave)
+		return ERR_PTR(-ENOMEM);
+
+	return pdata;
+}
+#else /* CONFIG_OF */
+static inline struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
+{
+	return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int atmel_tdes_probe(struct platform_device *pdev)
+{
+	struct atmel_tdes_dev *tdes_dd;
+	struct crypto_platform_data	*pdata;
+	struct device *dev = &pdev->dev;
+	struct resource *tdes_res;
+	int err;
+
+	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
+	if (tdes_dd == NULL) {
+		err = -ENOMEM;
+		goto tdes_dd_err;
+	}
+
+	tdes_dd->dev = dev;
+
+	platform_set_drvdata(pdev, tdes_dd);
+
+	INIT_LIST_HEAD(&tdes_dd->list);
+	spin_lock_init(&tdes_dd->lock);
+
+	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
+					(unsigned long)tdes_dd);
+	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
+					(unsigned long)tdes_dd);
+
+	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
+
+	/* Get the base address */
+	tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!tdes_res) {
+		dev_err(dev, "no MEM resource info\n");
+		err = -ENODEV;
+		goto res_err;
+	}
+	tdes_dd->phys_base = tdes_res->start;
+
+	/* Get the IRQ */
+	tdes_dd->irq = platform_get_irq(pdev,  0);
+	if (tdes_dd->irq < 0) {
+		dev_err(dev, "no IRQ resource info\n");
+		err = tdes_dd->irq;
+		goto res_err;
+	}
+
+	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
+			       IRQF_SHARED, "atmel-tdes", tdes_dd);
+	if (err) {
+		dev_err(dev, "unable to request tdes irq.\n");
+		goto res_err;
+	}
+
+	/* Initializing the clock */
+	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
+	if (IS_ERR(tdes_dd->iclk)) {
+		dev_err(dev, "clock initialization failed.\n");
+		err = PTR_ERR(tdes_dd->iclk);
+		goto res_err;
+	}
+
+	tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
+	if (IS_ERR(tdes_dd->io_base)) {
+		dev_err(dev, "can't ioremap\n");
+		err = PTR_ERR(tdes_dd->io_base);
+		goto res_err;
+	}
+
+	atmel_tdes_hw_version_init(tdes_dd);
+
+	atmel_tdes_get_cap(tdes_dd);
+
+	err = atmel_tdes_buff_init(tdes_dd);
+	if (err)
+		goto err_tdes_buff;
+
+	if (tdes_dd->caps.has_dma) {
+		pdata = pdev->dev.platform_data;
+		if (!pdata) {
+			pdata = atmel_tdes_of_init(pdev);
+			if (IS_ERR(pdata)) {
+				dev_err(&pdev->dev, "platform data not available\n");
+				err = PTR_ERR(pdata);
+				goto err_pdata;
+			}
+		}
+		if (!pdata->dma_slave) {
+			err = -ENXIO;
+			goto err_pdata;
+		}
+		err = atmel_tdes_dma_init(tdes_dd, pdata);
+		if (err)
+			goto err_tdes_dma;
+
+		dev_info(dev, "using %s, %s for DMA transfers\n",
+				dma_chan_name(tdes_dd->dma_lch_in.chan),
+				dma_chan_name(tdes_dd->dma_lch_out.chan));
+	}
+
+	spin_lock(&atmel_tdes.lock);
+	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
+	spin_unlock(&atmel_tdes.lock);
+
+	err = atmel_tdes_register_algs(tdes_dd);
+	if (err)
+		goto err_algs;
+
+	dev_info(dev, "Atmel DES/TDES\n");
+
+	return 0;
+
+err_algs:
+	spin_lock(&atmel_tdes.lock);
+	list_del(&tdes_dd->list);
+	spin_unlock(&atmel_tdes.lock);
+	if (tdes_dd->caps.has_dma)
+		atmel_tdes_dma_cleanup(tdes_dd);
+err_tdes_dma:
+err_pdata:
+	atmel_tdes_buff_cleanup(tdes_dd);
+err_tdes_buff:
+res_err:
+	tasklet_kill(&tdes_dd->done_task);
+	tasklet_kill(&tdes_dd->queue_task);
+tdes_dd_err:
+	dev_err(dev, "initialization failed.\n");
+
+	return err;
+}
+
+static int atmel_tdes_remove(struct platform_device *pdev)
+{
+	struct atmel_tdes_dev *tdes_dd;
+
+	tdes_dd = platform_get_drvdata(pdev);
+	if (!tdes_dd)
+		return -ENODEV;
+	spin_lock(&atmel_tdes.lock);
+	list_del(&tdes_dd->list);
+	spin_unlock(&atmel_tdes.lock);
+
+	atmel_tdes_unregister_algs(tdes_dd);
+
+	tasklet_kill(&tdes_dd->done_task);
+	tasklet_kill(&tdes_dd->queue_task);
+
+	if (tdes_dd->caps.has_dma)
+		atmel_tdes_dma_cleanup(tdes_dd);
+
+	atmel_tdes_buff_cleanup(tdes_dd);
+
+	return 0;
+}
+
+static struct platform_driver atmel_tdes_driver = {
+	.probe		= atmel_tdes_probe,
+	.remove		= atmel_tdes_remove,
+	.driver		= {
+		.name	= "atmel_tdes",
+		.of_match_table = of_match_ptr(atmel_tdes_dt_ids),
+	},
+};
+
+module_platform_driver(atmel_tdes_driver);
+
+MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
diff --git a/drivers/crypto/axis/Makefile b/drivers/crypto/axis/Makefile
new file mode 100644
index 0000000..be9a84a
--- /dev/null
+++ b/drivers/crypto/axis/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) := artpec6_crypto.o
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
new file mode 100644
index 0000000..7f07a50
--- /dev/null
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -0,0 +1,3186 @@
+/*
+ *   Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
+ *
+ *    Copyright (C) 2014-2017  Axis Communications AB
+ */
+#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
+
+#include <linux/bitfield.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fault-inject.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/xts.h>
+
+/* Max length of a line in all cache levels for Artpec SoCs. */
+#define ARTPEC_CACHE_LINE_MAX	32
+
+#define PDMA_OUT_CFG		0x0000
+#define PDMA_OUT_BUF_CFG	0x0004
+#define PDMA_OUT_CMD		0x0008
+#define PDMA_OUT_DESCRQ_PUSH	0x0010
+#define PDMA_OUT_DESCRQ_STAT	0x0014
+
+#define A6_PDMA_IN_CFG		0x0028
+#define A6_PDMA_IN_BUF_CFG	0x002c
+#define A6_PDMA_IN_CMD		0x0030
+#define A6_PDMA_IN_STATQ_PUSH	0x0038
+#define A6_PDMA_IN_DESCRQ_PUSH	0x0044
+#define A6_PDMA_IN_DESCRQ_STAT	0x0048
+#define A6_PDMA_INTR_MASK	0x0068
+#define A6_PDMA_ACK_INTR	0x006c
+#define A6_PDMA_MASKED_INTR	0x0074
+
+#define A7_PDMA_IN_CFG		0x002c
+#define A7_PDMA_IN_BUF_CFG	0x0030
+#define A7_PDMA_IN_CMD		0x0034
+#define A7_PDMA_IN_STATQ_PUSH	0x003c
+#define A7_PDMA_IN_DESCRQ_PUSH	0x0048
+#define A7_PDMA_IN_DESCRQ_STAT	0x004C
+#define A7_PDMA_INTR_MASK	0x006c
+#define A7_PDMA_ACK_INTR	0x0070
+#define A7_PDMA_MASKED_INTR	0x0078
+
+#define PDMA_OUT_CFG_EN				BIT(0)
+
+#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
+#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
+
+#define PDMA_OUT_CMD_START			BIT(0)
+#define A6_PDMA_OUT_CMD_STOP			BIT(3)
+#define A7_PDMA_OUT_CMD_STOP			BIT(2)
+
+#define PDMA_OUT_DESCRQ_PUSH_LEN		GENMASK(5, 0)
+#define PDMA_OUT_DESCRQ_PUSH_ADDR		GENMASK(31, 6)
+
+#define PDMA_OUT_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
+#define PDMA_OUT_DESCRQ_STAT_SIZE		GENMASK(7, 4)
+
+#define PDMA_IN_CFG_EN				BIT(0)
+
+#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
+#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
+#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE		GENMASK(14, 10)
+
+#define PDMA_IN_CMD_START			BIT(0)
+#define A6_PDMA_IN_CMD_FLUSH_STAT		BIT(2)
+#define A6_PDMA_IN_CMD_STOP			BIT(3)
+#define A7_PDMA_IN_CMD_FLUSH_STAT		BIT(1)
+#define A7_PDMA_IN_CMD_STOP			BIT(2)
+
+#define PDMA_IN_STATQ_PUSH_LEN			GENMASK(5, 0)
+#define PDMA_IN_STATQ_PUSH_ADDR			GENMASK(31, 6)
+
+#define PDMA_IN_DESCRQ_PUSH_LEN			GENMASK(5, 0)
+#define PDMA_IN_DESCRQ_PUSH_ADDR		GENMASK(31, 6)
+
+#define PDMA_IN_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
+#define PDMA_IN_DESCRQ_STAT_SIZE		GENMASK(7, 4)
+
+#define A6_PDMA_INTR_MASK_IN_DATA		BIT(2)
+#define A6_PDMA_INTR_MASK_IN_EOP		BIT(3)
+#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(4)
+
+#define A7_PDMA_INTR_MASK_IN_DATA		BIT(3)
+#define A7_PDMA_INTR_MASK_IN_EOP		BIT(4)
+#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(5)
+
+#define A6_CRY_MD_OPER		GENMASK(19, 16)
+
+#define A6_CRY_MD_HASH_SEL_CTX	GENMASK(21, 20)
+#define A6_CRY_MD_HASH_HMAC_FIN	BIT(23)
+
+#define A6_CRY_MD_CIPHER_LEN	GENMASK(21, 20)
+#define A6_CRY_MD_CIPHER_DECR	BIT(22)
+#define A6_CRY_MD_CIPHER_TWEAK	BIT(23)
+#define A6_CRY_MD_CIPHER_DSEQ	BIT(24)
+
+#define A7_CRY_MD_OPER		GENMASK(11, 8)
+
+#define A7_CRY_MD_HASH_SEL_CTX	GENMASK(13, 12)
+#define A7_CRY_MD_HASH_HMAC_FIN	BIT(15)
+
+#define A7_CRY_MD_CIPHER_LEN	GENMASK(13, 12)
+#define A7_CRY_MD_CIPHER_DECR	BIT(14)
+#define A7_CRY_MD_CIPHER_TWEAK	BIT(15)
+#define A7_CRY_MD_CIPHER_DSEQ	BIT(16)
+
+/* DMA metadata constants */
+#define regk_crypto_aes_cbc     0x00000002
+#define regk_crypto_aes_ctr     0x00000003
+#define regk_crypto_aes_ecb     0x00000001
+#define regk_crypto_aes_gcm     0x00000004
+#define regk_crypto_aes_xts     0x00000005
+#define regk_crypto_cache       0x00000002
+#define a6_regk_crypto_dlkey    0x0000000a
+#define a7_regk_crypto_dlkey    0x0000000e
+#define regk_crypto_ext         0x00000001
+#define regk_crypto_hmac_sha1   0x00000007
+#define regk_crypto_hmac_sha256 0x00000009
+#define regk_crypto_hmac_sha384 0x0000000b
+#define regk_crypto_hmac_sha512 0x0000000d
+#define regk_crypto_init        0x00000000
+#define regk_crypto_key_128     0x00000000
+#define regk_crypto_key_192     0x00000001
+#define regk_crypto_key_256     0x00000002
+#define regk_crypto_null        0x00000000
+#define regk_crypto_sha1        0x00000006
+#define regk_crypto_sha256      0x00000008
+#define regk_crypto_sha384      0x0000000a
+#define regk_crypto_sha512      0x0000000c
+
+/* DMA descriptor structures */
+struct pdma_descr_ctrl  {
+	unsigned char short_descr : 1;
+	unsigned char pad1        : 1;
+	unsigned char eop         : 1;
+	unsigned char intr        : 1;
+	unsigned char short_len   : 3;
+	unsigned char pad2        : 1;
+} __packed;
+
+struct pdma_data_descr {
+	unsigned int len : 24;
+	unsigned int buf : 32;
+} __packed;
+
+struct pdma_short_descr {
+	unsigned char data[7];
+} __packed;
+
+struct pdma_descr {
+	struct pdma_descr_ctrl ctrl;
+	union {
+		struct pdma_data_descr   data;
+		struct pdma_short_descr  shrt;
+	};
+};
+
+struct pdma_stat_descr {
+	unsigned char pad1        : 1;
+	unsigned char pad2        : 1;
+	unsigned char eop         : 1;
+	unsigned char pad3        : 5;
+	unsigned int  len         : 24;
+};
+
+/* Each descriptor array can hold max 64 entries */
+#define PDMA_DESCR_COUNT	64
+
+#define MODULE_NAME   "Artpec-6 CA"
+
+/* Hash modes (including HMAC variants) */
+#define ARTPEC6_CRYPTO_HASH_SHA1	1
+#define ARTPEC6_CRYPTO_HASH_SHA256	2
+#define ARTPEC6_CRYPTO_HASH_SHA384	3
+#define ARTPEC6_CRYPTO_HASH_SHA512	4
+
+/* Crypto modes */
+#define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
+#define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
+#define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
+#define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5
+
+/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
+ * It operates on a descriptor array with up to 64 descriptor entries.
+ * The arrays must be 64 byte aligned in memory.
+ *
+ * The ciphering unit has no registers and is completely controlled by
+ * a 4-byte metadata word inserted at the beginning of each DMA packet.
+ *
+ * A DMA packet is a sequence of descriptors terminated by setting the .eop
+ * field in the final descriptor of the packet.
+ *
+ * Multiple packets are used for providing context data, key data and
+ * the plain/ciphertext.
+ *
+ *   PDMA Descriptors (Array)
+ *  +------+------+------+~~+-------+------+----
+ *  |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
+ *  +--+---+--+---+----+-+~~+-------+----+-+----
+ *     |      |        |       |         |
+ *     |      |        |       |         |
+ *   __|__  +-------++-------++-------+ +----+
+ *  | MD  | |Payload||Payload||Payload| | MD |
+ *  +-----+ +-------++-------++-------+ +----+
+ */
+
+struct artpec6_crypto_bounce_buffer {
+	struct list_head list;
+	size_t length;
+	struct scatterlist *sg;
+	size_t offset;
+	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
+	 * holds up to ARTPEC_CACHE_LINE_MAX bytes of data.
+	 */
+	void *buf;
+};
+
+struct artpec6_crypto_dma_map {
+	dma_addr_t dma_addr;
+	size_t size;
+	enum dma_data_direction dir;
+};
+
+struct artpec6_crypto_dma_descriptors {
+	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
+	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
+	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
+	struct list_head bounce_buffers;
+	/* Enough maps for all out/in buffers, and all three descr. arrays */
+	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
+	dma_addr_t out_dma_addr;
+	dma_addr_t in_dma_addr;
+	dma_addr_t stat_dma_addr;
+	size_t out_cnt;
+	size_t in_cnt;
+	size_t map_count;
+};
+
+enum artpec6_crypto_variant {
+	ARTPEC6_CRYPTO,
+	ARTPEC7_CRYPTO,
+};
+
+struct artpec6_crypto {
+	void __iomem *base;
+	spinlock_t queue_lock;
+	struct list_head queue; /* waiting for pdma fifo space */
+	struct list_head pending; /* submitted to pdma fifo */
+	struct tasklet_struct task;
+	struct kmem_cache *dma_cache;
+	int pending_count;
+	struct timer_list timer;
+	enum artpec6_crypto_variant variant;
+	void *pad_buffer; /* cache-aligned block padding buffer */
+	void *zero_buffer;
+};
+
+enum artpec6_crypto_hash_flags {
+	HASH_FLAG_INIT_CTX = 2,
+	HASH_FLAG_UPDATE = 4,
+	HASH_FLAG_FINALIZE = 8,
+	HASH_FLAG_HMAC = 16,
+	HASH_FLAG_UPDATE_KEY = 32,
+};
+
+struct artpec6_crypto_req_common {
+	struct list_head list;
+	struct artpec6_crypto_dma_descriptors *dma;
+	struct crypto_async_request *req;
+	void (*complete)(struct crypto_async_request *req);
+	gfp_t gfp_flags;
+};
+
+struct artpec6_hash_request_context {
+	char partial_buffer[SHA512_BLOCK_SIZE];
+	char partial_buffer_out[SHA512_BLOCK_SIZE];
+	char key_buffer[SHA512_BLOCK_SIZE];
+	char pad_buffer[SHA512_BLOCK_SIZE + 32];
+	unsigned char digeststate[SHA512_DIGEST_SIZE];
+	size_t partial_bytes;
+	u64 digcnt;
+	u32 key_md;
+	u32 hash_md;
+	enum artpec6_crypto_hash_flags hash_flags;
+	struct artpec6_crypto_req_common common;
+};
+
+struct artpec6_hash_export_state {
+	char partial_buffer[SHA512_BLOCK_SIZE];
+	unsigned char digeststate[SHA512_DIGEST_SIZE];
+	size_t partial_bytes;
+	u64 digcnt;
+	int oper;
+	unsigned int hash_flags;
+};
+
+struct artpec6_hashalg_context {
+	char hmac_key[SHA512_BLOCK_SIZE];
+	size_t hmac_key_length;
+	struct crypto_shash *child_hash;
+};
+
+struct artpec6_crypto_request_context {
+	u32 cipher_md;
+	bool decrypt;
+	struct artpec6_crypto_req_common common;
+};
+
+struct artpec6_cryptotfm_context {
+	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
+	size_t key_length;
+	u32 key_md;
+	int crypto_type;
+	struct crypto_skcipher *fallback;
+};
+
+struct artpec6_crypto_aead_hw_ctx {
+	__be64	aad_length_bits;
+	__be64  text_length_bits;
+	__u8	J0[AES_BLOCK_SIZE];
+};
+
+struct artpec6_crypto_aead_req_ctx {
+	struct artpec6_crypto_aead_hw_ctx hw_ctx;
+	u32 cipher_md;
+	bool decrypt;
+	struct artpec6_crypto_req_common common;
+	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
+};
+
+/* The crypto framework makes it hard to avoid this global. */
+static struct device *artpec6_crypto_dev;
+
+#ifdef CONFIG_FAULT_INJECTION
+static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
+static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
+#endif
+
+enum {
+	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
+	ARTPEC6_CRYPTO_PREPARE_HASH_START,
+};
+
+static int artpec6_crypto_prepare_aead(struct aead_request *areq);
+static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
+static int artpec6_crypto_prepare_hash(struct ahash_request *areq);
+
+static void
+artpec6_crypto_complete_crypto(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_aead(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_hash(struct crypto_async_request *req);
+
+static int
+artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);
+
+static void
+artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
+
+struct artpec6_crypto_walk {
+	struct scatterlist *sg;
+	size_t offset;
+};
+
+static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
+				     struct scatterlist *sg)
+{
+	awalk->sg = sg;
+	awalk->offset = 0;
+}
+
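+/* Advance the walk by nbytes; returns the number of bytes that could not be
+ * consumed because the scatterlist ended.
+ */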
+static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
+					  size_t nbytes)
+{
+	while (nbytes && awalk->sg) {
+		size_t piece;
+
+		WARN_ON(awalk->offset > awalk->sg->length);
+
+		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
+		nbytes -= piece;
+		awalk->offset += piece;
+		if (awalk->offset == awalk->sg->length) {
+			awalk->sg = sg_next(awalk->sg);
+			awalk->offset = 0;
+		}
+
+	}
+
+	return nbytes;
+}
+
+static size_t
+artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
+{
+	WARN_ON(awalk->sg->length == awalk->offset);
+
+	return awalk->sg->length - awalk->offset;
+}
+
+static dma_addr_t
+artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
+{
+	return sg_phys(awalk->sg) + awalk->offset;
+}
+
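+/* Copy data received into bounce buffers back into the request scatterlists
+ * and free the bounce buffers.
+ */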
+static void
+artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
+{
+	struct artpec6_crypto_dma_descriptors *dma = common->dma;
+	struct artpec6_crypto_bounce_buffer *b;
+	struct artpec6_crypto_bounce_buffer *next;
+
+	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
+		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
+			 b, b->length, b->offset, b->buf);
+		sg_pcopy_from_buffer(b->sg,
+				   1,
+				   b->buf,
+				   b->length,
+				   b->offset);
+
+		list_del(&b->list);
+		kfree(b);
+	}
+}
+
+static inline bool artpec6_crypto_busy(void)
+{
+	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+	int fifo_count = ac->pending_count;
+
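+	/* Treat the accelerator as busy once more than 6 requests are pending
+	 * in the PDMA FIFO; callers then backlog or reject new requests.
+	 */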
+	return fifo_count > 6;
+}
+
+static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
+{
+	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+	int ret = -EBUSY;
+
+	spin_lock_bh(&ac->queue_lock);
+
+	if (!artpec6_crypto_busy()) {
+		list_add_tail(&req->list, &ac->pending);
+		artpec6_crypto_start_dma(req);
+		ret = -EINPROGRESS;
+	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+		list_add_tail(&req->list, &ac->queue);
+	} else {
+		artpec6_crypto_common_destroy(req);
+	}
+
+	spin_unlock_bh(&ac->queue_lock);
+
+	return ret;
+}
+
+static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
+{
+	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+	enum artpec6_crypto_variant variant = ac->variant;
+	void __iomem *base = ac->base;
+	struct artpec6_crypto_dma_descriptors *dma = common->dma;
+	u32 ind, statd, outd;
+
+	/* Make descriptor content visible to the DMA before starting it. */
+	wmb();
+
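+	/* The descriptor and status queues are 64-byte aligned, so only
+	 * address bits [31:6] are written to the PUSH registers.
+	 */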
+	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
+	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);
+
+	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
+		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);
+
+	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
+	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);
+
+	if (variant == ARTPEC6_CRYPTO) {
+		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
+		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
+		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
+	} else {
+		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
+		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
+		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
+	}
+
+	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
+	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);
+
+	ac->pending_count++;
+}
+
+static void
+artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
+{
+	struct artpec6_crypto_dma_descriptors *dma = common->dma;
+
+	dma->out_cnt = 0;
+	dma->in_cnt = 0;
+	dma->map_count = 0;
+	INIT_LIST_HEAD(&dma->bounce_buffers);
+}
+
+static bool fault_inject_dma_descr(void)
+{
+#ifdef CONFIG_FAULT_INJECTION
+	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
+#else
+	return false;
+#endif
+}
+
+/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
+ *                                        physical address
+ *
+ * @addr: The physical address of the data buffer
+ * @len:  The length of the data buffer
+ * @eop:  True if this is the last buffer in the packet
+ *
+ * @return 0 on success or -ENOSPC if there are no more descriptors available
+ */
+static int
+artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
+				    dma_addr_t addr, size_t len, bool eop)
+{
+	struct artpec6_crypto_dma_descriptors *dma = common->dma;
+	struct pdma_descr *d;
+
+	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
+	    fault_inject_dma_descr()) {
+		pr_err("No free OUT DMA descriptors available!\n");
+		return -ENOSPC;
+	}
+
+	d = &dma->out[dma->out_cnt++];
+	memset(d, 0, sizeof(*d));
+
+	d->ctrl.short_descr = 0;
+	d->ctrl.eop = eop;
+	d->data.len = len;
+	d->data.buf = addr;
+	return 0;
+}
+
+/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
+ *
+ * @dst: The virtual address of the data
+ * @len: The length of the data, must be between 1 and 7 bytes
+ * @eop: True if this is the last buffer in the packet
+ *
+ * @return 0 on success
+ *	-ENOSPC if no more descriptors are available
+ *	-EINVAL if the data length is not between 1 and 7 bytes
+ */
+static int
+artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
+				     void *dst, unsigned int len, bool eop)
+{
+	struct artpec6_crypto_dma_descriptors *dma = common->dma;
+	struct pdma_descr *d;
+
+	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
+	    fault_inject_dma_descr()) {
+		pr_err("No free OUT DMA descriptors available!\n");
+		return -ENOSPC;
+	} else if (len > 7 || len < 1) {
+		return -EINVAL;
+	}
+	d = &dma->out[dma->out_cnt++];
+	memset(d, 0, sizeof(*d));
+
+	d->ctrl.short_descr = 1;
+	d->ctrl.short_len = len;
+	d->ctrl.eop = eop;
+	memcpy(d->shrt.data, dst, len);
+	return 0;
+}
+
+static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
+				      struct page *page, size_t offset,
+				      size_t size,
+				      enum dma_data_direction dir,
+				      dma_addr_t *dma_addr_out)
+{
+	struct artpec6_crypto_dma_descriptors *dma = common->dma;
+	struct device *dev = artpec6_crypto_dev;
+	struct artpec6_crypto_dma_map *map;
+	dma_addr_t dma_addr;
+
+	*dma_addr_out = 0;
+
+	if (dma->map_count >= ARRAY_SIZE(dma->maps))
+		return -ENOMEM;
+
+	dma_addr = dma_map_page(dev, page, offset, size, dir);
+	if (dma_mapping_error(dev, dma_addr))
+		return -ENOMEM;
+
+	map = &dma->maps[dma->map_count++];
+	map->size = size;
+	map->dma_addr = dma_addr;
+	map->dir = dir;
+
+	*dma_addr_out = dma_addr;
+
+	return 0;
+}
+
+static int
+artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
+			      void *ptr, size_t size,
+			      enum dma_data_direction dir,
+			      dma_addr_t *dma_addr_out)
+{
+	struct page *page = virt_to_page(ptr);
+	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;
+
+	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
+					  dma_addr_out);
+}
+
+static int
+artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
+{
+	struct artpec6_crypto_dma_descriptors *dma = common->dma;
+	int ret;
+
+	ret = artpec6_crypto_dma_map_single(common, dma->in,
+				sizeof(dma->in[0]) * dma->in_cnt,
+				DMA_TO_DEVICE, &dma->in_dma_addr);
+	if (ret)
+		return ret;
+
+	ret = artpec6_crypto_dma_map_single(common, dma->out,
+				sizeof(dma->out[0]) * dma->out_cnt,
+				DMA_TO_DEVICE, &dma->out_dma_addr);
+	if (ret)
+		return ret;
+
+	/* We only read one stat descriptor */
+	dma->stat[dma->in_cnt - 1] = 0;
+
+	/*
+	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
+	 * to be written.
+	 */
+	return artpec6_crypto_dma_map_single(common,
+				dma->stat + dma->in_cnt - 1,
+				sizeof(dma->stat[0]),
+				DMA_BIDIRECTIONAL,
+				&dma->stat_dma_addr);
+}
+
+static void
+artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
+{
+	struct artpec6_crypto_dma_descriptors *dma = common->dma;
+	struct device *dev = artpec6_crypto_dev;
+	int i;
+
+	for (i = 0; i < dma->map_count; i++) {
+		struct artpec6_crypto_dma_map *map = &dma->maps[i];
+
+		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
+	}
+
+	dma->map_count = 0;
+}
+
+/** artpec6_crypto_setup_out_descr - Setup an out descriptor
+ *
+ * @dst: The virtual address of the data
+ * @len: The length of the data
+ * @eop: True if this is the last buffer in the packet
+ * @use_short: If this is true and the data length is less than 7 bytes, a
+ *	short descriptor will be used
+ *
+ * @return 0 on success
+ *	Any errors from artpec6_crypto_setup_out_descr_short() or
+ *	setup_out_descr_phys()
+ */
+static int
+artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
+			       void *dst, unsigned int len, bool eop,
+			       bool use_short)
+{
+	if (use_short && len < 7) {
+		return artpec6_crypto_setup_out_descr_short(common, dst, len,
+							    eop);
+	} else {
+		int ret;
+		dma_addr_t dma_addr;
+
+		ret = artpec6_crypto_dma_map_single(common, dst, len,
+						   DMA_TO_DEVICE,
+						   &dma_addr);
+		if (ret)
+			return ret;
+
+		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
+							   len, eop);
+	}
+}
+
+/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
+ *                                       physical address
+ *
+ * @addr: The physical address of the data buffer
+ * @len:  The length of the data buffer
+ * @intr: True if an interrupt should be fired after HW processing of this
+ *	  descriptor
+ *
+ */
+static int
+artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
+			       dma_addr_t addr, unsigned int len, bool intr)
+{
+	struct artpec6_crypto_dma_descriptors *dma = common->dma;
+	struct pdma_descr *d;
+
+	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
+	    fault_inject_dma_descr()) {
+		pr_err("No free IN DMA descriptors available!\n");
+		return -ENOSPC;
+	}
+	d = &dma->in[dma->in_cnt++];
+	memset(d, 0, sizeof(*d));
+
+	d->ctrl.intr = intr;
+	d->data.len = len;
+	d->data.buf = addr;
+	return 0;
+}
+
+/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
+ *
+ * @buffer: The virtual address of the data buffer
+ * @len:    The length of the data buffer
+ * @last:   If this is the last data buffer in the request (i.e. an interrupt
+ *	    is needed)
+ *
+ * Short descriptors are not used for the in channel
+ */
+static int
+artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
+			  void *buffer, unsigned int len, bool last)
+{
+	dma_addr_t dma_addr;
+	int ret;
+
+	ret = artpec6_crypto_dma_map_single(common, buffer, len,
+					   DMA_FROM_DEVICE, &dma_addr);
+	if (ret)
+		return ret;
+
+	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
+}
+
+static struct artpec6_crypto_bounce_buffer *
+artpec6_crypto_alloc_bounce(gfp_t flags)
+{
+	void *base;
+	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
+			    2 * ARTPEC_CACHE_LINE_MAX;
+	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);
+
+	if (!bbuf)
+		return NULL;
+
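+	/* The data area follows the struct; the allocation is oversized by two
+	 * cache lines so that buf can be aligned to ARTPEC_CACHE_LINE_MAX.
+	 */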
+	base = bbuf + 1;
+	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
+	return bbuf;
+}
+
+static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
+				  struct artpec6_crypto_walk *walk, size_t size)
+{
+	struct artpec6_crypto_bounce_buffer *bbuf;
+	int ret;
+
+	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
+	if (!bbuf)
+		return -ENOMEM;
+
+	bbuf->length = size;
+	bbuf->sg = walk->sg;
+	bbuf->offset = walk->offset;
+
+	ret =  artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
+	if (ret) {
+		kfree(bbuf);
+		return ret;
+	}
+
+	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
+	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
+	return 0;
+}
+
+static int
+artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
+				  struct artpec6_crypto_walk *walk,
+				  size_t count)
+{
+	size_t chunk;
+	int ret;
+	dma_addr_t addr;
+
+	while (walk->sg && count) {
+		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
+		addr = artpec6_crypto_walk_chunk_phys(walk);
+
+		/* When destination buffers are not aligned to the cache line
+		 * size we need bounce buffers. The DMA-API requires that the
+		 * entire line is owned by the DMA buffer, and this also holds
+		 * when coherent DMA is used.
+		 */
+		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
+			chunk = min_t(dma_addr_t, chunk,
+				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
+				      addr);
+
+			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
+			ret = setup_bounce_buffer_in(common, walk, chunk);
+		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
+			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
+			ret = setup_bounce_buffer_in(common, walk, chunk);
+		} else {
+			dma_addr_t dma_addr;
+
+			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);
+
+			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);
+
+			ret = artpec6_crypto_dma_map_page(common,
+							 sg_page(walk->sg),
+							 walk->sg->offset +
+							 walk->offset,
+							 chunk,
+							 DMA_FROM_DEVICE,
+							 &dma_addr);
+			if (ret)
+				return ret;
+
+			ret = artpec6_crypto_setup_in_descr_phys(common,
+								 dma_addr,
+								 chunk, false);
+		}
+
+		if (ret)
+			return ret;
+
+		count = count - chunk;
+		artpec6_crypto_walk_advance(walk, chunk);
+	}
+
+	if (count)
+		pr_err("EOL unexpected %zu bytes left\n", count);
+
+	return count ? -EINVAL : 0;
+}
+
+static int
+artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
+				   struct artpec6_crypto_walk *walk,
+				   size_t count)
+{
+	size_t chunk;
+	int ret;
+	dma_addr_t addr;
+
+	while (walk->sg && count) {
+		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
+		addr = artpec6_crypto_walk_chunk_phys(walk);
+
+		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);
+
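+		/* Copy up to three leading bytes through a short descriptor to
+		 * reach 4-byte alignment before DMA-mapping the rest.
+		 */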
+		if (addr & 3) {
+			char buf[3];
+
+			chunk = min_t(size_t, chunk, (4-(addr&3)));
+
+			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
+					   walk->offset);
+
+			ret = artpec6_crypto_setup_out_descr_short(common, buf,
+								   chunk,
+								   false);
+		} else {
+			dma_addr_t dma_addr;
+
+			ret = artpec6_crypto_dma_map_page(common,
+							 sg_page(walk->sg),
+							 walk->sg->offset +
+							 walk->offset,
+							 chunk,
+							 DMA_TO_DEVICE,
+							 &dma_addr);
+			if (ret)
+				return ret;
+
+			ret = artpec6_crypto_setup_out_descr_phys(common,
+								 dma_addr,
+								 chunk, false);
+		}
+
+		if (ret)
+			return ret;
+
+		count = count - chunk;
+		artpec6_crypto_walk_advance(walk, chunk);
+	}
+
+	if (count)
+		pr_err("EOL unexpected %zu bytes left\n", count);
+
+	return count ? -EINVAL : 0;
+}
+
+
+/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
+ *
+ * If the out descriptor list is non-empty, then the eop flag on the
+ * last used out descriptor will be set.
+ *
+ * @return  0 on success
+ *	-EINVAL if the out descriptor list is empty or has overflowed
+ */
+static int
+artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
+{
+	struct artpec6_crypto_dma_descriptors *dma = common->dma;
+	struct pdma_descr *d;
+
+	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
+		pr_err("%s: OUT descriptor list is %s\n",
+			MODULE_NAME, dma->out_cnt ? "full" : "empty");
+		return -EINVAL;
+
+	}
+
+	d = &dma->out[dma->out_cnt-1];
+	d->ctrl.eop = 1;
+
+	return 0;
+}
+
+/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
+ *                                       in descriptor
+ *
+ * See artpec6_crypto_terminate_out_descrs() for return values
+ */
+static int
+artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
+{
+	struct artpec6_crypto_dma_descriptors *dma = common->dma;
+	struct pdma_descr *d;
+
+	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
+		pr_err("%s: IN descriptor list is %s\n",
+			MODULE_NAME, dma->in_cnt ? "full" : "empty");
+		return -EINVAL;
+	}
+
+	d = &dma->in[dma->in_cnt-1];
+	d->ctrl.intr = 1;
+	return 0;
+}
+
+/** create_hash_pad - Create a Secure Hash conformant pad
+ *
+ * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
+ * @dgstlen:  The total length of the hash digest in bytes
+ * @bitcount: The total length of the digest in bits
+ *
+ * @return The total number of padding bytes written to @dst
+ */
+static size_t
+create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
+{
+	unsigned int mod, target, diff, pad_bytes, size_bytes;
+	__be64 bits = __cpu_to_be64(bitcount);
+
+	switch (oper) {
+	case regk_crypto_sha1:
+	case regk_crypto_sha256:
+	case regk_crypto_hmac_sha1:
+	case regk_crypto_hmac_sha256:
+		target = 448 / 8;
+		mod = 512 / 8;
+		size_bytes = 8;
+		break;
+	default:
+		target = 896 / 8;
+		mod = 1024 / 8;
+		size_bytes = 16;
+		break;
+	}
+
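+	/* Reserve one byte for the leading 0x80; the remaining pad is zeros up
+	 * to the big-endian bit-length field.
+	 */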
+	target -= 1;
+	diff = dgstlen & (mod - 1);
+	pad_bytes = diff > target ? target + mod - diff : target - diff;
+
+	memset(dst + 1, 0, pad_bytes);
+	dst[0] = 0x80;
+
+	if (size_bytes == 16) {
+		memset(dst + 1 + pad_bytes, 0, 8);
+		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
+	} else {
+		memcpy(dst + 1 + pad_bytes, &bits, 8);
+	}
+
+	return pad_bytes + size_bytes + 1;
+}
+
+static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
+		struct crypto_async_request *parent,
+		void (*complete)(struct crypto_async_request *req),
+		struct scatterlist *dstsg, unsigned int nbytes)
+{
+	gfp_t flags;
+	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+
+	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		 GFP_KERNEL : GFP_ATOMIC;
+
+	common->gfp_flags = flags;
+	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
+	if (!common->dma)
+		return -ENOMEM;
+
+	common->req = parent;
+	common->complete = complete;
+	return 0;
+}
+
+static void
+artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
+{
+	struct artpec6_crypto_bounce_buffer *b;
+	struct artpec6_crypto_bounce_buffer *next;
+
+	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
+		kfree(b);
+	}
+}
+
+static int
+artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
+{
+	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+
+	artpec6_crypto_dma_unmap_all(common);
+	artpec6_crypto_bounce_destroy(common->dma);
+	kmem_cache_free(ac->dma_cache, common->dma);
+	common->dma = NULL;
+	return 0;
+}
+
+/*
+ * Ciphering functions.
+ */
+static int artpec6_crypto_encrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+	struct artpec6_crypto_request_context *req_ctx = NULL;
+	void (*complete)(struct crypto_async_request *req);
+	int ret;
+
+	req_ctx = skcipher_request_ctx(req);
+
+	switch (ctx->crypto_type) {
+	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
+	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
+		req_ctx->decrypt = 0;
+		break;
+	default:
+		break;
+	}
+
+	switch (ctx->crypto_type) {
+	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+		complete = artpec6_crypto_complete_cbc_encrypt;
+		break;
+	default:
+		complete = artpec6_crypto_complete_crypto;
+		break;
+	}
+
+	ret = artpec6_crypto_common_init(&req_ctx->common,
+				  &req->base,
+				  complete,
+				  req->dst, req->cryptlen);
+	if (ret)
+		return ret;
+
+	ret = artpec6_crypto_prepare_crypto(req);
+	if (ret) {
+		artpec6_crypto_common_destroy(&req_ctx->common);
+		return ret;
+	}
+
+	return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int artpec6_crypto_decrypt(struct skcipher_request *req)
+{
+	int ret;
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+	struct artpec6_crypto_request_context *req_ctx = NULL;
+	void (*complete)(struct crypto_async_request *req);
+
+	req_ctx = skcipher_request_ctx(req);
+
+	switch (ctx->crypto_type) {
+	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
+	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
+		req_ctx->decrypt = 1;
+		break;
+	default:
+		break;
+	}
+
+
+	switch (ctx->crypto_type) {
+	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+		complete = artpec6_crypto_complete_cbc_decrypt;
+		break;
+	default:
+		complete = artpec6_crypto_complete_crypto;
+		break;
+	}
+
+	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
+				  complete,
+				  req->dst, req->cryptlen);
+	if (ret)
+		return ret;
+
+	ret = artpec6_crypto_prepare_crypto(req);
+	if (ret) {
+		artpec6_crypto_common_destroy(&req_ctx->common);
+		return ret;
+	}
+
+	return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int
+artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
+{
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+	size_t iv_len = crypto_skcipher_ivsize(cipher);
+	unsigned int counter = be32_to_cpup((__be32 *)
+					    (req->iv + iv_len - 4));
+	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
+			     AES_BLOCK_SIZE;
+
+	/*
+	 * The hardware uses only the last 32-bits as the counter while the
+	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
+	 * the whole IV is a counter.  So fall back if the counter is going to
+	 * overflow.
+	 */
+	if (counter + nblks < counter) {
+		int ret;
+
+		pr_debug("counter %x will overflow (nblks %u), falling back\n",
+			 counter, counter + nblks);
+
+		ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
+					     ctx->key_length);
+		if (ret)
+			return ret;
+
+		{
+			SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+			skcipher_request_set_tfm(subreq, ctx->fallback);
+			skcipher_request_set_callback(subreq, req->base.flags,
+						      NULL, NULL);
+			skcipher_request_set_crypt(subreq, req->src, req->dst,
+						   req->cryptlen, req->iv);
+			ret = encrypt ? crypto_skcipher_encrypt(subreq)
+				      : crypto_skcipher_decrypt(subreq);
+			skcipher_request_zero(subreq);
+		}
+		return ret;
+	}
+
+	return encrypt ? artpec6_crypto_encrypt(req)
+		       : artpec6_crypto_decrypt(req);
+}
+
+static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
+{
+	return artpec6_crypto_ctr_crypt(req, true);
+}
+
+static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
+{
+	return artpec6_crypto_ctr_crypt(req, false);
+}
+
+/*
+ * AEAD functions
+ */
+static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
+{
+	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);
+
+	memset(tfm_ctx, 0, sizeof(*tfm_ctx));
+
+	crypto_aead_set_reqsize(tfm,
+				sizeof(struct artpec6_crypto_aead_req_ctx));
+
+	return 0;
+}
+
+static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
+			       unsigned int len)
+{
+	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
+
+	if (len != 16 && len != 24 && len != 32) {
+		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -1;
+	}
+
+	ctx->key_length = len;
+
+	memcpy(ctx->aes_key, key, len);
+	return 0;
+}
+
+static int artpec6_crypto_aead_encrypt(struct aead_request *req)
+{
+	int ret;
+	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
+
+	req_ctx->decrypt = false;
+	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
+				  artpec6_crypto_complete_aead,
+				  NULL, 0);
+	if (ret)
+		return ret;
+
+	ret = artpec6_crypto_prepare_aead(req);
+	if (ret) {
+		artpec6_crypto_common_destroy(&req_ctx->common);
+		return ret;
+	}
+
+	return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int artpec6_crypto_aead_decrypt(struct aead_request *req)
+{
+	int ret;
+	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
+
+	req_ctx->decrypt = true;
+	if (req->cryptlen < AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	ret = artpec6_crypto_common_init(&req_ctx->common,
+				  &req->base,
+				  artpec6_crypto_complete_aead,
+				  NULL, 0);
+	if (ret)
+		return ret;
+
+	ret = artpec6_crypto_prepare_aead(req);
+	if (ret) {
+		artpec6_crypto_common_destroy(&req_ctx->common);
+		return ret;
+	}
+
+	return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
+{
+	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
+	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
+	size_t contextsize = digestsize == SHA384_DIGEST_SIZE ?
+		SHA512_DIGEST_SIZE : digestsize;
+	size_t blocksize = crypto_tfm_alg_blocksize(
+		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
+	struct artpec6_crypto_req_common *common = &req_ctx->common;
+	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+	enum artpec6_crypto_variant variant = ac->variant;
+	u32 sel_ctx;
+	bool ext_ctx = false;
+	bool run_hw = false;
+	int error = 0;
+
+	artpec6_crypto_init_dma_operation(common);
+
+	/* Upload the HMAC key; it must be the first packet */
+	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
+		if (variant == ARTPEC6_CRYPTO) {
+			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
+						     a6_regk_crypto_dlkey);
+		} else {
+			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
+						     a7_regk_crypto_dlkey);
+		}
+
+		/* Copy and pad up the key */
+		memcpy(req_ctx->key_buffer, ctx->hmac_key,
+		       ctx->hmac_key_length);
+		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
+		       blocksize - ctx->hmac_key_length);
+
+		error = artpec6_crypto_setup_out_descr(common,
+					(void *)&req_ctx->key_md,
+					sizeof(req_ctx->key_md), false, false);
+		if (error)
+			return error;
+
+		error = artpec6_crypto_setup_out_descr(common,
+					req_ctx->key_buffer, blocksize,
+					true, false);
+		if (error)
+			return error;
+	}
+
+	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
+		/* Restore context */
+		sel_ctx = regk_crypto_ext;
+		ext_ctx = true;
+	} else {
+		sel_ctx = regk_crypto_init;
+	}
+
+	if (variant == ARTPEC6_CRYPTO) {
+		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
+		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);
+
+		/* If this is the final round, set the final flag */
+		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
+			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
+	} else {
+		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
+		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);
+
+		/* If this is the final round, set the final flag */
+		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
+			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
+	}
+
+	/* Set up the metadata descriptors */
+	error = artpec6_crypto_setup_out_descr(common,
+				(void *)&req_ctx->hash_md,
+				sizeof(req_ctx->hash_md), false, false);
+	if (error)
+		return error;
+
+	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
+	if (error)
+		return error;
+
+	if (ext_ctx) {
+		error = artpec6_crypto_setup_out_descr(common,
+					req_ctx->digeststate,
+					contextsize, false, false);
+
+		if (error)
+			return error;
+	}
+
+	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
+		size_t done_bytes = 0;
+		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
+		size_t ready_bytes = round_down(total_bytes, blocksize);
+		struct artpec6_crypto_walk walk;
+
+		run_hw = ready_bytes > 0;
+		if (req_ctx->partial_bytes && ready_bytes) {
+			/* We have a partial buffer and will send at least some
+			 * bytes to the HW. Empty the partial buffer before
+			 * tackling the SG lists.
+			 */
+			memcpy(req_ctx->partial_buffer_out,
+				req_ctx->partial_buffer,
+				req_ctx->partial_bytes);
+
+			error = artpec6_crypto_setup_out_descr(common,
+						req_ctx->partial_buffer_out,
+						req_ctx->partial_bytes,
+						false, true);
+			if (error)
+				return error;
+
+			/* Reset partial buffer */
+			done_bytes += req_ctx->partial_bytes;
+			req_ctx->partial_bytes = 0;
+		}
+
+		artpec6_crypto_walk_init(&walk, areq->src);
+
+		error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
+							   ready_bytes -
+							   done_bytes);
+		if (error)
+			return error;
+
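+		/* Stash any sub-blocksize tail in the partial buffer for the
+		 * next update or the final round.
+		 */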
+		if (walk.sg) {
+			size_t sg_skip = ready_bytes - done_bytes;
+			size_t sg_rem = areq->nbytes - sg_skip;
+
+			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+					   req_ctx->partial_buffer +
+					   req_ctx->partial_bytes,
+					   sg_rem, sg_skip);
+
+			req_ctx->partial_bytes += sg_rem;
+		}
+
+		req_ctx->digcnt += ready_bytes;
+		req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
+	}
+
+	/* Finalize */
+	if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
+		bool needtrim = contextsize != digestsize;
+		size_t hash_pad_len;
+		u64 digest_bits;
+		u32 oper;
+
+		if (variant == ARTPEC6_CRYPTO)
+			oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
+		else
+			oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);
+
+		/* Write out the partial buffer if present */
+		if (req_ctx->partial_bytes) {
+			memcpy(req_ctx->partial_buffer_out,
+			       req_ctx->partial_buffer,
+			       req_ctx->partial_bytes);
+			error = artpec6_crypto_setup_out_descr(common,
+						req_ctx->partial_buffer_out,
+						req_ctx->partial_bytes,
+						false, true);
+			if (error)
+				return error;
+
+			req_ctx->digcnt += req_ctx->partial_bytes;
+			req_ctx->partial_bytes = 0;
+		}
+
+		if (req_ctx->hash_flags & HASH_FLAG_HMAC)
+			digest_bits = 8 * (req_ctx->digcnt + blocksize);
+		else
+			digest_bits = 8 * req_ctx->digcnt;
+
+		/* Add the hash pad */
+		hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
+					       req_ctx->digcnt, digest_bits);
+		error = artpec6_crypto_setup_out_descr(common,
+						      req_ctx->pad_buffer,
+						      hash_pad_len, false,
+						      true);
+		req_ctx->digcnt = 0;
+
+		if (error)
+			return error;
+
+		/* Descriptor for the final result */
+		error = artpec6_crypto_setup_in_descr(common, areq->result,
+						      digestsize,
+						      !needtrim);
+		if (error)
+			return error;
+
+		if (needtrim) {
+			/* Discard the extra context bytes for SHA-384 */
+			error = artpec6_crypto_setup_in_descr(common,
+					req_ctx->partial_buffer,
+					digestsize - contextsize, true);
+			if (error)
+				return error;
+		}
+
+	} else { /* This is not the final operation for this request */
+		if (!run_hw)
+			return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
+
+		/* Save the result to the context */
+		error = artpec6_crypto_setup_in_descr(common,
+						      req_ctx->digeststate,
+						      contextsize, false);
+		if (error)
+			return error;
+		/* fall through */
+	}
+
+	req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
+				 HASH_FLAG_FINALIZE);
+
+	error = artpec6_crypto_terminate_in_descrs(common);
+	if (error)
+		return error;
+
+	error = artpec6_crypto_terminate_out_descrs(common);
+	if (error)
+		return error;
+
+	error = artpec6_crypto_dma_map_descs(common);
+	if (error)
+		return error;
+
+	return ARTPEC6_CRYPTO_PREPARE_HASH_START;
+}
+
+
+static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
+{
+	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
+
+	return 0;
+}
+
+static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
+{
+	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
+					      0,
+					      CRYPTO_ALG_ASYNC |
+					      CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->fallback))
+		return PTR_ERR(ctx->fallback);
+
+	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
+
+	return 0;
+}
+
+static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
+{
+	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
+
+	return 0;
+}
+
+static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
+{
+	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
+
+	return 0;
+}
+
+static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
+{
+	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+	memset(ctx, 0, sizeof(*ctx));
+}
+
+static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
+{
+	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+	crypto_free_skcipher(ctx->fallback);
+	artpec6_crypto_aes_exit(tfm);
+}
+
+static int
+artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
+			      unsigned int keylen)
+{
+	struct artpec6_cryptotfm_context *ctx =
+		crypto_skcipher_ctx(cipher);
+
+	switch (keylen) {
+	case 16:
+	case 24:
+	case 32:
+		break;
+	default:
+		crypto_skcipher_set_flags(cipher,
+					  CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->aes_key, key, keylen);
+	ctx->key_length = keylen;
+	return 0;
+}
+
+static int
+artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
+			      unsigned int keylen)
+{
+	struct artpec6_cryptotfm_context *ctx =
+		crypto_skcipher_ctx(cipher);
+	int ret;
+
+	ret = xts_check_key(&cipher->base, key, keylen);
+	if (ret)
+		return ret;
+
+	switch (keylen) {
+	case 32:
+	case 48:
+	case 64:
+		break;
+	default:
+		crypto_skcipher_set_flags(cipher,
+					  CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->aes_key, key, keylen);
+	ctx->key_length = keylen;
+	return 0;
+}
+
+/** artpec6_crypto_process_crypto - Prepare an async block cipher crypto request
+ *
+ * @req: The asynch request to process
+ *
+ * @return 0 if the dma job was successfully prepared
+ *	  <0 on error
+ *
+ * This function sets up the PDMA descriptors for a block cipher request.
+ *
+ * The required padding is added for AES-CTR using a statically defined
+ * buffer.
+ *
+ * The PDMA descriptor list will be as follows:
+ *
+ * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
+ * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
+ *
+ */
+static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
+{
+	int ret;
+	struct artpec6_crypto_walk walk;
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+	struct artpec6_crypto_request_context *req_ctx = NULL;
+	size_t iv_len = crypto_skcipher_ivsize(cipher);
+	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+	enum artpec6_crypto_variant variant = ac->variant;
+	struct artpec6_crypto_req_common *common;
+	bool cipher_decr = false;
+	size_t cipher_klen;
+	u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
+	u32 oper;
+
+	req_ctx = skcipher_request_ctx(areq);
+	common = &req_ctx->common;
+
+	artpec6_crypto_init_dma_operation(common);
+
+	if (variant == ARTPEC6_CRYPTO)
+		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
+	else
+		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);
+
+	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
+					     sizeof(ctx->key_md), false, false);
+	if (ret)
+		return ret;
+
+	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
+					      ctx->key_length, true, false);
+	if (ret)
+		return ret;
+
+	req_ctx->cipher_md = 0;
+
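+	/* An XTS key holds two AES keys of equal size; the metadata encodes
+	 * the size of a single key.
+	 */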
+	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
+		cipher_klen = ctx->key_length/2;
+	else
+		cipher_klen =  ctx->key_length;
+
+	/* Metadata */
+	switch (cipher_klen) {
+	case 16:
+		cipher_len = regk_crypto_key_128;
+		break;
+	case 24:
+		cipher_len = regk_crypto_key_192;
+		break;
+	case 32:
+		cipher_len = regk_crypto_key_256;
+		break;
+	default:
+		pr_err("%s: Invalid key length %zu!\n",
+			MODULE_NAME, ctx->key_length);
+		return -EINVAL;
+	}
+
+	switch (ctx->crypto_type) {
+	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
+		oper = regk_crypto_aes_ecb;
+		cipher_decr = req_ctx->decrypt;
+		break;
+
+	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+		oper = regk_crypto_aes_cbc;
+		cipher_decr = req_ctx->decrypt;
+		break;
+
+	case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
+		oper = regk_crypto_aes_ctr;
+		cipher_decr = false;
+		break;
+
+	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
+		oper = regk_crypto_aes_xts;
+		cipher_decr = req_ctx->decrypt;
+
+		if (variant == ARTPEC6_CRYPTO)
+			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
+		else
+			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
+		break;
+
+	default:
+		pr_err("%s: Invalid cipher mode %d!\n",
+			MODULE_NAME, ctx->crypto_type);
+		return -EINVAL;
+	}
+
+	if (variant == ARTPEC6_CRYPTO) {
+		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
+		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
+						 cipher_len);
+		if (cipher_decr)
+			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
+	} else {
+		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
+		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
+						 cipher_len);
+		if (cipher_decr)
+			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
+	}
+
+	ret = artpec6_crypto_setup_out_descr(common,
+					    &req_ctx->cipher_md,
+					    sizeof(req_ctx->cipher_md),
+					    false, false);
+	if (ret)
+		return ret;
+
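+	/* Per the IN layout above, the engine echoes the cipher metadata word;
+	 * sink those 4 bytes into the scratch pad buffer.
+	 */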
+	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
+	if (ret)
+		return ret;
+
+	if (iv_len) {
+		ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
+						     false, false);
+		if (ret)
+			return ret;
+	}
+	/* Data out */
+	artpec6_crypto_walk_init(&walk, areq->src);
+	ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
+	if (ret)
+		return ret;
+
+	/* Data in */
+	artpec6_crypto_walk_init(&walk, areq->dst);
+	ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
+	if (ret)
+		return ret;
+
+	/* The HW requires CTR and XTS input to be padded to a full AES block. */
+	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
+	    ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
+		size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
+			     areq->cryptlen;
+
+		if (pad) {
+			ret = artpec6_crypto_setup_out_descr(common,
+							     ac->pad_buffer,
+							     pad, false, false);
+			if (ret)
+				return ret;
+
+			ret = artpec6_crypto_setup_in_descr(common,
+							    ac->pad_buffer, pad,
+							    false);
+			if (ret)
+				return ret;
+		}
+	}
+
+	ret = artpec6_crypto_terminate_out_descrs(common);
+	if (ret)
+		return ret;
+
+	ret = artpec6_crypto_terminate_in_descrs(common);
+	if (ret)
+		return ret;
+
+	return artpec6_crypto_dma_map_descs(common);
+}
+
+static int artpec6_crypto_prepare_aead(struct aead_request *areq)
+{
+	size_t count;
+	int ret;
+	size_t input_length;
+	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
+	struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
+	struct artpec6_crypto_req_common *common = &req_ctx->common;
+	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+	enum artpec6_crypto_variant variant = ac->variant;
+	u32 md_cipher_len;
+
+	artpec6_crypto_init_dma_operation(common);
+
+	/* Key */
+	if (variant == ARTPEC6_CRYPTO) {
+		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
+					 a6_regk_crypto_dlkey);
+	} else {
+		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
+					 a7_regk_crypto_dlkey);
+	}
+	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
+					     sizeof(ctx->key_md), false, false);
+	if (ret)
+		return ret;
+
+	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
+					     ctx->key_length, true, false);
+	if (ret)
+		return ret;
+
+	req_ctx->cipher_md = 0;
+
+	switch (ctx->key_length) {
+	case 16:
+		md_cipher_len = regk_crypto_key_128;
+		break;
+	case 24:
+		md_cipher_len = regk_crypto_key_192;
+		break;
+	case 32:
+		md_cipher_len = regk_crypto_key_256;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (variant == ARTPEC6_CRYPTO) {
+		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
+						 regk_crypto_aes_gcm);
+		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
+						 md_cipher_len);
+		if (req_ctx->decrypt)
+			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
+	} else {
+		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
+						 regk_crypto_aes_gcm);
+		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
+						 md_cipher_len);
+		if (req_ctx->decrypt)
+			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
+	}
+
+	ret = artpec6_crypto_setup_out_descr(common,
+					    (void *) &req_ctx->cipher_md,
+					    sizeof(req_ctx->cipher_md), false,
+					    false);
+	if (ret)
+		return ret;
+
+	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
+	if (ret)
+		return ret;
+
+	/* For the decryption, cryptlen includes the tag. */
+	input_length = areq->cryptlen;
+	if (req_ctx->decrypt)
+		input_length -= AES_BLOCK_SIZE;
+
+	/* Prepare the context buffer. The GCM length fields are bit counts,
+	 * stored big-endian.
+	 */
+	req_ctx->hw_ctx.aad_length_bits =
+		__cpu_to_be64(8*areq->assoclen);
+
+	req_ctx->hw_ctx.text_length_bits =
+		__cpu_to_be64(8*input_length);
+
+	memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
+	/*
+	 * For a 96-bit IV, GCM's J0 is the IV followed by a 32-bit counter
+	 * that starts at 1. The HW omits the initial increment of the counter
+	 * field, so set it explicitly here.
+	 */
+	memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
+
+	ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
+		sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
+	if (ret)
+		return ret;
+
+	{
+		struct artpec6_crypto_walk walk;
+
+		artpec6_crypto_walk_init(&walk, areq->src);
+
+		/* Associated data */
+		count = areq->assoclen;
+		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
+		if (ret)
+			return ret;
+
+		if (!IS_ALIGNED(areq->assoclen, 16)) {
+			size_t assoc_pad = 16 - (areq->assoclen % 16);
+			/* The HW mandates zero padding here */
+			ret = artpec6_crypto_setup_out_descr(common,
+							     ac->zero_buffer,
+							     assoc_pad, false,
+							     false);
+			if (ret)
+				return ret;
+		}
+
+		/* Data to crypto */
+		count = input_length;
+		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
+		if (ret)
+			return ret;
+
+		if (!IS_ALIGNED(input_length, 16)) {
+			size_t crypto_pad = 16 - (input_length % 16);
+			/* The HW mandates zero padding here */
+			ret = artpec6_crypto_setup_out_descr(common,
+							     ac->zero_buffer,
+							     crypto_pad,
+							     false,
+							     false);
+			if (ret)
+				return ret;
+		}
+	}
+
+	/* Data from crypto */
+	{
+		struct artpec6_crypto_walk walk;
+		size_t output_len = areq->cryptlen;
+
+		if (req_ctx->decrypt)
+			output_len -= AES_BLOCK_SIZE;
+
+		artpec6_crypto_walk_init(&walk, areq->dst);
+
+		/* skip associated data in the output */
+		count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
+		if (count)
+			return -EINVAL;
+
+		count = output_len;
+		ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
+		if (ret)
+			return ret;
+
+		/* Put padding between the cryptotext and the auth tag */
+		if (!IS_ALIGNED(output_len, 16)) {
+			size_t crypto_pad = 16 - (output_len % 16);
+
+			ret = artpec6_crypto_setup_in_descr(common,
+							    ac->pad_buffer,
+							    crypto_pad, false);
+			if (ret)
+				return ret;
+		}
+
+		/* The authentication tag shall follow immediately after
+		 * the output ciphertext. For decryption it is put in a context
+		 * buffer for later comparison against the input tag.
+		 */
+		count = AES_BLOCK_SIZE;
+
+		if (req_ctx->decrypt) {
+			ret = artpec6_crypto_setup_in_descr(common,
+				req_ctx->decryption_tag, count, false);
+			if (ret)
+				return ret;
+
+		} else {
+			ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
+								count);
+			if (ret)
+				return ret;
+		}
+
+	}
+
+	ret = artpec6_crypto_terminate_in_descrs(common);
+	if (ret)
+		return ret;
+
+	ret = artpec6_crypto_terminate_out_descrs(common);
+	if (ret)
+		return ret;
+
+	return artpec6_crypto_dma_map_descs(common);
+}
+
+static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
+{
+	struct artpec6_crypto_req_common *req;
+
+	while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
+		req = list_first_entry(&ac->queue,
+				       struct artpec6_crypto_req_common,
+				       list);
+		list_move_tail(&req->list, &ac->pending);
+		artpec6_crypto_start_dma(req);
+
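+		/* Notify the submitter that this request is now in progress */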
+		req->req->complete(req->req, -EINPROGRESS);
+	}
+
+	/*
+	 * In some cases, the hardware can raise an in_eop_flush interrupt
+	 * before actually updating the status, so we have a timer which will
+	 * recheck the status on timeout.  Since the cases are expected to be
+	 * very rare, we use a relatively large timeout value.  There should be
+	 * no noticeable negative effect if we time out spuriously.
+	 */
+	if (ac->pending_count)
+		mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
+	else
+		del_timer(&ac->timer);
+}
+
+static void artpec6_crypto_timeout(struct timer_list *t)
+{
+	struct artpec6_crypto *ac = from_timer(ac, t, timer);
+
+	dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
+
+	tasklet_schedule(&ac->task);
+}
+
+static void artpec6_crypto_task(unsigned long data)
+{
+	struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
+	struct artpec6_crypto_req_common *req;
+	struct artpec6_crypto_req_common *n;
+
+	if (list_empty(&ac->pending)) {
+		pr_debug("Spurious IRQ\n");
+		return;
+	}
+
+	spin_lock_bh(&ac->queue_lock);
+
+	list_for_each_entry_safe(req, n, &ac->pending, list) {
+		struct artpec6_crypto_dma_descriptors *dma = req->dma;
+		u32 stat;
+
+		dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr,
+					sizeof(dma->stat[0]),
+					DMA_BIDIRECTIONAL);
+
+		stat = req->dma->stat[req->dma->in_cnt-1];
+
+		/* A non-zero final status descriptor indicates
+		 * this job has finished.
+		 */
+		pr_debug("Request %p status is %X\n", req, stat);
+		if (!stat)
+			break;
+
+		/* Allow testing of timeout handling with fault injection */
+#ifdef CONFIG_FAULT_INJECTION
+		if (should_fail(&artpec6_crypto_fail_status_read, 1))
+			continue;
+#endif
+
+		pr_debug("Completing request %p\n", req);
+
+		list_del(&req->list);
+
+		artpec6_crypto_dma_unmap_all(req);
+		artpec6_crypto_copy_bounce_buffers(req);
+
+		ac->pending_count--;
+		artpec6_crypto_common_destroy(req);
+		req->complete(req->req);
+	}
+
+	artpec6_crypto_process_queue(ac);
+
+	spin_unlock_bh(&ac->queue_lock);
+}
+
+static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
+{
+	req->complete(req, 0);
+}
+
+static void
+artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
+{
+	struct skcipher_request *cipher_req = container_of(req,
+		struct skcipher_request, base);
+
+	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
+				 cipher_req->cryptlen - AES_BLOCK_SIZE,
+				 AES_BLOCK_SIZE, 0);
+	req->complete(req, 0);
+}
+
+static void
+artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
+{
+	struct skcipher_request *cipher_req = container_of(req,
+		struct skcipher_request, base);
+
+	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
+				 cipher_req->cryptlen - AES_BLOCK_SIZE,
+				 AES_BLOCK_SIZE, 0);
+	req->complete(req, 0);
+}
+
+static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
+{
+	int result = 0;
+
+	/* Verify GCM hashtag. */
+	struct aead_request *areq = container_of(req,
+		struct aead_request, base);
+	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
+
+	if (req_ctx->decrypt) {
+		u8 input_tag[AES_BLOCK_SIZE];
+
+		sg_pcopy_to_buffer(areq->src,
+				   sg_nents(areq->src),
+				   input_tag,
+				   AES_BLOCK_SIZE,
+				   areq->assoclen + areq->cryptlen -
+				   AES_BLOCK_SIZE);
+
+		if (memcmp(req_ctx->decryption_tag,
+			   input_tag,
+			   AES_BLOCK_SIZE)) {
+			pr_debug("***EBADMSG:\n");
+			print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
+					     input_tag, AES_BLOCK_SIZE, true);
+			print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
+					     req_ctx->decryption_tag,
+					     AES_BLOCK_SIZE, true);
+
+			result = -EBADMSG;
+		}
+	}
+
+	req->complete(req, result);
+}
+
+static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
+{
+	req->complete(req, 0);
+}
+
+
+/*------------------- Hash functions -----------------------------------------*/
+static int
+artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
+		    const u8 *key, unsigned int keylen)
+{
+	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
+	size_t blocksize;
+	int ret;
+
+	if (!keylen) {
+		pr_err("Invalid length (%d) of HMAC key\n",
+			keylen);
+		return -EINVAL;
+	}
+
+	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
+
+	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
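+	/* As in HMAC (RFC 2104), a key longer than the block size is first
+	 * hashed down to the digest size.
+	 */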
+	if (keylen > blocksize) {
+		SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
+
+		hdesc->tfm = tfm_ctx->child_hash;
+		hdesc->flags = crypto_ahash_get_flags(tfm) &
+			       CRYPTO_TFM_REQ_MAY_SLEEP;
+
+		tfm_ctx->hmac_key_length = blocksize;
+		ret = crypto_shash_digest(hdesc, key, keylen,
+					  tfm_ctx->hmac_key);
+		if (ret)
+			return ret;
+
+	} else {
+		memcpy(tfm_ctx->hmac_key, key, keylen);
+		tfm_ctx->hmac_key_length = keylen;
+	}
+
+	return 0;
+}
+
+static int
+artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
+{
+	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+	enum artpec6_crypto_variant variant = ac->variant;
+	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+	u32 oper;
+
+	memset(req_ctx, 0, sizeof(*req_ctx));
+
+	req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
+	if (hmac)
+		req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
+
+	switch (type) {
+	case ARTPEC6_CRYPTO_HASH_SHA1:
+		oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
+		break;
+	case ARTPEC6_CRYPTO_HASH_SHA256:
+		oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
+		break;
+	case ARTPEC6_CRYPTO_HASH_SHA384:
+		oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384;
+		break;
+	case ARTPEC6_CRYPTO_HASH_SHA512:
+		oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512;
+		break;
+
+	default:
+		pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
+		return -EINVAL;
+	}
+
+	if (variant == ARTPEC6_CRYPTO)
+		req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
+	else
+		req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
+
+	return 0;
+}
+
+static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
+{
+	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+	int ret;
+
+	if (!req_ctx->common.dma) {
+		ret = artpec6_crypto_common_init(&req_ctx->common,
+					  &req->base,
+					  artpec6_crypto_complete_hash,
+					  NULL, 0);
+
+		if (ret)
+			return ret;
+	}
+
+	ret = artpec6_crypto_prepare_hash(req);
+	switch (ret) {
+	case ARTPEC6_CRYPTO_PREPARE_HASH_START:
+		ret = artpec6_crypto_submit(&req_ctx->common);
+		break;
+
+	case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
+		ret = 0;
+		/* Fallthrough */
+
+	default:
+		artpec6_crypto_common_destroy(&req_ctx->common);
+		break;
+	}
+
+	return ret;
+}
+
+static int artpec6_crypto_hash_final(struct ahash_request *req)
+{
+	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+	req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
+
+	return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_hash_update(struct ahash_request *req)
+{
+	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+	req_ctx->hash_flags |= HASH_FLAG_UPDATE;
+
+	return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_sha1_init(struct ahash_request *req)
+{
+	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
+}
+
+static int artpec6_crypto_sha1_digest(struct ahash_request *req)
+{
+	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
+
+	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+	return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_sha256_init(struct ahash_request *req)
+{
+	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
+}
+
+static int artpec6_crypto_sha256_digest(struct ahash_request *req)
+{
+	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
+	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+	return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req)
+{
+	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
+}
+
+static int __maybe_unused
+artpec6_crypto_sha384_digest(struct ahash_request *req)
+{
+	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
+	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+	return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_sha512_init(struct ahash_request *req)
+{
+	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
+}
+
+static int artpec6_crypto_sha512_digest(struct ahash_request *req)
+{
+	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
+	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+	return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
+{
+	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
+}
+
+static int __maybe_unused
+artpec6_crypto_hmac_sha384_init(struct ahash_request *req)
+{
+	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
+}
+
+static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req)
+{
+	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
+}
+
+static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
+{
+	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
+	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+	return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int __maybe_unused
+artpec6_crypto_hmac_sha384_digest(struct ahash_request *req)
+{
+	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
+	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+	return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req)
+{
+	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
+	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+	return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
+				    const char *base_hash_name)
+{
+	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct artpec6_hash_request_context));
+	memset(tfm_ctx, 0, sizeof(*tfm_ctx));
+
+	if (base_hash_name) {
+		struct crypto_shash *child;
+
+		child = crypto_alloc_shash(base_hash_name, 0,
+					   CRYPTO_ALG_NEED_FALLBACK);
+
+		if (IS_ERR(child))
+			return PTR_ERR(child);
+
+		tfm_ctx->child_hash = child;
+	}
+
+	return 0;
+}
+
+static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
+{
+	return artpec6_crypto_ahash_init_common(tfm, NULL);
+}
+
+static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
+{
+	return artpec6_crypto_ahash_init_common(tfm, "sha256");
+}
+
+static int __maybe_unused
+artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm)
+{
+	return artpec6_crypto_ahash_init_common(tfm, "sha384");
+}
+
+static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm)
+{
+	return artpec6_crypto_ahash_init_common(tfm, "sha512");
+}
+
+static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
+{
+	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
+
+	if (tfm_ctx->child_hash)
+		crypto_free_shash(tfm_ctx->child_hash);
+
+	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
+	tfm_ctx->hmac_key_length = 0;
+}
+
+static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
+{
+	const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
+	struct artpec6_hash_export_state *state = out;
+	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+	enum artpec6_crypto_variant variant = ac->variant;
+
+	BUILD_BUG_ON(sizeof(state->partial_buffer) !=
+		     sizeof(ctx->partial_buffer));
+	BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
+
+	state->digcnt = ctx->digcnt;
+	state->partial_bytes = ctx->partial_bytes;
+	state->hash_flags = ctx->hash_flags;
+
+	if (variant == ARTPEC6_CRYPTO)
+		state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
+	else
+		state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
+
+	memcpy(state->partial_buffer, ctx->partial_buffer,
+	       sizeof(state->partial_buffer));
+	memcpy(state->digeststate, ctx->digeststate,
+	       sizeof(state->digeststate));
+
+	return 0;
+}
+
+static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
+{
+	struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
+	const struct artpec6_hash_export_state *state = in;
+	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+	enum artpec6_crypto_variant variant = ac->variant;
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->digcnt = state->digcnt;
+	ctx->partial_bytes = state->partial_bytes;
+	ctx->hash_flags = state->hash_flags;
+
+	if (variant == ARTPEC6_CRYPTO)
+		ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
+	else
+		ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
+
+	memcpy(ctx->partial_buffer, state->partial_buffer,
+	       sizeof(state->partial_buffer));
+	memcpy(ctx->digeststate, state->digeststate,
+	       sizeof(state->digeststate));
+
+	return 0;
+}
+
+static int init_crypto_hw(struct artpec6_crypto *ac)
+{
+	enum artpec6_crypto_variant variant = ac->variant;
+	void __iomem *base = ac->base;
+	u32 out_descr_buf_size;
+	u32 out_data_buf_size;
+	u32 in_data_buf_size;
+	u32 in_descr_buf_size;
+	u32 in_stat_buf_size;
+	u32 in, out;
+
+	/*
+	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
+	 * channels and 1024 bytes for the IN channel. This is an elastic
+	 * memory used to internally store the descriptors and data. The values
+	 * are specified in 64-byte increments.  TrustZone buffers are not
+	 * used at this stage.
+	 */
+	out_data_buf_size = 16;  /* 1024 bytes for data */
+	out_descr_buf_size = 15; /* 960 bytes for descriptors */
+	in_data_buf_size = 8;    /* 512 bytes for data */
+	in_descr_buf_size = 4;   /* 256 bytes for descriptors */
+	in_stat_buf_size = 4;   /* 256 bytes for stat descrs */
+
+	BUILD_BUG_ON_MSG((out_data_buf_size
+				+ out_descr_buf_size) * 64 > 1984,
+			  "Invalid OUT configuration");
+
+	BUILD_BUG_ON_MSG((in_data_buf_size
+				+ in_descr_buf_size
+				+ in_stat_buf_size) * 64 > 1024,
+			  "Invalid IN configuration");
+
+	in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
+	     FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
+	     FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
+
+	out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
+	      FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
+
+	writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
+	writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
+
+	if (variant == ARTPEC6_CRYPTO) {
+		writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
+		writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
+		writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
+			       A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
+			       base + A6_PDMA_INTR_MASK);
+	} else {
+		writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
+		writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
+		writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
+			       A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
+			       base + A7_PDMA_INTR_MASK);
+	}
+
+	return 0;
+}
+
+static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
+{
+	enum artpec6_crypto_variant variant = ac->variant;
+	void __iomem *base = ac->base;
+
+	if (variant == ARTPEC6_CRYPTO) {
+		writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
+		writel_relaxed(0, base + A6_PDMA_IN_CFG);
+		writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
+	} else {
+		writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
+		writel_relaxed(0, base + A7_PDMA_IN_CFG);
+		writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
+	}
+
+	writel_relaxed(0, base + PDMA_OUT_CFG);
+}
+
+static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
+{
+	struct artpec6_crypto *ac = dev_id;
+	enum artpec6_crypto_variant variant = ac->variant;
+	void __iomem *base = ac->base;
+	u32 mask_in_data, mask_in_eop_flush;
+	u32 in_cmd_flush_stat, in_cmd_reg;
+	u32 ack_intr_reg;
+	u32 ack = 0;
+	u32 intr;
+
+	if (variant == ARTPEC6_CRYPTO) {
+		intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
+		mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
+		mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
+		in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
+		in_cmd_reg = A6_PDMA_IN_CMD;
+		ack_intr_reg = A6_PDMA_ACK_INTR;
+	} else {
+		intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
+		mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
+		mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
+		in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
+		in_cmd_reg = A7_PDMA_IN_CMD;
+		ack_intr_reg = A7_PDMA_ACK_INTR;
+	}
+
+	/* We get two interrupt notifications from each job.
+	 * The in_data means all data was sent to memory and then
+	 * we request a status flush command to write the per-job
+	 * status to its status vector. This ensures that the
+	 * tasklet can detect exactly how many submitted jobs
+	 * that have finished.
+	 */
+	if (intr & mask_in_data)
+		ack |= mask_in_data;
+
+	if (intr & mask_in_eop_flush)
+		ack |= mask_in_eop_flush;
+	else
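+		/* Request a status flush; an eop_flush interrupt follows */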
+		writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
+
+	writel_relaxed(ack, base + ack_intr_reg);
+
+	if (intr & mask_in_eop_flush)
+		tasklet_schedule(&ac->task);
+
+	return IRQ_HANDLED;
+}
+
+/*------------------- Algorithm definitions ----------------------------------*/
+
+/* Hashes */
+static struct ahash_alg hash_algos[] = {
+	/* SHA-1 */
+	{
+		.init = artpec6_crypto_sha1_init,
+		.update = artpec6_crypto_hash_update,
+		.final = artpec6_crypto_hash_final,
+		.digest = artpec6_crypto_sha1_digest,
+		.import = artpec6_crypto_hash_import,
+		.export = artpec6_crypto_hash_export,
+		.halg.digestsize = SHA1_DIGEST_SIZE,
+		.halg.statesize = sizeof(struct artpec6_hash_export_state),
+		.halg.base = {
+			.cra_name = "sha1",
+			.cra_driver_name = "artpec-sha1",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = SHA1_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+			.cra_alignmask = 3,
+			.cra_module = THIS_MODULE,
+			.cra_init = artpec6_crypto_ahash_init,
+			.cra_exit = artpec6_crypto_ahash_exit,
+		}
+	},
+	/* SHA-256 */
+	{
+		.init = artpec6_crypto_sha256_init,
+		.update = artpec6_crypto_hash_update,
+		.final = artpec6_crypto_hash_final,
+		.digest = artpec6_crypto_sha256_digest,
+		.import = artpec6_crypto_hash_import,
+		.export = artpec6_crypto_hash_export,
+		.halg.digestsize = SHA256_DIGEST_SIZE,
+		.halg.statesize = sizeof(struct artpec6_hash_export_state),
+		.halg.base = {
+			.cra_name = "sha256",
+			.cra_driver_name = "artpec-sha256",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = SHA256_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+			.cra_alignmask = 3,
+			.cra_module = THIS_MODULE,
+			.cra_init = artpec6_crypto_ahash_init,
+			.cra_exit = artpec6_crypto_ahash_exit,
+		}
+	},
+	/* HMAC SHA-256 */
+	{
+		.init = artpec6_crypto_hmac_sha256_init,
+		.update = artpec6_crypto_hash_update,
+		.final = artpec6_crypto_hash_final,
+		.digest = artpec6_crypto_hmac_sha256_digest,
+		.import = artpec6_crypto_hash_import,
+		.export = artpec6_crypto_hash_export,
+		.setkey = artpec6_crypto_hash_set_key,
+		.halg.digestsize = SHA256_DIGEST_SIZE,
+		.halg.statesize = sizeof(struct artpec6_hash_export_state),
+		.halg.base = {
+			.cra_name = "hmac(sha256)",
+			.cra_driver_name = "artpec-hmac-sha256",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = SHA256_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+			.cra_alignmask = 3,
+			.cra_module = THIS_MODULE,
+			.cra_init = artpec6_crypto_ahash_init_hmac_sha256,
+			.cra_exit = artpec6_crypto_ahash_exit,
+		}
+	},
+};
+
+static struct ahash_alg artpec7_hash_algos[] = {
+	/* SHA-384 */
+	{
+		.init = artpec6_crypto_sha384_init,
+		.update = artpec6_crypto_hash_update,
+		.final = artpec6_crypto_hash_final,
+		.digest = artpec6_crypto_sha384_digest,
+		.import = artpec6_crypto_hash_import,
+		.export = artpec6_crypto_hash_export,
+		.halg.digestsize = SHA384_DIGEST_SIZE,
+		.halg.statesize = sizeof(struct artpec6_hash_export_state),
+		.halg.base = {
+			.cra_name = "sha384",
+			.cra_driver_name = "artpec-sha384",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = SHA384_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+			.cra_alignmask = 3,
+			.cra_module = THIS_MODULE,
+			.cra_init = artpec6_crypto_ahash_init,
+			.cra_exit = artpec6_crypto_ahash_exit,
+		}
+	},
+	/* HMAC SHA-384 */
+	{
+		.init = artpec6_crypto_hmac_sha384_init,
+		.update = artpec6_crypto_hash_update,
+		.final = artpec6_crypto_hash_final,
+		.digest = artpec6_crypto_hmac_sha384_digest,
+		.import = artpec6_crypto_hash_import,
+		.export = artpec6_crypto_hash_export,
+		.setkey = artpec6_crypto_hash_set_key,
+		.halg.digestsize = SHA384_DIGEST_SIZE,
+		.halg.statesize = sizeof(struct artpec6_hash_export_state),
+		.halg.base = {
+			.cra_name = "hmac(sha384)",
+			.cra_driver_name = "artpec-hmac-sha384",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = SHA384_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+			.cra_alignmask = 3,
+			.cra_module = THIS_MODULE,
+			.cra_init = artpec6_crypto_ahash_init_hmac_sha384,
+			.cra_exit = artpec6_crypto_ahash_exit,
+		}
+	},
+	/* SHA-512 */
+	{
+		.init = artpec6_crypto_sha512_init,
+		.update = artpec6_crypto_hash_update,
+		.final = artpec6_crypto_hash_final,
+		.digest = artpec6_crypto_sha512_digest,
+		.import = artpec6_crypto_hash_import,
+		.export = artpec6_crypto_hash_export,
+		.halg.digestsize = SHA512_DIGEST_SIZE,
+		.halg.statesize = sizeof(struct artpec6_hash_export_state),
+		.halg.base = {
+			.cra_name = "sha512",
+			.cra_driver_name = "artpec-sha512",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = SHA512_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+			.cra_alignmask = 3,
+			.cra_module = THIS_MODULE,
+			.cra_init = artpec6_crypto_ahash_init,
+			.cra_exit = artpec6_crypto_ahash_exit,
+		}
+	},
+	/* HMAC SHA-512 */
+	{
+		.init = artpec6_crypto_hmac_sha512_init,
+		.update = artpec6_crypto_hash_update,
+		.final = artpec6_crypto_hash_final,
+		.digest = artpec6_crypto_hmac_sha512_digest,
+		.import = artpec6_crypto_hash_import,
+		.export = artpec6_crypto_hash_export,
+		.setkey = artpec6_crypto_hash_set_key,
+		.halg.digestsize = SHA512_DIGEST_SIZE,
+		.halg.statesize = sizeof(struct artpec6_hash_export_state),
+		.halg.base = {
+			.cra_name = "hmac(sha512)",
+			.cra_driver_name = "artpec-hmac-sha512",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = SHA512_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+			.cra_alignmask = 3,
+			.cra_module = THIS_MODULE,
+			.cra_init = artpec6_crypto_ahash_init_hmac_sha512,
+			.cra_exit = artpec6_crypto_ahash_exit,
+		}
+	},
+};
+
+/* Crypto */
+static struct skcipher_alg crypto_algos[] = {
+	/* AES - ECB */
+	{
+		.base = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "artpec6-ecb-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+			.cra_alignmask = 3,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey = artpec6_crypto_cipher_set_key,
+		.encrypt = artpec6_crypto_encrypt,
+		.decrypt = artpec6_crypto_decrypt,
+		.init = artpec6_crypto_aes_ecb_init,
+		.exit = artpec6_crypto_aes_exit,
+	},
+	/* AES - CTR */
+	{
+		.base = {
+			.cra_name = "ctr(aes)",
+			.cra_driver_name = "artpec6-ctr-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_NEED_FALLBACK,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+			.cra_alignmask = 3,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.setkey = artpec6_crypto_cipher_set_key,
+		.encrypt = artpec6_crypto_ctr_encrypt,
+		.decrypt = artpec6_crypto_ctr_decrypt,
+		.init = artpec6_crypto_aes_ctr_init,
+		.exit = artpec6_crypto_aes_ctr_exit,
+	},
+	/* AES - CBC */
+	{
+		.base = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "artpec6-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+			.cra_alignmask = 3,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.setkey = artpec6_crypto_cipher_set_key,
+		.encrypt = artpec6_crypto_encrypt,
+		.decrypt = artpec6_crypto_decrypt,
+		.init = artpec6_crypto_aes_cbc_init,
+		.exit = artpec6_crypto_aes_exit
+	},
+	/* AES - XTS */
+	{
+		.base = {
+			.cra_name = "xts(aes)",
+			.cra_driver_name = "artpec6-xts-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+			.cra_alignmask = 3,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = 2*AES_MIN_KEY_SIZE,
+		.max_keysize = 2*AES_MAX_KEY_SIZE,
+		.ivsize = 16,
+		.setkey = artpec6_crypto_xts_set_key,
+		.encrypt = artpec6_crypto_encrypt,
+		.decrypt = artpec6_crypto_decrypt,
+		.init = artpec6_crypto_aes_xts_init,
+		.exit = artpec6_crypto_aes_exit,
+	},
+};
+
+static struct aead_alg aead_algos[] = {
+	{
+		.init   = artpec6_crypto_aead_init,
+		.setkey = artpec6_crypto_aead_set_key,
+		.encrypt = artpec6_crypto_aead_encrypt,
+		.decrypt = artpec6_crypto_aead_decrypt,
+		.ivsize = GCM_AES_IV_SIZE,
+		.maxauthsize = AES_BLOCK_SIZE,
+
+		.base = {
+			.cra_name = "gcm(aes)",
+			.cra_driver_name = "artpec-gcm-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+			.cra_alignmask = 3,
+			.cra_module = THIS_MODULE,
+		},
+	}
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+struct dbgfs_u32 {
+	char *name;
+	mode_t mode;
+	u32 *flag;
+	char *desc;
+};
+
+static struct dentry *dbgfs_root;
+
+static void artpec6_crypto_init_debugfs(void)
+{
+	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
+
+	if (!dbgfs_root || IS_ERR(dbgfs_root)) {
+		dbgfs_root = NULL;
+		pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME);
+		return;
+	}
+
+#ifdef CONFIG_FAULT_INJECTION
+	fault_create_debugfs_attr("fail_status_read", dbgfs_root,
+				  &artpec6_crypto_fail_status_read);
+
+	fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
+				  &artpec6_crypto_fail_dma_array_full);
+#endif
+}
+
+static void artpec6_crypto_free_debugfs(void)
+{
+	if (!dbgfs_root)
+		return;
+
+	debugfs_remove_recursive(dbgfs_root);
+	dbgfs_root = NULL;
+}
+#endif
+
+static const struct of_device_id artpec6_crypto_of_match[] = {
+	{ .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
+	{ .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
+	{}
+};
+MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
+
+static int artpec6_crypto_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	enum artpec6_crypto_variant variant;
+	struct artpec6_crypto *ac;
+	struct device *dev = &pdev->dev;
+	void __iomem *base;
+	struct resource *res;
+	int irq;
+	int err;
+
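+	/* Only one instance of the crypto engine is supported */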
+	if (artpec6_crypto_dev)
+		return -ENODEV;
+
+	match = of_match_node(artpec6_crypto_of_match, dev->of_node);
+	if (!match)
+		return -EINVAL;
+
+	variant = (enum artpec6_crypto_variant)match->data;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return -ENODEV;
+
+	ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
+			  GFP_KERNEL);
+	if (!ac)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, ac);
+	ac->variant = variant;
+
+	spin_lock_init(&ac->queue_lock);
+	INIT_LIST_HEAD(&ac->queue);
+	INIT_LIST_HEAD(&ac->pending);
+	timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
+
+	ac->base = base;
+
+	ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
+		sizeof(struct artpec6_crypto_dma_descriptors),
+		64,
+		0,
+		NULL);
+	if (!ac->dma_cache)
+		return -ENOMEM;
+
+#ifdef CONFIG_DEBUG_FS
+	artpec6_crypto_init_debugfs();
+#endif
+
+	tasklet_init(&ac->task, artpec6_crypto_task,
+		     (unsigned long)ac);
+
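+	/* Over-allocate so the pad and zero buffers below can be aligned to
+	 * an ARTPEC_CACHE_LINE_MAX boundary.
+	 */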
+	ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
+				      GFP_KERNEL);
+	if (!ac->pad_buffer)
+		return -ENOMEM;
+	ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
+
+	ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
+				      GFP_KERNEL);
+	if (!ac->zero_buffer)
+		return -ENOMEM;
+	ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
+
+	err = init_crypto_hw(ac);
+	if (err)
+		goto free_cache;
+
+	err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
+			       "artpec6-crypto", ac);
+	if (err)
+		goto disable_hw;
+
+	artpec6_crypto_dev = &pdev->dev;
+
+	err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
+	if (err) {
+		dev_err(dev, "Failed to register ahashes\n");
+		goto disable_hw;
+	}
+
+	if (variant != ARTPEC6_CRYPTO) {
+		err = crypto_register_ahashes(artpec7_hash_algos,
+					      ARRAY_SIZE(artpec7_hash_algos));
+		if (err) {
+			dev_err(dev, "Failed to register ahashes\n");
+			goto unregister_ahashes;
+		}
+	}
+
+	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
+	if (err) {
+		dev_err(dev, "Failed to register ciphers\n");
+		goto unregister_a7_ahashes;
+	}
+
+	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
+	if (err) {
+		dev_err(dev, "Failed to register aeads\n");
+		goto unregister_algs;
+	}
+
+	return 0;
+
+unregister_algs:
+	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
+unregister_a7_ahashes:
+	if (variant != ARTPEC6_CRYPTO)
+		crypto_unregister_ahashes(artpec7_hash_algos,
+					  ARRAY_SIZE(artpec7_hash_algos));
+unregister_ahashes:
+	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
+disable_hw:
+	artpec6_crypto_disable_hw(ac);
+free_cache:
+	kmem_cache_destroy(ac->dma_cache);
+	return err;
+}
+
+static int artpec6_crypto_remove(struct platform_device *pdev)
+{
+	struct artpec6_crypto *ac = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+
+	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
+	if (ac->variant != ARTPEC6_CRYPTO)
+		crypto_unregister_ahashes(artpec7_hash_algos,
+					  ARRAY_SIZE(artpec7_hash_algos));
+	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
+	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
+
+	tasklet_disable(&ac->task);
+	devm_free_irq(&pdev->dev, irq, ac);
+	tasklet_kill(&ac->task);
+	del_timer_sync(&ac->timer);
+
+	artpec6_crypto_disable_hw(ac);
+
+	kmem_cache_destroy(ac->dma_cache);
+#ifdef CONFIG_DEBUG_FS
+	artpec6_crypto_free_debugfs();
+#endif
+	return 0;
+}
+
+static struct platform_driver artpec6_crypto_driver = {
+	.probe   = artpec6_crypto_probe,
+	.remove  = artpec6_crypto_remove,
+	.driver  = {
+		.name  = "artpec6-crypto",
+		.owner = THIS_MODULE,
+		.of_match_table = artpec6_crypto_of_match,
+	},
+};
+
+module_platform_driver(artpec6_crypto_driver);
+
+MODULE_AUTHOR("Axis Communications AB");
+MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/bcm/Makefile b/drivers/crypto/bcm/Makefile
new file mode 100644
index 0000000..13cb80e
--- /dev/null
+++ b/drivers/crypto/bcm/Makefile
@@ -0,0 +1,15 @@
+# File: drivers/crypto/bcm/Makefile
+#
+# Makefile for crypto acceleration files for Broadcom SPU driver
+#
+# Uncomment to enable debug tracing in the SPU driver.
+# CFLAGS_util.o := -DDEBUG
+# CFLAGS_cipher.o := -DDEBUG
+# CFLAGS_spu.o := -DDEBUG
+# CFLAGS_spu2.o := -DDEBUG
+
+obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) := bcm_crypto_spu.o
+
+bcm_crypto_spu-objs :=  util.o spu.o spu2.o cipher.o
+
+ccflags-y += -I. -DBCMDRIVER
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
new file mode 100644
index 0000000..2d1f1db
--- /dev/null
+++ b/drivers/crypto/bcm/cipher.c
@@ -0,0 +1,4943 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/kthread.h>
+#include <linux/rtnetlink.h>
+#include <linux/sched.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/internal/aead.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/hmac.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/authenc.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include <crypto/sha3.h>
+
+#include "util.h"
+#include "cipher.h"
+#include "spu.h"
+#include "spum.h"
+#include "spu2.h"
+
+/* ================= Device Structure ================== */
+
+struct device_private iproc_priv;
+
+/* ==================== Parameters ===================== */
+
+int flow_debug_logging;
+module_param(flow_debug_logging, int, 0644);
+MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");
+
+int packet_debug_logging;
+module_param(packet_debug_logging, int, 0644);
+MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");
+
+int debug_logging_sleep;
+module_param(debug_logging_sleep, int, 0644);
+MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");
+
+/*
+ * The value of these module parameters is used to set the priority for each
+ * algo type when this driver registers algos with the kernel crypto API.
+ * To use a priority other than the default, set the priority in the insmod or
+ * modprobe. Changing the module priority after init time has no effect.
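+ * For example: "modprobe bcm_crypto_spu cipher_pri=300".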
+ *
+ * The default priorities are chosen to be lower (less preferred) than ARMv8 CE
+ * algos, but more preferred than generic software algos.
+ */
+static int cipher_pri = 150;
+module_param(cipher_pri, int, 0644);
+MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");
+
+static int hash_pri = 100;
+module_param(hash_pri, int, 0644);
+MODULE_PARM_DESC(hash_pri, "Priority for hash algos");
+
+static int aead_pri = 150;
+module_param(aead_pri, int, 0644);
+MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
+
+/* A type 3 BCM header, expected to precede the SPU header for SPU-M.
+ * Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
+ * 0x60 - ring 0
+ * 0x68 - ring 1
+ * 0x70 - ring 2
+ * 0x78 - ring 3
+ */
+char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
+/*
+ * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
+ * is set dynamically after reading SPU type from device tree.
+ */
+#define BCM_HDR_LEN  iproc_priv.bcm_hdr_len
+
+/* Min and max time (usec) to sleep before retrying when mbox queue is full */
+#define MBOX_SLEEP_MIN  800
+#define MBOX_SLEEP_MAX 1000
+
+/**
+ * select_channel() - Select a SPU channel to handle a crypto request. Selects
+ * channel in round robin order.
+ *
+ * Return:  channel index
+ */
+static u8 select_channel(void)
+{
+	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
+
+	return chan_idx % iproc_priv.spu.num_chan;
+}
+
+/**
+ * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used to
+ * receive a SPU response message for an ablkcipher request. Includes buffers to
+ * catch SPU message headers and the response data.
+ * @mssg:	mailbox message containing the receive sg
+ * @rctx:	crypto request context
+ * @rx_frag_num: number of scatterlist elements required to hold the
+ *		SPU response message
+ * @chunksize:	Number of bytes of response data expected
+ * @stat_pad_len: Number of bytes required to pad the STAT field to
+ *		a 4-byte boundary
+ *
+ * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
+ * when the request completes, whether the request is handled successfully or
+ * there is an error.
+ *
+ * Returns:
+ *   0 if successful
+ *   < 0 if an error
+ */
+static int
+spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
+			    struct iproc_reqctx_s *rctx,
+			    u8 rx_frag_num,
+			    unsigned int chunksize, u32 stat_pad_len)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct scatterlist *sg;	/* used to build sgs in mbox message */
+	struct iproc_ctx_s *ctx = rctx->ctx;
+	u32 datalen;		/* Number of bytes of response data expected */
+
+	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
+				rctx->gfp);
+	if (!mssg->spu.dst)
+		return -ENOMEM;
+
+	sg = mssg->spu.dst;
+	sg_init_table(sg, rx_frag_num);
+	/* Space for SPU message header */
+	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
+
+	/* If XTS tweak in payload, add buffer to receive encrypted tweak */
+	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
+	    spu->spu_xts_tweak_in_payload())
+		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
+			   SPU_XTS_TWEAK_SIZE);
+
+	/* Copy in each dst sg entry from request, up to chunksize */
+	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
+				 rctx->dst_nents, chunksize);
+	if (datalen < chunksize) {
+		pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
+		       __func__, chunksize, datalen);
+		return -EFAULT;
+	}
+
+	if (ctx->cipher.alg == CIPHER_ALG_RC4)
+		/* Add buffer to catch 260-byte SUPDT field for RC4 */
+		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);
+
+	if (stat_pad_len)
+		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
+
+	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
+	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
+
+	return 0;
+}
+
+/**
+ * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used to
+ * send a SPU request message for an ablkcipher request. Includes SPU message
+ * headers and the request data.
+ * @mssg:	mailbox message containing the transmit sg
+ * @rctx:	crypto request context
+ * @tx_frag_num: number of scatterlist elements required to construct the
+ *		SPU request message
+ * @chunksize:	Number of bytes of request data
+ * @pad_len:	Number of pad bytes
+ *
+ * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
+ * when the request completes, whether the request is handled successfully or
+ * there is an error.
+ *
+ * Returns:
+ *   0 if successful
+ *   < 0 if an error
+ */
+static int
+spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
+			    struct iproc_reqctx_s *rctx,
+			    u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct scatterlist *sg;	/* used to build sgs in mbox message */
+	struct iproc_ctx_s *ctx = rctx->ctx;
+	u32 datalen;		/* Number of bytes of request data copied */
+	u32 stat_len;
+
+	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
+				rctx->gfp);
+	if (unlikely(!mssg->spu.src))
+		return -ENOMEM;
+
+	sg = mssg->spu.src;
+	sg_init_table(sg, tx_frag_num);
+
+	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
+		   BCM_HDR_LEN + ctx->spu_req_hdr_len);
+
+	/* if XTS tweak in payload, copy from IV (where crypto API puts it) */
+	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
+	    spu->spu_xts_tweak_in_payload())
+		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
+
+	/* Copy in each src sg entry from request, up to chunksize */
+	datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
+				 rctx->src_nents, chunksize);
+	if (unlikely(datalen < chunksize)) {
+		pr_err("%s(): failed to copy src sg to mbox msg",
+		       __func__);
+		return -EFAULT;
+	}
+
+	if (pad_len)
+		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
+
+	stat_len = spu->spu_tx_status_len();
+	if (stat_len) {
+		memset(rctx->msg_buf.tx_stat, 0, stat_len);
+		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
+	}
+	return 0;
+}
+
+static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
+				u8 chan_idx)
+{
+	int err;
+	int retry_cnt = 0;
+	struct device *dev = &(iproc_priv.pdev->dev);
+
+	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
+	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
+		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
+			/*
+			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
+			 * not in atomic context and we can wait and try again.
+			 */
+			retry_cnt++;
+			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
+			err = mbox_send_message(iproc_priv.mbox[chan_idx],
+						mssg);
+			atomic_inc(&iproc_priv.mb_no_spc);
+		}
+	}
+	if (err < 0) {
+		atomic_inc(&iproc_priv.mb_send_fail);
+		return err;
+	}
+
+	/* Check error returned by mailbox controller */
+	err = mssg->error;
+	if (unlikely(err < 0)) {
+		dev_err(dev, "message error %d", err);
+		/* Signal txdone for mailbox channel */
+	}
+
+	/* Signal txdone for mailbox channel */
+	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
+	return err;
+}
+
+/**
+ * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
+ * a single SPU request message, starting at the current position in the request
+ * data.
+ * @rctx:	Crypto request context
+ *
+ * This may be called on the crypto API thread, or, when a request is so large
+ * it must be broken into multiple SPU messages, on the thread used to invoke
+ * the response callback. When requests are broken into multiple SPU
+ * messages, we assume subsequent messages depend on previous results, and
+ * thus always wait for previous results before submitting the next message.
+ * Because requests are submitted in lock step like this, there is no need
+ * to synchronize access to request data structures.
+ *
+ * Return: -EINPROGRESS: request has been accepted and result will be returned
+ *			 asynchronously
+ *         Any other value indicates an error
+ */
+static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct crypto_async_request *areq = rctx->parent;
+	struct ablkcipher_request *req =
+	    container_of(areq, struct ablkcipher_request, base);
+	struct iproc_ctx_s *ctx = rctx->ctx;
+	struct spu_cipher_parms cipher_parms;
+	int err = 0;
+	unsigned int chunksize = 0;	/* Num bytes of request to submit */
+	int remaining = 0;	/* Bytes of request still to process */
+	int chunk_start;	/* Beginning of data for current SPU msg */
+
+	/* IV or ctr value to use in this SPU msg */
+	u8 local_iv_ctr[MAX_IV_SIZE];
+	u32 stat_pad_len;	/* num bytes to align status field */
+	u32 pad_len;		/* total length of all padding */
+	bool update_key = false;
+	struct brcm_message *mssg;	/* mailbox message */
+
+	/* number of entries in src and dst sg in mailbox message. */
+	u8 rx_frag_num = 2;	/* response header and STATUS */
+	u8 tx_frag_num = 1;	/* request header */
+
+	flow_log("%s\n", __func__);
+
+	cipher_parms.alg = ctx->cipher.alg;
+	cipher_parms.mode = ctx->cipher.mode;
+	cipher_parms.type = ctx->cipher_type;
+	cipher_parms.key_len = ctx->enckeylen;
+	cipher_parms.key_buf = ctx->enckey;
+	cipher_parms.iv_buf = local_iv_ctr;
+	cipher_parms.iv_len = rctx->iv_ctr_len;
+
+	mssg = &rctx->mb_mssg;
+	chunk_start = rctx->src_sent;
+	remaining = rctx->total_todo - chunk_start;
+
+	/* determine the chunk we are breaking off and update the indexes */
+	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
+	    (remaining > ctx->max_payload))
+		chunksize = ctx->max_payload;
+	else
+		chunksize = remaining;
+
+	rctx->src_sent += chunksize;
+	rctx->total_sent = rctx->src_sent;
+
+	/* Count number of sg entries to be included in this request */
+	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
+	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
+
+	if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
+	    rctx->is_encrypt && chunk_start)
+		/*
+		 * Encrypting a chunk that is not the first. Copy the last
+		 * block of the previous result into the IV for this chunk.
+		 */
+		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
+				    rctx->iv_ctr_len,
+				    chunk_start - rctx->iv_ctr_len);
+
+	if (rctx->iv_ctr_len) {
+		/* get our local copy of the iv */
+		__builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
+				 rctx->iv_ctr_len);
+
+		/* generate the next IV if possible */
+		if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
+		    !rctx->is_encrypt) {
+			/*
+			 * CBC Decrypt: next IV is the last ciphertext block in
+			 * this chunk
+			 */
+			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
+					    rctx->iv_ctr_len,
+					    rctx->src_sent - rctx->iv_ctr_len);
+		} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
+			/*
+			 * The SPU hardware increments the counter once for
+			 * each AES block of 16 bytes. So update the counter
+			 * for the next chunk, if there is one. Note that for
+			 * this chunk, the counter has already been copied to
+			 * local_iv_ctr. We can assume a block size of 16,
+			 * because we only support CTR mode for AES, not for
+			 * any other cipher alg.
+			 */
+			add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
+		}
+	}
+
+	if (ctx->cipher.alg == CIPHER_ALG_RC4) {
+		rx_frag_num++;
+		if (chunk_start) {
+			/*
+			 * for non-first RC4 chunks, use SUPDT from previous
+			 * response as key for this chunk.
+			 */
+			cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
+			update_key = true;
+			cipher_parms.type = CIPHER_TYPE_UPDT;
+		} else if (!rctx->is_encrypt) {
+			/*
+			 * First RC4 chunk. For decrypt, key in pre-built msg
+			 * header may have been changed if encrypt required
+			 * multiple chunks. So revert the key to the
+			 * ctx->enckey value.
+			 */
+			update_key = true;
+			cipher_parms.type = CIPHER_TYPE_INIT;
+		}
+	}
+
+	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
+		flow_log("max_payload infinite\n");
+	else
+		flow_log("max_payload %u\n", ctx->max_payload);
+
+	flow_log("sent:%u start:%u remains:%u size:%u\n",
+		 rctx->src_sent, chunk_start, remaining, chunksize);
+
+	/* Copy SPU header template created at setkey time */
+	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
+	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));
+
+	/*
+	 * Pass SUPDT field as key. Key field in finish() call is only used
+	 * when update_key has been set above for RC4. Will be ignored in
+	 * all other cases.
+	 */
+	spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
+				   ctx->spu_req_hdr_len, !(rctx->is_encrypt),
+				   &cipher_parms, update_key, chunksize);
+
+	atomic64_add(chunksize, &iproc_priv.bytes_out);
+
+	stat_pad_len = spu->spu_wordalign_padlen(chunksize);
+	if (stat_pad_len)
+		rx_frag_num++;
+	pad_len = stat_pad_len;
+	if (pad_len) {
+		tx_frag_num++;
+		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
+				     0, ctx->auth.alg, ctx->auth.mode,
+				     rctx->total_sent, stat_pad_len);
+	}
+
+	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
+			      ctx->spu_req_hdr_len);
+	packet_log("payload:\n");
+	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
+	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
+
+	/*
+	 * Build mailbox message containing SPU request msg and rx buffers
+	 * to catch response message
+	 */
+	memset(mssg, 0, sizeof(*mssg));
+	mssg->type = BRCM_MESSAGE_SPU;
+	mssg->ctx = rctx;	/* Will be returned in response */
+
+	/* Create rx scatterlist to catch result */
+	rx_frag_num += rctx->dst_nents;
+
+	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
+	    spu->spu_xts_tweak_in_payload())
+		rx_frag_num++;	/* extra sg to insert tweak */
+
+	err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
+					  stat_pad_len);
+	if (err)
+		return err;
+
+	/* Create tx scatterlist containing SPU request message */
+	tx_frag_num += rctx->src_nents;
+	if (spu->spu_tx_status_len())
+		tx_frag_num++;
+
+	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
+	    spu->spu_xts_tweak_in_payload())
+		tx_frag_num++;	/* extra sg to insert tweak */
+
+	err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
+					  pad_len);
+	if (err)
+		return err;
+
+	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+	if (unlikely(err < 0))
+		return err;
+
+	return -EINPROGRESS;
+}
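+
+/*
+ * Illustrative sketch, not used by the driver: the CTR handling above notes
+ * that the SPU increments the counter once per 16-byte AES block, which is
+ * why the driver advances its local counter by chunksize >> 4 between
+ * chunks.  A helper doing that kind of update, treating the IV as a 128-bit
+ * big-endian counter, could look roughly like this.  The name
+ * example_ctr_add() is hypothetical; the driver's actual helper is
+ * add_to_ctr().
+ */
+static inline void example_ctr_add(u8 *ctr, u32 nblocks)
+{
+	u64 carry = nblocks;
+	int i;
+
+	/* Add from the least significant (last) byte, propagating carries */
+	for (i = AES_BLOCK_SIZE - 1; i >= 0 && carry; i--) {
+		carry += ctr[i];
+		ctr[i] = carry & 0xff;
+		carry >>= 8;
+	}
+}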
+
+/**
+ * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the
+ * total received count for the request and updates global stats.
+ * @rctx:	Crypto request context
+ */
+static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+#ifdef DEBUG
+	struct crypto_async_request *areq = rctx->parent;
+	struct ablkcipher_request *req = ablkcipher_request_cast(areq);
+#endif
+	struct iproc_ctx_s *ctx = rctx->ctx;
+	u32 payload_len;
+
+	/* See how much data was returned */
+	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
+
+	/*
+	 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
+	 * encrypted tweak ("i") value; we don't count those.
+	 */
+	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
+	    spu->spu_xts_tweak_in_payload() &&
+	    (payload_len >= SPU_XTS_TWEAK_SIZE))
+		payload_len -= SPU_XTS_TWEAK_SIZE;
+
+	atomic64_add(payload_len, &iproc_priv.bytes_in);
+
+	flow_log("%s() offset: %u, bd_len: %u BD:\n",
+		 __func__, rctx->total_received, payload_len);
+
+	dump_sg(req->dst, rctx->total_received, payload_len);
+	if (ctx->cipher.alg == CIPHER_ALG_RC4)
+		packet_dump("  supdt ", rctx->msg_buf.c.supdt_tweak,
+			    SPU_SUPDT_LEN);
+
+	rctx->total_received += payload_len;
+	if (rctx->total_received == rctx->total_todo) {
+		atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
+		atomic_inc(
+		   &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
+	}
+}
+
+/**
+ * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
+ * receive a SPU response message for an ahash request.
+ * @mssg:	mailbox message containing the receive sg
+ * @rctx:	crypto request context
+ * @rx_frag_num: number of scatterlist elements required to hold the
+ *		SPU response message
+ * @digestsize: length of hash digest, in bytes
+ * @stat_pad_len: Number of bytes required to pad the STAT field to
+ *		a 4-byte boundary
+ *
+ * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
+ * when the request completes, whether the request is handled successfully or
+ * there is an error.
+ *
+ * Return:
+ *   0 if successful
+ *   < 0 if an error
+ */
+static int
+spu_ahash_rx_sg_create(struct brcm_message *mssg,
+		       struct iproc_reqctx_s *rctx,
+		       u8 rx_frag_num, unsigned int digestsize,
+		       u32 stat_pad_len)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct scatterlist *sg;	/* used to build sgs in mbox message */
+	struct iproc_ctx_s *ctx = rctx->ctx;
+
+	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
+				rctx->gfp);
+	if (!mssg->spu.dst)
+		return -ENOMEM;
+
+	sg = mssg->spu.dst;
+	sg_init_table(sg, rx_frag_num);
+	/* Space for SPU message header */
+	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
+
+	/* Space for digest */
+	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
+
+	if (stat_pad_len)
+		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
+
+	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
+	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
+	return 0;
+}
+
+/**
+ * spu_ahash_tx_sg_create() -  Build up the scatterlist of buffers used to send
+ * a SPU request message for an ahash request. Includes SPU message headers and
+ * the request data.
+ * @mssg:	mailbox message containing the transmit sg
+ * @rctx:	crypto request context
+ * @tx_frag_num: number of scatterlist elements required to construct the
+ *		SPU request message
+ * @spu_hdr_len: length in bytes of SPU message header
+ * @hash_carry_len: Number of bytes of data carried over from previous req
+ * @new_data_len: Number of bytes of new request data
+ * @pad_len:	Number of pad bytes
+ *
+ * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
+ * when the request completes, whether the request is handled successfully or
+ * there is an error.
+ *
+ * Return:
+ *   0 if successful
+ *   < 0 if an error
+ */
+static int
+spu_ahash_tx_sg_create(struct brcm_message *mssg,
+		       struct iproc_reqctx_s *rctx,
+		       u8 tx_frag_num,
+		       u32 spu_hdr_len,
+		       unsigned int hash_carry_len,
+		       unsigned int new_data_len, u32 pad_len)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct scatterlist *sg;	/* used to build sgs in mbox message */
+	u32 datalen;		/* Number of bytes of response data expected */
+	u32 stat_len;
+
+	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
+				rctx->gfp);
+	if (!mssg->spu.src)
+		return -ENOMEM;
+
+	sg = mssg->spu.src;
+	sg_init_table(sg, tx_frag_num);
+
+	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
+		   BCM_HDR_LEN + spu_hdr_len);
+
+	if (hash_carry_len)
+		sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
+
+	if (new_data_len) {
+		/* Copy in each src sg entry from request, up to chunksize */
+		datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
+					 rctx->src_nents, new_data_len);
+		if (datalen < new_data_len) {
+			pr_err("%s(): failed to copy src sg to mbox msg",
+			       __func__);
+			return -EFAULT;
+		}
+	}
+
+	if (pad_len)
+		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
+
+	stat_len = spu->spu_tx_status_len();
+	if (stat_len) {
+		memset(rctx->msg_buf.tx_stat, 0, stat_len);
+		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
+	}
+
+	return 0;
+}
+
+/**
+ * handle_ahash_req() - Process an asynchronous hash request from the crypto
+ * API.
+ * @rctx:  Crypto request context
+ *
+ * Builds a SPU request message embedded in a mailbox message and submits the
+ * mailbox message on a selected mailbox channel. The SPU request message is
+ * constructed as a scatterlist, including entries from the crypto API's
+ * src scatterlist to avoid copying the data to be hashed. This function is
+ * called either on the thread from the crypto API, or, in the case that the
+ * crypto API request is too large to fit in a single SPU request message,
+ * on the thread that invokes the receive callback with a response message.
+ * Because some operations require the response from one chunk before the next
+ * chunk can be submitted, we always wait for the response for the previous
+ * chunk before submitting the next chunk. Because requests are submitted in
+ * lock step like this, there is no need to synchronize access to request data
+ * structures.
+ *
+ * Return:
+ *   -EINPROGRESS: request has been submitted to SPU and response will be
+ *		   returned asynchronously
+ *   -EAGAIN:      non-final request included a small amount of data, which for
+ *		   efficiency we did not submit to the SPU, but instead stored
+ *		   to be submitted to the SPU with the next part of the request
+ *   other:        an error code
+ */
+static int handle_ahash_req(struct iproc_reqctx_s *rctx)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct crypto_async_request *areq = rctx->parent;
+	struct ahash_request *req = ahash_request_cast(areq);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
+	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
+	struct iproc_ctx_s *ctx = rctx->ctx;
+
+	/* number of bytes still to be hashed in this req */
+	unsigned int nbytes_to_hash = 0;
+	int err = 0;
+	unsigned int chunksize = 0;	/* length of hash carry + new data */
+	/*
+	 * length of new data, not from hash carry, to be submitted in
+	 * this hw request
+	 */
+	unsigned int new_data_len;
+
+	unsigned int chunk_start = 0;
+	u32 db_size;	 /* Length of data field, incl gcm and hash padding */
+	int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
+	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
+	u32 stat_pad_len = 0;	/* length of padding to align STATUS word */
+	struct brcm_message *mssg;	/* mailbox message */
+	struct spu_request_opts req_opts;
+	struct spu_cipher_parms cipher_parms;
+	struct spu_hash_parms hash_parms;
+	struct spu_aead_parms aead_parms;
+	unsigned int local_nbuf;
+	u32 spu_hdr_len;
+	unsigned int digestsize;
+	u16 rem = 0;
+
+	/*
+	 * number of entries in src and dst sg. Always includes SPU msg header.
+	 * rx always includes a buffer to catch digest and STATUS.
+	 */
+	u8 rx_frag_num = 3;
+	u8 tx_frag_num = 1;
+
+	flow_log("total_todo %u, total_sent %u\n",
+		 rctx->total_todo, rctx->total_sent);
+
+	memset(&req_opts, 0, sizeof(req_opts));
+	memset(&cipher_parms, 0, sizeof(cipher_parms));
+	memset(&hash_parms, 0, sizeof(hash_parms));
+	memset(&aead_parms, 0, sizeof(aead_parms));
+
+	req_opts.bd_suppress = true;
+	hash_parms.alg = ctx->auth.alg;
+	hash_parms.mode = ctx->auth.mode;
+	hash_parms.type = HASH_TYPE_NONE;
+	hash_parms.key_buf = (u8 *)ctx->authkey;
+	hash_parms.key_len = ctx->authkeylen;
+
+	/*
+	 * For hash algorithms, the assignment below looks a bit odd, but it
+	 * is needed for the AES-XCBC and AES-CMAC hash algorithms to
+	 * differentiate between 128-, 192-, and 256-bit keys. The hash
+	 * algorithm is selected based on the key size; for example, a
+	 * 128-bit key selects AES-128.
+	 */
+	cipher_parms.type = ctx->cipher_type;
+
+	mssg = &rctx->mb_mssg;
+	chunk_start = rctx->src_sent;
+
+	/*
+	 * Compute the amount remaining to hash. This may include data
+	 * carried over from previous requests.
+	 */
+	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
+	chunksize = nbytes_to_hash;
+	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
+	    (chunksize > ctx->max_payload))
+		chunksize = ctx->max_payload;
+
+	/*
+	 * If this is not a final request and the request data is not a
+	 * multiple of the block size, then simply park the extra data and
+	 * prefix it to the data for the next request.
+	 */
+	if (!rctx->is_final) {
+		u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
+		u16 new_len;  /* len of data to add to hash carry */
+
+		rem = chunksize % blocksize;   /* remainder */
+		if (rem) {
+			/* chunksize not a multiple of blocksize */
+			chunksize -= rem;
+			if (chunksize == 0) {
+				/* Don't have a full block to submit to hw */
+				new_len = rem - rctx->hash_carry_len;
+				sg_copy_part_to_buf(req->src, dest, new_len,
+						    rctx->src_sent);
+				rctx->hash_carry_len = rem;
+				flow_log("Exiting with hash carry len: %u\n",
+					 rctx->hash_carry_len);
+				packet_dump("  buf: ",
+					    rctx->hash_carry,
+					    rctx->hash_carry_len);
+				return -EAGAIN;
+			}
+		}
+	}
+
+	/* if we have hash carry, then prefix it to the data in this request */
+	local_nbuf = rctx->hash_carry_len;
+	rctx->hash_carry_len = 0;
+	if (local_nbuf)
+		tx_frag_num++;
+	new_data_len = chunksize - local_nbuf;
+
+	/* Count number of sg entries to be used in this request */
+	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
+				       new_data_len);
+
+	/* AES hashing keeps key size in type field, so need to copy it here */
+	if (hash_parms.alg == HASH_ALG_AES)
+		hash_parms.type = (enum hash_type)cipher_parms.type;
+	else
+		hash_parms.type = spu->spu_hash_type(rctx->total_sent);
+
+	digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
+					  hash_parms.type);
+	hash_parms.digestsize =	digestsize;
+
+	/* update the indexes */
+	rctx->total_sent += chunksize;
+	/* if we sent a prebuf, that data did not come from this req->src */
+	rctx->src_sent += new_data_len;
+
+	if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
+		hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
+							   hash_parms.mode,
+							   chunksize,
+							   blocksize);
+
+	/*
+	 * If a non-first chunk, then include the digest returned from the
+	 * previous chunk so that hw can add to it (except for AES types).
+	 */
+	if ((hash_parms.type == HASH_TYPE_UPDT) &&
+	    (hash_parms.alg != HASH_ALG_AES)) {
+		hash_parms.key_buf = rctx->incr_hash;
+		hash_parms.key_len = digestsize;
+	}
+
+	atomic64_add(chunksize, &iproc_priv.bytes_out);
+
+	flow_log("%s() final: %u nbuf: %u ",
+		 __func__, rctx->is_final, local_nbuf);
+
+	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
+		flow_log("max_payload infinite\n");
+	else
+		flow_log("max_payload %u\n", ctx->max_payload);
+
+	flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);
+
+	/* Prepend SPU header with type 3 BCM header */
+	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
+
+	hash_parms.prebuf_len = local_nbuf;
+	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
+					      BCM_HDR_LEN,
+					      &req_opts, &cipher_parms,
+					      &hash_parms, &aead_parms,
+					      new_data_len);
+
+	if (spu_hdr_len == 0) {
+		pr_err("Failed to create SPU request header\n");
+		return -EFAULT;
+	}
+
+	/*
+	 * Determine total length of padding required. Put all padding in one
+	 * buffer.
+	 */
+	data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
+	db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
+				   0, 0, hash_parms.pad_len);
+	if (spu->spu_tx_status_len())
+		stat_pad_len = spu->spu_wordalign_padlen(db_size);
+	if (stat_pad_len)
+		rx_frag_num++;
+	pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
+	if (pad_len) {
+		tx_frag_num++;
+		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
+				     hash_parms.pad_len, ctx->auth.alg,
+				     ctx->auth.mode, rctx->total_sent,
+				     stat_pad_len);
+	}
+
+	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
+			      spu_hdr_len);
+	packet_dump("    prebuf: ", rctx->hash_carry, local_nbuf);
+	flow_log("Data:\n");
+	dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
+	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
+
+	/*
+	 * Build mailbox message containing SPU request msg and rx buffers
+	 * to catch response message
+	 */
+	memset(mssg, 0, sizeof(*mssg));
+	mssg->type = BRCM_MESSAGE_SPU;
+	mssg->ctx = rctx;	/* Will be returned in response */
+
+	/* Create rx scatterlist to catch result */
+	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
+				     stat_pad_len);
+	if (err)
+		return err;
+
+	/* Create tx scatterlist containing SPU request message */
+	tx_frag_num += rctx->src_nents;
+	if (spu->spu_tx_status_len())
+		tx_frag_num++;
+	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
+				     local_nbuf, new_data_len, pad_len);
+	if (err)
+		return err;
+
+	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+	if (unlikely(err < 0))
+		return err;
+
+	return -EINPROGRESS;
+}
+
+/**
+ * spu_hmac_outer_hash() - Request synchronous software computation of the
+ * outer hash for an HMAC request.
+ * @req:  The HMAC request from the crypto API
+ * @ctx:  The session context
+ *
+ * Return: 0 if synchronous hash operation successful
+ *         -EINVAL if the hash algo is unrecognized
+ *         any other value indicates an error
+ */
+static int spu_hmac_outer_hash(struct ahash_request *req,
+			       struct iproc_ctx_s *ctx)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	unsigned int blocksize =
+		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+	int rc;
+
+	switch (ctx->auth.alg) {
+	case HASH_ALG_MD5:
+		rc = do_shash("md5", req->result, ctx->opad, blocksize,
+			      req->result, ctx->digestsize, NULL, 0);
+		break;
+	case HASH_ALG_SHA1:
+		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
+			      req->result, ctx->digestsize, NULL, 0);
+		break;
+	case HASH_ALG_SHA224:
+		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
+			      req->result, ctx->digestsize, NULL, 0);
+		break;
+	case HASH_ALG_SHA256:
+		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
+			      req->result, ctx->digestsize, NULL, 0);
+		break;
+	case HASH_ALG_SHA384:
+		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
+			      req->result, ctx->digestsize, NULL, 0);
+		break;
+	case HASH_ALG_SHA512:
+		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
+			      req->result, ctx->digestsize, NULL, 0);
+		break;
+	default:
+		pr_err("%s() Error : unknown hmac type\n", __func__);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+/**
+ * ahash_req_done() - Process a hash result from the SPU hardware.
+ * @rctx: Crypto request context
+ *
+ * Return: 0 if successful
+ *         < 0 if an error
+ */
+static int ahash_req_done(struct iproc_reqctx_s *rctx)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct crypto_async_request *areq = rctx->parent;
+	struct ahash_request *req = ahash_request_cast(areq);
+	struct iproc_ctx_s *ctx = rctx->ctx;
+	int err;
+
+	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
+
+	if (spu->spu_type == SPU_TYPE_SPUM) {
+		/*
+		 * Byte swap the output from the UPDT function to network
+		 * byte order.
+		 */
+		if (ctx->auth.alg == HASH_ALG_MD5) {
+			__swab32s((u32 *)req->result);
+			__swab32s(((u32 *)req->result) + 1);
+			__swab32s(((u32 *)req->result) + 2);
+			__swab32s(((u32 *)req->result) + 3);
+			__swab32s(((u32 *)req->result) + 4);
+		}
+	}
+
+	flow_dump("  digest ", req->result, ctx->digestsize);
+
+	/* if this an HMAC then do the outer hash */
+	if (rctx->is_sw_hmac) {
+		err = spu_hmac_outer_hash(req, ctx);
+		if (err < 0)
+			return err;
+		flow_dump("  hmac: ", req->result, ctx->digestsize);
+	}
+
+	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
+		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
+		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
+	} else {
+		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
+		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
+	}
+
+	return 0;
+}
+
+/**
+ * handle_ahash_resp() - Process a SPU response message for a hash request.
+ * Checks if the entire crypto API request has been processed, and if so,
+ * invokes post processing on the result.
+ * @rctx: Crypto request context
+ */
+static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
+{
+	struct iproc_ctx_s *ctx = rctx->ctx;
+#ifdef DEBUG
+	struct crypto_async_request *areq = rctx->parent;
+	struct ahash_request *req = ahash_request_cast(areq);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	unsigned int blocksize =
+		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+#endif
+	/*
+	 * Save the hash for use as input to the next op if hashing
+	 * incrementally. We might be copying too much, but that's easier than
+	 * figuring out the actual digest size here.
+	 */
+	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
+
+	flow_log("%s() blocksize:%u digestsize:%u\n",
+		 __func__, blocksize, ctx->digestsize);
+
+	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);
+
+	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
+		ahash_req_done(rctx);
+}
+
+/**
+ * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to receive
+ * a SPU response message for an AEAD request. Includes buffers to catch SPU
+ * message headers and the response data.
+ * @mssg:	mailbox message containing the receive sg
+ * @req:	Crypto API request
+ * @rctx:	crypto request context
+ * @rx_frag_num: number of scatterlist elements required to hold the
+ *		SPU response message
+ * @assoc_len:	Length of associated data included in the crypto request
+ * @ret_iv_len: Length of IV returned in response
+ * @resp_len:	Number of bytes of response data expected to be written to
+ *              dst buffer from crypto API
+ * @digestsize: Length of hash digest, in bytes
+ * @stat_pad_len: Number of bytes required to pad the STAT field to
+ *		a 4-byte boundary
+ *
+ * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
+ * when the request completes, whether the request is handled successfully or
+ * there is an error.
+ *
+ * Return:
+ *   0 if successful
+ *   < 0 if an error
+ */
+static int spu_aead_rx_sg_create(struct brcm_message *mssg,
+				 struct aead_request *req,
+				 struct iproc_reqctx_s *rctx,
+				 u8 rx_frag_num,
+				 unsigned int assoc_len,
+				 u32 ret_iv_len, unsigned int resp_len,
+				 unsigned int digestsize, u32 stat_pad_len)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct scatterlist *sg;	/* used to build sgs in mbox message */
+	struct iproc_ctx_s *ctx = rctx->ctx;
+	u32 datalen;		/* Number of bytes of response data expected */
+	u32 assoc_buf_len;
+	u8 data_padlen = 0;
+
+	if (ctx->is_rfc4543) {
+		/* RFC4543: only pad after data, not after AAD */
+		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
+							  assoc_len + resp_len);
+		assoc_buf_len = assoc_len;
+	} else {
+		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
+							  resp_len);
+		assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
+						assoc_len, ret_iv_len,
+						rctx->is_encrypt);
+	}
+
+	if (ctx->cipher.mode == CIPHER_MODE_CCM)
+		/* ICV (after data) must be in the next 32-bit word for CCM */
+		data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
+							 resp_len +
+							 data_padlen);
+
+	if (data_padlen)
+		/* have to catch gcm pad in separate buffer */
+		rx_frag_num++;
+
+	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
+				rctx->gfp);
+	if (!mssg->spu.dst)
+		return -ENOMEM;
+
+	sg = mssg->spu.dst;
+	sg_init_table(sg, rx_frag_num);
+
+	/* Space for SPU message header */
+	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
+
+	if (assoc_buf_len) {
+		/*
+		 * Don't write directly to req->dst, because SPU may pad the
+		 * assoc data in the response
+		 */
+		memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
+		sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
+	}
+
+	if (resp_len) {
+		/*
+		 * Copy in each dst sg entry from the request, up to chunksize.
+		 * The dst sg catches just the data; the digest is caught in a
+		 * separate buffer.
+		 */
+		datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
+					 rctx->dst_nents, resp_len);
+		if (datalen < (resp_len)) {
+			pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
+			       __func__, resp_len, datalen);
+			return -EFAULT;
+		}
+	}
+
+	/* If GCM/CCM data is padded, catch padding in separate buffer */
+	if (data_padlen) {
+		memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
+		sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
+	}
+
+	/* Always catch ICV in separate buffer */
+	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
+
+	flow_log("stat_pad_len %u\n", stat_pad_len);
+	if (stat_pad_len) {
+		memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
+		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
+	}
+
+	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
+	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
+
+	return 0;
+}
+
+/**
+ * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a
+ * SPU request message for an AEAD request. Includes SPU message headers and the
+ * request data.
+ * @mssg:	mailbox message containing the transmit sg
+ * @rctx:	crypto request context
+ * @tx_frag_num: number of scatterlist elements required to construct the
+ *		SPU request message
+ * @spu_hdr_len: length of SPU message header in bytes
+ * @assoc:	crypto API associated data scatterlist
+ * @assoc_len:	length of associated data
+ * @assoc_nents: number of scatterlist entries containing assoc data
+ * @aead_iv_len: length of AEAD IV, if included
+ * @chunksize:	Number of bytes of request data
+ * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM.
+ * @pad_len:	Number of pad bytes
+ * @incl_icv:	If true, write separate ICV buffer after data and
+ *              any padding
+ *
+ * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
+ * when the request completes, whether the request is handled successfully or
+ * there is an error.
+ *
+ * Return:
+ *   0 if successful
+ *   < 0 if an error
+ */
+static int spu_aead_tx_sg_create(struct brcm_message *mssg,
+				 struct iproc_reqctx_s *rctx,
+				 u8 tx_frag_num,
+				 u32 spu_hdr_len,
+				 struct scatterlist *assoc,
+				 unsigned int assoc_len,
+				 int assoc_nents,
+				 unsigned int aead_iv_len,
+				 unsigned int chunksize,
+				 u32 aad_pad_len, u32 pad_len, bool incl_icv)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct scatterlist *sg;	/* used to build sgs in mbox message */
+	struct scatterlist *assoc_sg = assoc;
+	struct iproc_ctx_s *ctx = rctx->ctx;
+	u32 datalen;		/* Number of bytes of data to write */
+	u32 written;		/* Number of bytes of data written */
+	u32 assoc_offset = 0;
+	u32 stat_len;
+
+	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
+				rctx->gfp);
+	if (!mssg->spu.src)
+		return -ENOMEM;
+
+	sg = mssg->spu.src;
+	sg_init_table(sg, tx_frag_num);
+
+	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
+		   BCM_HDR_LEN + spu_hdr_len);
+
+	if (assoc_len) {
+		/* Copy in each associated data sg entry from request */
+		written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
+					 assoc_nents, assoc_len);
+		if (written < assoc_len) {
+			pr_err("%s(): failed to copy assoc sg to mbox msg",
+			       __func__);
+			return -EFAULT;
+		}
+	}
+
+	if (aead_iv_len)
+		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
+
+	if (aad_pad_len) {
+		memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
+		sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
+	}
+
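+	/*
+	 * If the ICV is sent as a separate fragment, don't count it as part
+	 * of the data copied from the src sg.
+	 */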
+	datalen = chunksize;
+	if ((chunksize > ctx->digestsize) && incl_icv)
+		datalen -= ctx->digestsize;
+	if (datalen) {
+		/* For aead, a single msg should consume the entire src sg */
+		written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
+					 rctx->src_nents, datalen);
+		if (written < datalen) {
+			pr_err("%s(): failed to copy src sg to mbox msg",
+			       __func__);
+			return -EFAULT;
+		}
+	}
+
+	if (pad_len) {
+		memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
+		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
+	}
+
+	if (incl_icv)
+		sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
+
+	stat_len = spu->spu_tx_status_len();
+	if (stat_len) {
+		memset(rctx->msg_buf.tx_stat, 0, stat_len);
+		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
+	}
+	return 0;
+}
+
+/**
+ * handle_aead_req() - Submit a SPU request message for the next chunk of the
+ * current AEAD request.
+ * @rctx:  Crypto request context
+ *
+ * Unlike other operation types, we assume the length of the request fits in
+ * a single SPU request message. aead_enqueue() makes sure this is true.
+ * Comments for other op types regarding threading apply here as well.
+ *
+ * Unlike incremental hash ops, where the SPU returns the entire hash for
+ * truncated algs like SHA-224, the SPU returns just the truncated hash in
+ * response to AEAD requests. So digestsize is always ctx->digestsize here.
+ *
+ * Return: -EINPROGRESS: crypto request has been accepted and result will be
+ *			 returned asynchronously
+ *         Any other value indicates an error
+ */
+static int handle_aead_req(struct iproc_reqctx_s *rctx)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct crypto_async_request *areq = rctx->parent;
+	struct aead_request *req = container_of(areq,
+						struct aead_request, base);
+	struct iproc_ctx_s *ctx = rctx->ctx;
+	int err;
+	unsigned int chunksize;
+	unsigned int resp_len;
+	u32 spu_hdr_len;
+	u32 db_size;
+	u32 stat_pad_len;
+	u32 pad_len;
+	struct brcm_message *mssg;	/* mailbox message */
+	struct spu_request_opts req_opts;
+	struct spu_cipher_parms cipher_parms;
+	struct spu_hash_parms hash_parms;
+	struct spu_aead_parms aead_parms;
+	int assoc_nents = 0;
+	bool incl_icv = false;
+	unsigned int digestsize = ctx->digestsize;
+
+	/*
+	 * Number of entries in src and dst sg. Always includes SPU msg
+	 * header.
+	 */
+	u8 rx_frag_num = 2;	/* and STATUS */
+	u8 tx_frag_num = 1;
+
+	/* doing the whole thing at once */
+	chunksize = rctx->total_todo;
+
+	flow_log("%s: chunksize %u\n", __func__, chunksize);
+
+	memset(&req_opts, 0, sizeof(req_opts));
+	memset(&hash_parms, 0, sizeof(hash_parms));
+	memset(&aead_parms, 0, sizeof(aead_parms));
+
+	req_opts.is_inbound = !(rctx->is_encrypt);
+	req_opts.auth_first = ctx->auth_first;
+	req_opts.is_aead = true;
+	req_opts.is_esp = ctx->is_esp;
+
+	cipher_parms.alg = ctx->cipher.alg;
+	cipher_parms.mode = ctx->cipher.mode;
+	cipher_parms.type = ctx->cipher_type;
+	cipher_parms.key_buf = ctx->enckey;
+	cipher_parms.key_len = ctx->enckeylen;
+	cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
+	cipher_parms.iv_len = rctx->iv_ctr_len;
+
+	hash_parms.alg = ctx->auth.alg;
+	hash_parms.mode = ctx->auth.mode;
+	hash_parms.type = HASH_TYPE_NONE;
+	hash_parms.key_buf = (u8 *)ctx->authkey;
+	hash_parms.key_len = ctx->authkeylen;
+	hash_parms.digestsize = digestsize;
+
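+	/* For SHA-224 with a short key, pass a digest-sized key length */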
+	if ((ctx->auth.alg == HASH_ALG_SHA224) &&
+	    (ctx->authkeylen < SHA224_DIGEST_SIZE))
+		hash_parms.key_len = SHA224_DIGEST_SIZE;
+
+	aead_parms.assoc_size = req->assoclen;
+	if (ctx->is_esp && !ctx->is_rfc4543) {
+		/*
+		 * The 8-byte IV is included in the assoc data in the request.
+		 * SPU2 expects the AAD to include just the SPI and seqno, so
+		 * subtract off the IV length.
+		 */
+		aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
+
+		if (rctx->is_encrypt) {
+			aead_parms.return_iv = true;
+			aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
+			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
+		}
+	} else {
+		aead_parms.ret_iv_len = 0;
+	}
+
+	/*
+	 * Count number of sg entries from the crypto API request that are to
+	 * be included in this mailbox message. For dst sg, don't count space
+	 * for digest. Digest gets caught in a separate buffer and copied back
+	 * to dst sg when processing response.
+	 */
+	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
+	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
+	if (aead_parms.assoc_size)
+		assoc_nents = spu_sg_count(rctx->assoc, 0,
+					   aead_parms.assoc_size);
+
+	mssg = &rctx->mb_mssg;
+
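+	/* AEAD is handled in a single chunk, so all data is sent at once */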
+	rctx->total_sent = chunksize;
+	rctx->src_sent = chunksize;
+	if (spu->spu_assoc_resp_len(ctx->cipher.mode,
+				    aead_parms.assoc_size,
+				    aead_parms.ret_iv_len,
+				    rctx->is_encrypt))
+		rx_frag_num++;
+
+	aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
+						rctx->iv_ctr_len);
+
+	if (ctx->auth.alg == HASH_ALG_AES)
+		hash_parms.type = (enum hash_type)ctx->cipher_type;
+
+	/* General case AAD padding (CCM and RFC4543 special cases below) */
+	aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
+						 aead_parms.assoc_size);
+
+	/* General case data padding (CCM decrypt special case below) */
+	aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
+							   chunksize);
+
+	if (ctx->cipher.mode == CIPHER_MODE_CCM) {
+		/*
+		 * for CCM, AAD len + 2 (rather than AAD len) needs to be
+		 * 128-bit aligned
+		 */
+		aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
+					 ctx->cipher.mode,
+					 aead_parms.assoc_size + 2);
+
+		/*
+		 * And when decrypting CCM, pad without including the size of
+		 * the ICV, which is tacked onto the end of the chunk.
+		 */
+		if (!rctx->is_encrypt)
+			aead_parms.data_pad_len =
+				spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
+							chunksize - digestsize);
+
+		/* CCM also requires software to rewrite portions of IV: */
+		spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
+				       chunksize, rctx->is_encrypt,
+				       ctx->is_esp);
+	}
+
+	if (ctx->is_rfc4543) {
+		/*
+		 * RFC4543: data is included in AAD, so don't pad after AAD
+		 * and pad data based on both AAD + data size
+		 */
+		aead_parms.aad_pad_len = 0;
+		if (!rctx->is_encrypt)
+			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
+					ctx->cipher.mode,
+					aead_parms.assoc_size + chunksize -
+					digestsize);
+		else
+			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
+					ctx->cipher.mode,
+					aead_parms.assoc_size + chunksize);
+
+		req_opts.is_rfc4543 = true;
+	}
+
+	if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
+		incl_icv = true;
+		tx_frag_num++;
+		/* Copy ICV from end of src scatterlist to digest buf */
+		sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
+				    req->assoclen + rctx->total_sent -
+				    digestsize);
+	}
+
+	atomic64_add(chunksize, &iproc_priv.bytes_out);
+
+	flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
+
+	/* Prepend SPU header with type 3 BCM header */
+	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
+
+	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
+					      BCM_HDR_LEN, &req_opts,
+					      &cipher_parms, &hash_parms,
+					      &aead_parms, chunksize);
+
+	/* Determine total length of padding. Put all padding in one buffer. */
+	db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
+				   chunksize, aead_parms.aad_pad_len,
+				   aead_parms.data_pad_len, 0);
+
+	stat_pad_len = spu->spu_wordalign_padlen(db_size);
+
+	if (stat_pad_len)
+		rx_frag_num++;
+	pad_len = aead_parms.data_pad_len + stat_pad_len;
+	if (pad_len) {
+		tx_frag_num++;
+		spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
+				     aead_parms.data_pad_len, 0,
+				     ctx->auth.alg, ctx->auth.mode,
+				     rctx->total_sent, stat_pad_len);
+	}
+
+	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
+			      spu_hdr_len);
+	dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
+	packet_dump("    aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
+	packet_log("BD:\n");
+	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
+	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
+
+	/*
+	 * Build mailbox message containing SPU request msg and rx buffers
+	 * to catch response message
+	 */
+	memset(mssg, 0, sizeof(*mssg));
+	mssg->type = BRCM_MESSAGE_SPU;
+	mssg->ctx = rctx;	/* Will be returned in response */
+
+	/* Create rx scatterlist to catch result */
+	rx_frag_num += rctx->dst_nents;
+	resp_len = chunksize;
+
+	/*
+	 * Always catch the ICV in a separate buffer. We have to for GCM/CCM
+	 * because of padding, and for SHA-224 and other truncated SHAs
+	 * because the SPU sends back the entire digest.
+	 */
+	rx_frag_num++;
+
+	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
+	     (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
+		/*
+		 * Input is ciphertext plus ICV, but the ICV is not
+		 * included in the output.
+		 */
+		resp_len -= ctx->digestsize;
+		if (resp_len == 0)
+			/* no rx frags to catch output data */
+			rx_frag_num -= rctx->dst_nents;
+	}
+
+	err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
+				    aead_parms.assoc_size,
+				    aead_parms.ret_iv_len, resp_len, digestsize,
+				    stat_pad_len);
+	if (err)
+		return err;
+
+	/* Create tx scatterlist containing SPU request message */
+	tx_frag_num += rctx->src_nents;
+	tx_frag_num += assoc_nents;
+	if (aead_parms.aad_pad_len)
+		tx_frag_num++;
+	if (aead_parms.iv_len)
+		tx_frag_num++;
+	if (spu->spu_tx_status_len())
+		tx_frag_num++;
+	err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
+				    rctx->assoc, aead_parms.assoc_size,
+				    assoc_nents, aead_parms.iv_len, chunksize,
+				    aead_parms.aad_pad_len, pad_len, incl_icv);
+	if (err)
+		return err;
+
+	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+	if (unlikely(err < 0))
+		return err;
+
+	return -EINPROGRESS;
+}
+
+/**
+ * handle_aead_resp() - Process a SPU response message for an AEAD request.
+ * @rctx:  Crypto request context
+ */
+static void handle_aead_resp(struct iproc_reqctx_s *rctx)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct crypto_async_request *areq = rctx->parent;
+	struct aead_request *req = container_of(areq,
+						struct aead_request, base);
+	struct iproc_ctx_s *ctx = rctx->ctx;
+	u32 payload_len;
+	unsigned int icv_offset;
+	u32 result_len;
+
+	/* See how much data was returned */
+	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
+	flow_log("payload_len %u\n", payload_len);
+
+	/* only count payload */
+	atomic64_add(payload_len, &iproc_priv.bytes_in);
+
+	if (req->assoclen)
+		packet_dump("  assoc_data ", rctx->msg_buf.a.resp_aad,
+			    req->assoclen);
+
+	/*
+	 * If encrypting, copy the ICV back to the destination buffer. In the
+	 * decrypt case, the SPU gives us back the digest, but the crypto API
+	 * doesn't expect the ICV in the dst buffer.
+	 */
+	result_len = req->cryptlen;
+	if (rctx->is_encrypt) {
+		icv_offset = req->assoclen + rctx->total_sent;
+		packet_dump("  ICV: ", rctx->msg_buf.digest, ctx->digestsize);
+		flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
+		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
+				      ctx->digestsize, icv_offset);
+		result_len += ctx->digestsize;
+	}
+
+	packet_log("response data:  ");
+	dump_sg(req->dst, req->assoclen, result_len);
+
+	atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
+	if (ctx->cipher.alg == CIPHER_ALG_AES) {
+		if (ctx->cipher.mode == CIPHER_MODE_CCM)
+			atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
+		else if (ctx->cipher.mode == CIPHER_MODE_GCM)
+			atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
+		else
+			atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
+	} else {
+		atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
+	}
+}
+
+/**
+ * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request
+ * @rctx:  request context
+ *
+ * Mailbox scatterlists are allocated for each chunk. So free them after
+ * processing each chunk.
+ */
+static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
+{
+	/* mailbox message used to tx request */
+	struct brcm_message *mssg = &rctx->mb_mssg;
+
+	kfree(mssg->spu.src);
+	kfree(mssg->spu.dst);
+	memset(mssg, 0, sizeof(struct brcm_message));
+}
+
+/**
+ * finish_req() - Invoke the requester's completion callback when a request
+ * has been handled asynchronously.
+ * @rctx:  Request context
+ * @err:   Indicates whether the request was successful or not
+ *
+ * Ensures that cleanup has been done for request
+ */
+static void finish_req(struct iproc_reqctx_s *rctx, int err)
+{
+	struct crypto_async_request *areq = rctx->parent;
+
+	flow_log("%s() err:%d\n\n", __func__, err);
+
+	/* No harm done if already called */
+	spu_chunk_cleanup(rctx);
+
+	if (areq)
+		areq->complete(areq, err);
+}
+
+/**
+ * spu_rx_callback() - Callback from mailbox framework with a SPU response.
+ * @cl:		mailbox client structure for SPU driver
+ * @msg:	mailbox message containing SPU response
+ */
+static void spu_rx_callback(struct mbox_client *cl, void *msg)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct brcm_message *mssg = msg;
+	struct iproc_reqctx_s *rctx;
+	struct iproc_ctx_s *ctx;
+	struct crypto_async_request *areq;
+	int err = 0;
+
+	rctx = mssg->ctx;
+	if (unlikely(!rctx)) {
+		/* This is fatal */
+		pr_err("%s(): no request context", __func__);
+		err = -EFAULT;
+		goto cb_finish;
+	}
+	areq = rctx->parent;
+	ctx = rctx->ctx;
+
+	/* process the SPU status */
+	err = spu->spu_status_process(rctx->msg_buf.rx_stat);
+	if (err != 0) {
+		if (err == SPU_INVALID_ICV)
+			atomic_inc(&iproc_priv.bad_icv);
+		err = -EBADMSG;
+		goto cb_finish;
+	}
+
+	/* Process the SPU response message */
+	switch (rctx->ctx->alg->type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		handle_ablkcipher_resp(rctx);
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		handle_ahash_resp(rctx);
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+		handle_aead_resp(rctx);
+		break;
+	default:
+		err = -EINVAL;
+		goto cb_finish;
+	}
+
+	/*
+	 * If this response does not complete the request, then send the next
+	 * request chunk.
+	 */
+	if (rctx->total_sent < rctx->total_todo) {
+		/* Deallocate anything specific to previous chunk */
+		spu_chunk_cleanup(rctx);
+
+		switch (rctx->ctx->alg->type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			err = handle_ablkcipher_req(rctx);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			err = handle_ahash_req(rctx);
+			if (err == -EAGAIN)
+				/*
+				 * we saved data in hash carry, but tell crypto
+				 * API we successfully completed request.
+				 */
+				err = 0;
+			break;
+		case CRYPTO_ALG_TYPE_AEAD:
+			err = handle_aead_req(rctx);
+			break;
+		default:
+			err = -EINVAL;
+		}
+
+		if (err == -EINPROGRESS)
+			/* Successfully submitted request for next chunk */
+			return;
+	}
+
+cb_finish:
+	finish_req(rctx, err);
+}
+
+/* ==================== Kernel Cryptographic API ==================== */
+
+/**
+ * ablkcipher_enqueue() - Handle ablkcipher encrypt or decrypt request.
+ * @req:	Crypto API request
+ * @encrypt:	true if encrypting; false if decrypting
+ *
+ * Return: -EINPROGRESS if request accepted and result will be returned
+ *			asynchronously
+ *	   < 0 if an error
+ */
+static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
+{
+	struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
+	struct iproc_ctx_s *ctx =
+	    crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+	int err;
+
+	flow_log("%s() enc:%u\n", __func__, encrypt);
+
+	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	rctx->parent = &req->base;
+	rctx->is_encrypt = encrypt;
+	rctx->bd_suppress = false;
+	rctx->total_todo = req->nbytes;
+	rctx->src_sent = 0;
+	rctx->total_sent = 0;
+	rctx->total_received = 0;
+	rctx->ctx = ctx;
+
+	/* Initialize current position in src and dst scatterlists */
+	rctx->src_sg = req->src;
+	rctx->src_nents = 0;
+	rctx->src_skip = 0;
+	rctx->dst_sg = req->dst;
+	rctx->dst_nents = 0;
+	rctx->dst_skip = 0;
+
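+	/* Modes that take an IV or counter: save it for the SPU request */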
+	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
+	    ctx->cipher.mode == CIPHER_MODE_CTR ||
+	    ctx->cipher.mode == CIPHER_MODE_OFB ||
+	    ctx->cipher.mode == CIPHER_MODE_XTS ||
+	    ctx->cipher.mode == CIPHER_MODE_GCM ||
+	    ctx->cipher.mode == CIPHER_MODE_CCM) {
+		rctx->iv_ctr_len =
+		    crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
+		memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
+	} else {
+		rctx->iv_ctr_len = 0;
+	}
+
+	/* Choose a SPU to process this request */
+	rctx->chan_idx = select_channel();
+	err = handle_ablkcipher_req(rctx);
+	if (err != -EINPROGRESS)
+		/* synchronous result */
+		spu_chunk_cleanup(rctx);
+
+	return err;
+}
+
+static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+		      unsigned int keylen)
+{
+	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+	u32 tmp[DES_EXPKEY_WORDS];
+
+	if (keylen == DES_KEY_SIZE) {
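+		/*
+		 * des_ekey() returns 0 for a weak key. Reject weak keys if
+		 * the transform asked for weak-key checking.
+		 */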
+		if (des_ekey(tmp, key) == 0) {
+			if (crypto_ablkcipher_get_flags(cipher) &
+			    CRYPTO_TFM_REQ_WEAK_KEY) {
+				u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
+
+				crypto_ablkcipher_set_flags(cipher, flags);
+				return -EINVAL;
+			}
+		}
+
+		ctx->cipher_type = CIPHER_TYPE_DES;
+	} else {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			   unsigned int keylen)
+{
+	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+
+	if (keylen == (DES_KEY_SIZE * 3)) {
+		const u32 *K = (const u32 *)key;
+		u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+
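+		/* Reject keys where K1 == K2 or K2 == K3 (degenerate 3DES) */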
+		if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+		    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
+			crypto_ablkcipher_set_flags(cipher, flags);
+			return -EINVAL;
+		}
+
+		ctx->cipher_type = CIPHER_TYPE_3DES;
+	} else {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+		      unsigned int keylen)
+{
+	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+
+	if (ctx->cipher.mode == CIPHER_MODE_XTS)
+		/* XTS includes two keys of equal length */
+		keylen = keylen / 2;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		ctx->cipher_type = CIPHER_TYPE_AES128;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->cipher_type = CIPHER_TYPE_AES192;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->cipher_type = CIPHER_TYPE_AES256;
+		break;
+	default:
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
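+	/* Sanity check: a bounded max_payload must be AES-block aligned */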
+	WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
+		((ctx->max_payload % AES_BLOCK_SIZE) != 0));
+	return 0;
+}
+
+static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+		      unsigned int keylen)
+{
+	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+	int i;
+
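+	/*
+	 * Build the RC4 key/state block: a 4-byte state header (the i and j
+	 * indices, initially zero) followed by the key bytes, repeated as
+	 * needed to fill ARC4_MAX_KEY_SIZE.
+	 */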
+	ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
+
+	ctx->enckey[0] = 0x00;	/* 0x00 */
+	ctx->enckey[1] = 0x00;	/* i    */
+	ctx->enckey[2] = 0x00;	/* 0x00 */
+	ctx->enckey[3] = 0x00;	/* j    */
+	for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
+		ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];
+
+	ctx->cipher_type = CIPHER_TYPE_INIT;
+
+	return 0;
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			     unsigned int keylen)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+	struct spu_cipher_parms cipher_parms;
+	u32 alloc_len = 0;
+	int err;
+
+	flow_log("ablkcipher_setkey() keylen: %d\n", keylen);
+	flow_dump("  key: ", key, keylen);
+
+	switch (ctx->cipher.alg) {
+	case CIPHER_ALG_DES:
+		err = des_setkey(cipher, key, keylen);
+		break;
+	case CIPHER_ALG_3DES:
+		err = threedes_setkey(cipher, key, keylen);
+		break;
+	case CIPHER_ALG_AES:
+		err = aes_setkey(cipher, key, keylen);
+		break;
+	case CIPHER_ALG_RC4:
+		err = rc4_setkey(cipher, key, keylen);
+		break;
+	default:
+		pr_err("%s() Error: unknown cipher alg\n", __func__);
+		err = -EINVAL;
+	}
+	if (err)
+		return err;
+
+	/* RC4 already populated ctx->enckey */
+	if (ctx->cipher.alg != CIPHER_ALG_RC4) {
+		memcpy(ctx->enckey, key, keylen);
+		ctx->enckeylen = keylen;
+	}
+	/*
+	 * The SPU wants XTS keys in the reverse order from how the crypto API
+	 * presents them.
+	 */
+	if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
+	    (ctx->cipher.mode == CIPHER_MODE_XTS)) {
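+	/* Outer hash: hash(opad || inner digest), computed synchronously */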
+		unsigned int xts_keylen = keylen / 2;
+
+		memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
+		memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
+	}
+
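+	/* Header template size depends on the SPU hardware generation */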
+	if (spu->spu_type == SPU_TYPE_SPUM)
+		alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
+	else if (spu->spu_type == SPU_TYPE_SPU2)
+		alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
+	memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
+	cipher_parms.iv_buf = NULL;
+	cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
+	flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
+
+	cipher_parms.alg = ctx->cipher.alg;
+	cipher_parms.mode = ctx->cipher.mode;
+	cipher_parms.type = ctx->cipher_type;
+	cipher_parms.key_buf = ctx->enckey;
+	cipher_parms.key_len = ctx->enckeylen;
+
+	/* Prepend SPU request message with BCM header */
+	memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
+	ctx->spu_req_hdr_len =
+	    spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
+				     &cipher_parms);
+
+	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
+							  ctx->enckeylen,
+							  false);
+
+	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);
+
+	return 0;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);
+
+	return ablkcipher_enqueue(req, true);
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
+	return ablkcipher_enqueue(req, false);
+}
+
+static int ahash_enqueue(struct ahash_request *req)
+{
+	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
+	int err = 0;
+	const char *alg_name;
+
+	flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
+
+	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	rctx->parent = &req->base;
+	rctx->ctx = ctx;
+	rctx->bd_suppress = true;
+	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
+
+	/* Initialize position in src scatterlist */
+	rctx->src_sg = req->src;
+	rctx->src_skip = 0;
+	rctx->src_nents = 0;
+	rctx->dst_sg = NULL;
+	rctx->dst_skip = 0;
+	rctx->dst_nents = 0;
+
+	/* SPU2 hardware does not compute a hash of zero-length data */
+	if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
+	    (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
+		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+		flow_log("Doing %sfinal %s zero-len hash request in software\n",
+			 rctx->is_final ? "" : "non-", alg_name);
+		err = do_shash((unsigned char *)alg_name, req->result,
+			       NULL, 0, NULL, 0, ctx->authkey,
+			       ctx->authkeylen);
+		if (err < 0)
+			flow_log("Hash request failed with error %d\n", err);
+		return err;
+	}
+	/* Choose a SPU to process this request */
+	rctx->chan_idx = select_channel();
+
+	err = handle_ahash_req(rctx);
+	if (err != -EINPROGRESS)
+		/* synchronous result */
+		spu_chunk_cleanup(rctx);
+
+	if (err == -EAGAIN)
+		/*
+		 * we saved data in hash carry, but tell crypto API
+		 * we successfully completed request.
+		 */
+		err = 0;
+
+	return err;
+}
+
+static int __ahash_init(struct ahash_request *req)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
+
+	flow_log("%s()\n", __func__);
+
+	/* Initialize the context */
+	rctx->hash_carry_len = 0;
+	rctx->is_final = 0;
+
+	rctx->total_todo = 0;
+	rctx->src_sent = 0;
+	rctx->total_sent = 0;
+	rctx->total_received = 0;
+
+	ctx->digestsize = crypto_ahash_digestsize(tfm);
+	/* If we add a hash whose digest is larger, catch it here. */
+	WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);
+
+	rctx->is_sw_hmac = false;
+
+	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
+							  true);
+
+	return 0;
+}
+
+/**
+ * spu_no_incr_hash() - Determine whether incremental hashing is supported.
+ * @ctx:  Crypto session context
+ *
+ * SPU-2 does not support incremental hashing (we'll have to revisit and
+ * condition based on chip revision or device tree entry if future versions do
+ * support incremental hash)
+ *
+ * SPU-M also doesn't support incremental hashing of AES-XCBC
+ *
+ * Return: true if incremental hashing is not supported
+ *         false otherwise
+ */
+bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+
+	if (spu->spu_type == SPU_TYPE_SPU2)
+		return true;
+
+	if ((ctx->auth.alg == HASH_ALG_AES) &&
+	    (ctx->auth.mode == HASH_MODE_XCBC))
+		return true;
+
+	/* Otherwise, incremental hashing is supported */
+	return false;
+}
+
+static int ahash_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
+	const char *alg_name;
+	struct crypto_shash *hash;
+	int ret;
+	gfp_t gfp;
+
+	if (spu_no_incr_hash(ctx)) {
+		/*
+		 * If we get an incremental hashing request and it's not
+		 * supported by the hardware, we need to handle it in software
+		 * by calling synchronous hash functions.
+		 */
+		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+		hash = crypto_alloc_shash(alg_name, 0, 0);
+		if (IS_ERR(hash)) {
+			ret = PTR_ERR(hash);
+			goto err;
+		}
+
+		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+		ctx->shash = kmalloc(sizeof(*ctx->shash) +
+				     crypto_shash_descsize(hash), gfp);
+		if (!ctx->shash) {
+			ret = -ENOMEM;
+			goto err_hash;
+		}
+		ctx->shash->tfm = hash;
+		ctx->shash->flags = 0;
+
+		/* Set the key using data we already have from setkey */
+		if (ctx->authkeylen > 0) {
+			ret = crypto_shash_setkey(hash, ctx->authkey,
+						  ctx->authkeylen);
+			if (ret)
+				goto err_shash;
+		}
+
+		/* Initialize hash w/ this key and other params */
+		ret = crypto_shash_init(ctx->shash);
+		if (ret)
+			goto err_shash;
+	} else {
+		/* Otherwise call the internal function which uses SPU hw */
+		ret = __ahash_init(req);
+	}
+
+	return ret;
+
+err_shash:
+	kfree(ctx->shash);
+err_hash:
+	crypto_free_shash(hash);
+err:
+	return ret;
+}
+
+static int __ahash_update(struct ahash_request *req)
+{
+	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
+
+	flow_log("ahash_update() nbytes:%u\n", req->nbytes);
+
+	if (!req->nbytes)
+		return 0;
+	rctx->total_todo += req->nbytes;
+	rctx->src_sent = 0;
+
+	return ahash_enqueue(req);
+}
+
+static int ahash_update(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
+	u8 *tmpbuf;
+	int ret;
+	int nents;
+	gfp_t gfp;
+
+	if (spu_no_incr_hash(ctx)) {
+		/*
+		 * If we get an incremental hashing request and it's not
+		 * supported by the hardware, we need to handle it in software
+		 * by calling synchronous hash functions.
+		 */
+		if (req->src)
+			nents = sg_nents(req->src);
+		else
+			return -EINVAL;
+
+		/* Copy data from req scatterlist to tmp buffer */
+		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+		tmpbuf = kmalloc(req->nbytes, gfp);
+		if (!tmpbuf)
+			return -ENOMEM;
+
+		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
+				req->nbytes) {
+			kfree(tmpbuf);
+			return -EINVAL;
+		}
+
+		/* Call synchronous update */
+		ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
+		kfree(tmpbuf);
+	} else {
+		/* Otherwise call the internal function which uses SPU hw */
+		ret = __ahash_update(req);
+	}
+
+	return ret;
+}
+
+static int __ahash_final(struct ahash_request *req)
+{
+	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
+
+	flow_log("ahash_final() nbytes:%u\n", req->nbytes);
+
+	rctx->is_final = 1;
+
+	return ahash_enqueue(req);
+}
+
+static int ahash_final(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
+	int ret;
+
+	if (spu_no_incr_hash(ctx)) {
+		/*
+		 * If we get an incremental hashing request and it's not
+		 * supported by the hardware, we need to handle it in software
+		 * by calling synchronous hash functions.
+		 */
+		ret = crypto_shash_final(ctx->shash, req->result);
+
+		/* Done with hash, can deallocate it now */
+		crypto_free_shash(ctx->shash->tfm);
+		kfree(ctx->shash);
+
+	} else {
+		/* Otherwise call the internal function which uses SPU hw */
+		ret = __ahash_final(req);
+	}
+
+	return ret;
+}
+
+static int __ahash_finup(struct ahash_request *req)
+{
+	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
+
+	flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
+
+	rctx->total_todo += req->nbytes;
+	rctx->src_sent = 0;
+	rctx->is_final = 1;
+
+	return ahash_enqueue(req);
+}
+
+static int ahash_finup(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
+	u8 *tmpbuf;
+	int ret;
+	int nents;
+	gfp_t gfp;
+
+	if (spu_no_incr_hash(ctx)) {
+		/*
+		 * If we get an incremental hashing request and it's not
+		 * supported by the hardware, we need to handle it in software
+		 * by calling synchronous hash functions.
+		 */
+		if (req->src) {
+			nents = sg_nents(req->src);
+		} else {
+			ret = -EINVAL;
+			goto ahash_finup_exit;
+		}
+
+		/* Copy data from req scatterlist to tmp buffer */
+		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+		tmpbuf = kmalloc(req->nbytes, gfp);
+		if (!tmpbuf) {
+			ret = -ENOMEM;
+			goto ahash_finup_exit;
+		}
+
+		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
+				req->nbytes) {
+			ret = -EINVAL;
+			goto ahash_finup_free;
+		}
+
+		/* Call synchronous update */
+		ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
+					 req->result);
+	} else {
+		/* Otherwise call the internal function which uses SPU hw */
+		return __ahash_finup(req);
+	}
+ahash_finup_free:
+	kfree(tmpbuf);
+
+ahash_finup_exit:
+	/* Done with hash, can deallocate it now */
+	crypto_free_shash(ctx->shash->tfm);
+	kfree(ctx->shash);
+	return ret;
+}
+
+static int ahash_digest(struct ahash_request *req)
+{
+	int err = 0;
+
+	flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
+
+	/* whole thing at once */
+	err = __ahash_init(req);
+	if (!err)
+		err = __ahash_finup(req);
+
+	return err;
+}
+
+static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
+			unsigned int keylen)
+{
+	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
+
+	flow_log("%s() ahash:%p key:%p keylen:%u\n",
+		 __func__, ahash, key, keylen);
+	flow_dump("  key: ", key, keylen);
+
+	if (ctx->auth.alg == HASH_ALG_AES) {
+		switch (keylen) {
+		case AES_KEYSIZE_128:
+			ctx->cipher_type = CIPHER_TYPE_AES128;
+			break;
+		case AES_KEYSIZE_192:
+			ctx->cipher_type = CIPHER_TYPE_AES192;
+			break;
+		case AES_KEYSIZE_256:
+			ctx->cipher_type = CIPHER_TYPE_AES256;
+			break;
+		default:
+			pr_err("%s() Error: Invalid key length\n", __func__);
+			return -EINVAL;
+		}
+	} else {
+		pr_err("%s() Error: unknown hash alg\n", __func__);
+		return -EINVAL;
+	}
+	memcpy(ctx->authkey, key, keylen);
+	ctx->authkeylen = keylen;
+
+	return 0;
+}
+
+static int ahash_export(struct ahash_request *req, void *out)
+{
+	const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
+	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
+
+	spu_exp->total_todo = rctx->total_todo;
+	spu_exp->total_sent = rctx->total_sent;
+	spu_exp->is_sw_hmac = rctx->is_sw_hmac;
+	memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
+	spu_exp->hash_carry_len = rctx->hash_carry_len;
+	memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
+
+	return 0;
+}
+
+static int ahash_import(struct ahash_request *req, const void *in)
+{
+	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
+	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
+
+	rctx->total_todo = spu_exp->total_todo;
+	rctx->total_sent = spu_exp->total_sent;
+	rctx->is_sw_hmac = spu_exp->is_sw_hmac;
+	memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
+	rctx->hash_carry_len = spu_exp->hash_carry_len;
+	memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
+
+	return 0;
+}
+
+static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+			     unsigned int keylen)
+{
+	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
+	unsigned int blocksize =
+		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	unsigned int index;
+	int rc;
+
+	flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
+		 __func__, ahash, key, keylen, blocksize, digestsize);
+	flow_dump("  key: ", key, keylen);
+
+	if (keylen > blocksize) {
+		switch (ctx->auth.alg) {
+		case HASH_ALG_MD5:
+			rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
+				      0, NULL, 0);
+			break;
+		case HASH_ALG_SHA1:
+			rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
+				      0, NULL, 0);
+			break;
+		case HASH_ALG_SHA224:
+			rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
+				      0, NULL, 0);
+			break;
+		case HASH_ALG_SHA256:
+			rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
+				      0, NULL, 0);
+			break;
+		case HASH_ALG_SHA384:
+			rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
+				      0, NULL, 0);
+			break;
+		case HASH_ALG_SHA512:
+			rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
+				      0, NULL, 0);
+			break;
+		case HASH_ALG_SHA3_224:
+			rc = do_shash("sha3-224", ctx->authkey, key, keylen,
+				      NULL, 0, NULL, 0);
+			break;
+		case HASH_ALG_SHA3_256:
+			rc = do_shash("sha3-256", ctx->authkey, key, keylen,
+				      NULL, 0, NULL, 0);
+			break;
+		case HASH_ALG_SHA3_384:
+			rc = do_shash("sha3-384", ctx->authkey, key, keylen,
+				      NULL, 0, NULL, 0);
+			break;
+		case HASH_ALG_SHA3_512:
+			rc = do_shash("sha3-512", ctx->authkey, key, keylen,
+				      NULL, 0, NULL, 0);
+			break;
+		default:
+			pr_err("%s() Error: unknown hash alg\n", __func__);
+			return -EINVAL;
+		}
+		if (rc < 0) {
+			pr_err("%s() Error %d computing shash for %s\n",
+			       __func__, rc, hash_alg_name[ctx->auth.alg]);
+			return rc;
+		}
+		ctx->authkeylen = digestsize;
+
+		flow_log("  keylen > blocksize... hashed\n");
+		flow_dump("  newkey: ", ctx->authkey, ctx->authkeylen);
+	} else {
+		memcpy(ctx->authkey, key, keylen);
+		ctx->authkeylen = keylen;
+	}
+
+	/*
+	 * The full HMAC operation in SPU-M is not verified, so keep the
+	 * generation of IPAD and OPAD and the outer hashing in software.
+	 */
+	if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
+		memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
+		memset(ctx->ipad + ctx->authkeylen, 0,
+		       blocksize - ctx->authkeylen);
+		ctx->authkeylen = 0;
+		memcpy(ctx->opad, ctx->ipad, blocksize);
+
+		for (index = 0; index < blocksize; index++) {
+			ctx->ipad[index] ^= HMAC_IPAD_VALUE;
+			ctx->opad[index] ^= HMAC_OPAD_VALUE;
+		}
+
+		flow_dump("  ipad: ", ctx->ipad, blocksize);
+		flow_dump("  opad: ", ctx->opad, blocksize);
+	}
+	ctx->digestsize = digestsize;
+	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
+
+	return 0;
+}
+
+static int ahash_hmac_init(struct ahash_request *req)
+{
+	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
+	unsigned int blocksize =
+			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+	flow_log("ahash_hmac_init()\n");
+
+	/* init the context as a hash */
+	ahash_init(req);
+
+	if (!spu_no_incr_hash(ctx)) {
+		/* SPU-M can do incr hashing but needs sw for outer HMAC */
+		rctx->is_sw_hmac = true;
+		ctx->auth.mode = HASH_MODE_HASH;
+		/* start with a prepended ipad */
+		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
+		rctx->hash_carry_len = blocksize;
+		rctx->total_todo += blocksize;
+	}
+
+	return 0;
+}
+
+static int ahash_hmac_update(struct ahash_request *req)
+{
+	flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
+
+	if (!req->nbytes)
+		return 0;
+
+	return ahash_update(req);
+}
+
+static int ahash_hmac_final(struct ahash_request *req)
+{
+	flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
+
+	return ahash_final(req);
+}
+
+static int ahash_hmac_finup(struct ahash_request *req)
+{
+	flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes);
+
+	return ahash_finup(req);
+}
+
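+/**
+ * ahash_hmac_digest() - Compute an HMAC over the request data in a single
+ * operation. On SPU2 the full HMAC is performed in hardware; on SPU-M the
+ * inner hash is started with a prepended IPAD block and the outer hash is
+ * handled in software.
+ * @req: the async hash request
+ *
+ * Return: value from __ahash_finup()
+ */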
+static int ahash_hmac_digest(struct ahash_request *req)
+{
+	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
+	unsigned int blocksize =
+			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+	flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
+
+	/* Perform initialization and then call finup */
+	__ahash_init(req);
+
+	if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
+		/*
+		 * SPU2 implements the full HMAC in hardware, so there is no
+		 * need to generate IPAD, OPAD and the outer hash in software.
+		 * Only when the key is longer than the hash block size does
+		 * SPU2 expect the key to be hashed down to digest size and
+		 * fed in as the hash key.
+		 */
+		rctx->is_sw_hmac = false;
+		ctx->auth.mode = HASH_MODE_HMAC;
+	} else {
+		rctx->is_sw_hmac = true;
+		ctx->auth.mode = HASH_MODE_HASH;
+		/* start with a prepended ipad */
+		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
+		rctx->hash_carry_len = blocksize;
+		rctx->total_todo += blocksize;
+	}
+
+	return __ahash_finup(req);
+}
+
+/* aead helpers */
+
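+/**
+ * aead_need_fallback() - Determine whether an AEAD request must be handled
+ * by the software fallback cipher, due to SPU hardware limitations such as
+ * zero-length GCM/CCM requests, unsupported CCM digest sizes, zero-length
+ * AAD on SPU-M NSP, or a payload larger than the hardware maximum.
+ * @req: the AEAD request
+ *
+ * Return: true if the fallback cipher should be used, false otherwise
+ */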
+static int aead_need_fallback(struct aead_request *req)
+{
+	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
+	u32 payload_len;
+
+	/*
+	 * SPU hardware cannot handle the AES-GCM/CCM case where plaintext
+	 * and AAD are both 0 bytes long. So use fallback in this case.
+	 */
+	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
+	     (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
+	    (req->assoclen == 0)) {
+		if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
+		    (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
+			flow_log("AES GCM/CCM needs fallback for 0 len req\n");
+			return 1;
+		}
+	}
+
+	/* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */
+	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
+	    (spu->spu_type == SPU_TYPE_SPUM) &&
+	    (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
+	    (ctx->digestsize != 16)) {
+		flow_log("%s() AES CCM needs fallback for digest size %d\n",
+			 __func__, ctx->digestsize);
+		return 1;
+	}
+
+	/*
+	 * SPU-M on NSP has an issue where AES-CCM hash is not correct
+	 * when AAD size is 0
+	 */
+	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
+	    (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
+	    (req->assoclen == 0)) {
+		flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
+			 __func__);
+		return 1;
+	}
+
+	payload_len = req->cryptlen;
+	if (spu->spu_type == SPU_TYPE_SPUM)
+		payload_len += req->assoclen;
+
+	flow_log("%s() payload len: %u\n", __func__, payload_len);
+
+	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
+		return 0;
+	else
+		return payload_len > ctx->max_payload;
+}
+
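+/**
+ * aead_complete() - Completion callback for an AEAD request handled by the
+ * fallback cipher. Restores the original transform, completion function and
+ * callback data, then invokes the original completion function.
+ * @areq: the async crypto request
+ * @err:  completion status from the fallback cipher
+ */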
+static void aead_complete(struct crypto_async_request *areq, int err)
+{
+	struct aead_request *req =
+	    container_of(areq, struct aead_request, base);
+	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+	flow_log("%s() err:%d\n", __func__, err);
+
+	areq->tfm = crypto_aead_tfm(aead);
+
+	areq->complete = rctx->old_complete;
+	areq->data = rctx->old_data;
+
+	areq->complete(areq, err);
+}
+
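+/**
+ * aead_do_fallback() - Hand an AEAD request to the software fallback cipher.
+ * The original completion callback is saved and aead_complete() is chained
+ * in so the transform can be restored when the request finishes.
+ * @req:        the AEAD request
+ * @is_encrypt: true to encrypt, false to decrypt
+ *
+ * Return: value from the fallback cipher, or -EINVAL if no fallback cipher
+ *	   has been allocated
+ */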
+static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
+	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
+	int err;
+	u32 req_flags;
+
+	flow_log("%s() enc:%u\n", __func__, is_encrypt);
+
+	if (ctx->fallback_cipher) {
+		/* Store the cipher tfm and then use the fallback tfm */
+		rctx->old_tfm = tfm;
+		aead_request_set_tfm(req, ctx->fallback_cipher);
+		/*
+		 * Save the callback and chain ourselves in, so we can restore
+		 * the tfm
+		 */
+		rctx->old_complete = req->base.complete;
+		rctx->old_data = req->base.data;
+		req_flags = aead_request_flags(req);
+		aead_request_set_callback(req, req_flags, aead_complete, req);
+		err = is_encrypt ? crypto_aead_encrypt(req) :
+		    crypto_aead_decrypt(req);
+
+		if (err == 0) {
+			/*
+			 * fallback was synchronous (did not return
+			 * -EINPROGRESS). So restore request state here.
+			 */
+			aead_request_set_callback(req, req_flags,
+						  rctx->old_complete, req);
+			req->base.data = rctx->old_data;
+			aead_request_set_tfm(req, aead);
+			flow_log("%s() fallback completed successfully\n\n",
+				 __func__);
+		}
+	} else {
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
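+/**
+ * aead_enqueue() - Set up the request context for an AEAD operation and
+ * submit it to a SPU hardware channel, or hand it to the software fallback
+ * cipher if the hardware cannot process the request.
+ * @req:        the AEAD request
+ * @is_encrypt: true to encrypt, false to decrypt
+ *
+ * Return: value from handle_aead_req() or aead_do_fallback(), or -EINVAL if
+ *	   the request parameters are invalid
+ */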
+static int aead_enqueue(struct aead_request *req, bool is_encrypt)
+{
+	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
+	int err;
+
+	flow_log("%s() enc:%u\n", __func__, is_encrypt);
+
+	if (req->assoclen > MAX_ASSOC_SIZE) {
+		pr_err("%s() Error: associated data too long (%u > %u bytes)\n",
+		       __func__, req->assoclen, MAX_ASSOC_SIZE);
+		return -EINVAL;
+	}
+
+	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	rctx->parent = &req->base;
+	rctx->is_encrypt = is_encrypt;
+	rctx->bd_suppress = false;
+	rctx->total_todo = req->cryptlen;
+	rctx->src_sent = 0;
+	rctx->total_sent = 0;
+	rctx->total_received = 0;
+	rctx->is_sw_hmac = false;
+	rctx->ctx = ctx;
+	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
+
+	/* assoc data is at start of src sg */
+	rctx->assoc = req->src;
+
+	/*
+	 * Init current position in src scatterlist to be after assoc data.
+	 * src_skip set to buffer offset where data begins. (Assoc data could
+	 * end in the middle of a buffer.)
+	 */
+	if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
+			     &rctx->src_skip) < 0) {
+		pr_err("%s() Error: Unable to find start of src data\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	rctx->src_nents = 0;
+	rctx->dst_nents = 0;
+	if (req->dst == req->src) {
+		rctx->dst_sg = rctx->src_sg;
+		rctx->dst_skip = rctx->src_skip;
+	} else {
+		/*
+		 * Expect req->dst to have room for assoc data followed by
+		 * output data and ICV, if encrypt. So initialize dst_sg
+		 * to point beyond assoc len offset.
+		 */
+		if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
+				     &rctx->dst_skip) < 0) {
+			pr_err("%s() Error: Unable to find start of dst data\n",
+			       __func__);
+			return -EINVAL;
+		}
+	}
+
+	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
+	    ctx->cipher.mode == CIPHER_MODE_CTR ||
+	    ctx->cipher.mode == CIPHER_MODE_OFB ||
+	    ctx->cipher.mode == CIPHER_MODE_XTS ||
+	    ctx->cipher.mode == CIPHER_MODE_GCM) {
+		rctx->iv_ctr_len =
+			ctx->salt_len +
+			crypto_aead_ivsize(crypto_aead_reqtfm(req));
+	} else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
+		rctx->iv_ctr_len = CCM_AES_IV_SIZE;
+	} else {
+		rctx->iv_ctr_len = 0;
+	}
+
+	rctx->hash_carry_len = 0;
+
+	flow_log("  src sg: %p\n", req->src);
+	flow_log("  rctx->src_sg: %p, src_skip %u\n",
+		 rctx->src_sg, rctx->src_skip);
+	flow_log("  assoc:  %p, assoclen %u\n", rctx->assoc, req->assoclen);
+	flow_log("  dst sg: %p\n", req->dst);
+	flow_log("  rctx->dst_sg: %p, dst_skip %u\n",
+		 rctx->dst_sg, rctx->dst_skip);
+	flow_log("  iv_ctr_len:%u\n", rctx->iv_ctr_len);
+	flow_dump("  iv: ", req->iv, rctx->iv_ctr_len);
+	flow_log("  authkeylen:%u\n", ctx->authkeylen);
+	flow_log("  is_esp: %s\n", ctx->is_esp ? "yes" : "no");
+
+		flow_log("  max_payload infinite\n");
+		flow_log("  max_payload infinite");
+	else
+		flow_log("  max_payload: %u\n", ctx->max_payload);
+
+	if (unlikely(aead_need_fallback(req)))
+		return aead_do_fallback(req, is_encrypt);
+
+	/*
+	 * Do memory allocations for request after fallback check, because if we
+	 * do fallback, we won't call finish_req() to dealloc.
+	 */
+	if (rctx->iv_ctr_len) {
+		if (ctx->salt_len)
+			memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
+			       ctx->salt, ctx->salt_len);
+		memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
+		       req->iv,
+		       rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
+	}
+
+	rctx->chan_idx = select_channel();
+	err = handle_aead_req(rctx);
+	if (err != -EINPROGRESS)
+		/* synchronous result */
+		spu_chunk_cleanup(rctx);
+
+	return err;
+}
+
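+/**
+ * aead_authenc_setkey() - setkey() operation for authenc AEAD transforms.
+ * @cipher: AEAD structure
+ * @key:    crypto_authenc key blob: an rtattr carrying the encryption key
+ *          length, followed by the authentication key and the encryption key
+ * @keylen: length of the key blob, in bytes
+ *
+ * Splits the key blob into its authentication and encryption parts, selects
+ * the cipher type from the encryption key length, and also keys the software
+ * fallback cipher.
+ *
+ * Return: 0 if successful, else error code
+ */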
+static int aead_authenc_setkey(struct crypto_aead *cipher,
+			       const u8 *key, unsigned int keylen)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
+	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+	struct rtattr *rta = (void *)key;
+	struct crypto_authenc_key_param *param;
+	const u8 *origkey = key;
+	const unsigned int origkeylen = keylen;
+
+	int ret = 0;
+
+	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
+		 keylen);
+	flow_dump("  key: ", key, keylen);
+
+	if (!RTA_OK(rta, keylen))
+		goto badkey;
+	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+		goto badkey;
+	if (RTA_PAYLOAD(rta) < sizeof(*param))
+		goto badkey;
+
+	param = RTA_DATA(rta);
+	ctx->enckeylen = be32_to_cpu(param->enckeylen);
+
+	key += RTA_ALIGN(rta->rta_len);
+	keylen -= RTA_ALIGN(rta->rta_len);
+
+	if (keylen < ctx->enckeylen)
+		goto badkey;
+	if (ctx->enckeylen > MAX_KEY_SIZE)
+		goto badkey;
+
+	ctx->authkeylen = keylen - ctx->enckeylen;
+
+	if (ctx->authkeylen > MAX_KEY_SIZE)
+		goto badkey;
+
+	memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
+	/* May end up padding auth key. So make sure it's zeroed. */
+	memset(ctx->authkey, 0, sizeof(ctx->authkey));
+	memcpy(ctx->authkey, key, ctx->authkeylen);
+
+	switch (ctx->alg->cipher_info.alg) {
+	case CIPHER_ALG_DES:
+		if (ctx->enckeylen == DES_KEY_SIZE) {
+			u32 tmp[DES_EXPKEY_WORDS];
+			u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
+
+			if (des_ekey(tmp, key) == 0) {
+				if (crypto_aead_get_flags(cipher) &
+				    CRYPTO_TFM_REQ_WEAK_KEY) {
+					crypto_aead_set_flags(cipher, flags);
+					return -EINVAL;
+				}
+			}
+
+			ctx->cipher_type = CIPHER_TYPE_DES;
+		} else {
+			goto badkey;
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
+			const u32 *K = (const u32 *)key;
+			u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+
+			if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+			    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
+				crypto_aead_set_flags(cipher, flags);
+				return -EINVAL;
+			}
+
+			ctx->cipher_type = CIPHER_TYPE_3DES;
+		} else {
+			crypto_aead_set_flags(cipher,
+					      CRYPTO_TFM_RES_BAD_KEY_LEN);
+			return -EINVAL;
+		}
+		break;
+	case CIPHER_ALG_AES:
+		switch (ctx->enckeylen) {
+		case AES_KEYSIZE_128:
+			ctx->cipher_type = CIPHER_TYPE_AES128;
+			break;
+		case AES_KEYSIZE_192:
+			ctx->cipher_type = CIPHER_TYPE_AES192;
+			break;
+		case AES_KEYSIZE_256:
+			ctx->cipher_type = CIPHER_TYPE_AES256;
+			break;
+		default:
+			goto badkey;
+		}
+		break;
+	case CIPHER_ALG_RC4:
+		ctx->cipher_type = CIPHER_TYPE_INIT;
+		break;
+	default:
+		pr_err("%s() Error: Unknown cipher alg\n", __func__);
+		return -EINVAL;
+	}
+
+	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
+		 ctx->authkeylen);
+	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
+	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
+
+	/* setkey the fallback just in case we need to use it */
+	if (ctx->fallback_cipher) {
+		flow_log("  running fallback setkey()\n");
+
+		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+		ctx->fallback_cipher->base.crt_flags |=
+		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
+		ret = crypto_aead_setkey(ctx->fallback_cipher, origkey,
+					 origkeylen);
+		if (ret) {
+			flow_log("  fallback setkey() returned:%d\n", ret);
+			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+			tfm->crt_flags |=
+			    (ctx->fallback_cipher->base.crt_flags &
+			     CRYPTO_TFM_RES_MASK);
+		}
+	}
+
+	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
+							  ctx->enckeylen,
+							  false);
+
+	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
+
+	return ret;
+
+badkey:
+	ctx->enckeylen = 0;
+	ctx->authkeylen = 0;
+	ctx->digestsize = 0;
+
+	crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
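+/**
+ * aead_gcm_ccm_setkey() - setkey() operation for GCM and CCM AEAD transforms.
+ * @cipher: AEAD structure
+ * @key:    AES key
+ * @keylen: length of the key, in bytes
+ *
+ * Stores the AES key, selects the cipher type from the key length, and also
+ * keys the software fallback cipher.
+ *
+ * Return: 0 if successful, else error code
+ */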
+static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
+			       const u8 *key, unsigned int keylen)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
+	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+
+	int ret = 0;
+
+	flow_log("%s() keylen:%u\n", __func__, keylen);
+	flow_dump("  key: ", key, keylen);
+
+	if (!ctx->is_esp)
+		ctx->digestsize = keylen;
+
+	ctx->enckeylen = keylen;
+	ctx->authkeylen = 0;
+	memcpy(ctx->enckey, key, ctx->enckeylen);
+
+	switch (ctx->enckeylen) {
+	case AES_KEYSIZE_128:
+		ctx->cipher_type = CIPHER_TYPE_AES128;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->cipher_type = CIPHER_TYPE_AES192;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->cipher_type = CIPHER_TYPE_AES256;
+		break;
+	default:
+		goto badkey;
+	}
+
+	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
+		 ctx->authkeylen);
+	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
+	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
+
+	/* setkey the fallback just in case we need to use it */
+	if (ctx->fallback_cipher) {
+		flow_log("  running fallback setkey()\n");
+
+		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+		ctx->fallback_cipher->base.crt_flags |=
+		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
+		ret = crypto_aead_setkey(ctx->fallback_cipher, key,
+					 keylen + ctx->salt_len);
+		if (ret) {
+			flow_log("  fallback setkey() returned:%d\n", ret);
+			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+			tfm->crt_flags |=
+			    (ctx->fallback_cipher->base.crt_flags &
+			     CRYPTO_TFM_RES_MASK);
+		}
+	}
+
+	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
+							  ctx->enckeylen,
+							  false);
+
+	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
+
+	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
+		 ctx->authkeylen);
+
+	return ret;
+
+badkey:
+	ctx->enckeylen = 0;
+	ctx->authkeylen = 0;
+	ctx->digestsize = 0;
+
+	crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+/**
+ * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES.
+ * @cipher: AEAD structure
+ * @key:    Key followed by 4 bytes of salt
+ * @keylen: Length of key plus salt, in bytes
+ *
+ * Extracts salt from key and stores it to be prepended to IV on each request.
+ * Digest is always 16 bytes
+ *
+ * Return: Value from generic gcm setkey.
+ */
+static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
+			       const u8 *key, unsigned int keylen)
+{
+	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
+
+	flow_log("%s\n", __func__);
+	ctx->salt_len = GCM_ESP_SALT_SIZE;
+	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
+	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
+	keylen -= GCM_ESP_SALT_SIZE;
+	ctx->digestsize = GCM_ESP_DIGESTSIZE;
+	ctx->is_esp = true;
+	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
+
+	return aead_gcm_ccm_setkey(cipher, key, keylen);
+}
+
+/**
+ * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
+ * @cipher: AEAD structure
+ * @key:    Key followed by 4 bytes of salt
+ * @keylen: Length of key plus salt, in bytes
+ *
+ * Extracts salt from key and stores it to be prepended to IV on each request.
+ * Digest is always 16 bytes
+ *
+ * Return: Value from generic gcm setkey.
+ */
+static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
+				  const u8 *key, unsigned int keylen)
+{
+	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
+
+	flow_log("%s\n", __func__);
+	ctx->salt_len = GCM_ESP_SALT_SIZE;
+	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
+	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
+	keylen -= GCM_ESP_SALT_SIZE;
+	ctx->digestsize = GCM_ESP_DIGESTSIZE;
+	ctx->is_esp = true;
+	ctx->is_rfc4543 = true;
+	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
+
+	return aead_gcm_ccm_setkey(cipher, key, keylen);
+}
+
+/**
+ * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES.
+ * @cipher: AEAD structure
+ * @key:    Key followed by 4 bytes of salt
+ * @keylen: Length of key plus salt, in bytes
+ *
+ * Extracts salt from key and stores it to be prepended to IV on each request.
+ * Digest is always 16 bytes
+ *
+ * Return: Value from generic ccm setkey.
+ */
+static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
+			       const u8 *key, unsigned int keylen)
+{
+	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
+
+	flow_log("%s\n", __func__);
+	ctx->salt_len = CCM_ESP_SALT_SIZE;
+	ctx->salt_offset = CCM_ESP_SALT_OFFSET;
+	memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
+	keylen -= CCM_ESP_SALT_SIZE;
+	ctx->is_esp = true;
+	flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
+
+	return aead_gcm_ccm_setkey(cipher, key, keylen);
+}
+
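+/**
+ * aead_setauthsize() - Record the requested authentication tag (ICV) size
+ * and propagate it to the software fallback cipher, if one exists.
+ * @cipher:   AEAD structure
+ * @authsize: authentication tag size, in bytes
+ *
+ * Return: 0 if successful, else error from the fallback cipher
+ */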
+static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
+{
+	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
+	int ret = 0;
+
+	flow_log("%s() authkeylen:%u authsize:%u\n",
+		 __func__, ctx->authkeylen, authsize);
+
+	ctx->digestsize = authsize;
+
+	/* setkey the fallback just in case we need to use it */
+	if (ctx->fallback_cipher) {
+		flow_log("  running fallback setauth()\n");
+
+		ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
+		if (ret)
+			flow_log("  fallback setauth() returned:%d\n", ret);
+	}
+
+	return ret;
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+	flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
+		 req->cryptlen);
+	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
+	flow_log("  assoc_len:%u\n", req->assoclen);
+
+	return aead_enqueue(req, true);
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+	flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
+	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
+	flow_log("  assoc_len:%u\n", req->assoclen);
+
+	return aead_enqueue(req, false);
+}
+
+/* ==================== Supported Cipher Algorithms ==================== */
+
+static struct iproc_alg_s driver_algs[] = {
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "gcm(aes)",
+			.cra_driver_name = "gcm-aes-iproc",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
+		 },
+		 .setkey = aead_gcm_ccm_setkey,
+		 .ivsize = GCM_AES_IV_SIZE,
+		.maxauthsize = AES_BLOCK_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_AES,
+			 .mode = CIPHER_MODE_GCM,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_AES,
+		       .mode = HASH_MODE_GCM,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "ccm(aes)",
+			.cra_driver_name = "ccm-aes-iproc",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
+		 },
+		 .setkey = aead_gcm_ccm_setkey,
+		 .ivsize = CCM_AES_IV_SIZE,
+		.maxauthsize = AES_BLOCK_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_AES,
+			 .mode = CIPHER_MODE_CCM,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_AES,
+		       .mode = HASH_MODE_CCM,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "rfc4106(gcm(aes))",
+			.cra_driver_name = "gcm-aes-esp-iproc",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
+		 },
+		 .setkey = aead_gcm_esp_setkey,
+		 .ivsize = GCM_RFC4106_IV_SIZE,
+		 .maxauthsize = AES_BLOCK_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_AES,
+			 .mode = CIPHER_MODE_GCM,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_AES,
+		       .mode = HASH_MODE_GCM,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "rfc4309(ccm(aes))",
+			.cra_driver_name = "ccm-aes-esp-iproc",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
+		 },
+		 .setkey = aead_ccm_esp_setkey,
+		 .ivsize = CCM_AES_IV_SIZE,
+		 .maxauthsize = AES_BLOCK_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_AES,
+			 .mode = CIPHER_MODE_CCM,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_AES,
+		       .mode = HASH_MODE_CCM,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "rfc4543(gcm(aes))",
+			.cra_driver_name = "gmac-aes-esp-iproc",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
+		 },
+		 .setkey = rfc4543_gcm_esp_setkey,
+		 .ivsize = GCM_RFC4106_IV_SIZE,
+		 .maxauthsize = AES_BLOCK_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_AES,
+			 .mode = CIPHER_MODE_GCM,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_AES,
+		       .mode = HASH_MODE_GCM,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(md5),cbc(aes))",
+			.cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = MD5_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_AES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_MD5,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(sha1),cbc(aes))",
+			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = AES_BLOCK_SIZE,
+		 .maxauthsize = SHA1_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_AES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA1,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(sha256),cbc(aes))",
+			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = AES_BLOCK_SIZE,
+		 .maxauthsize = SHA256_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_AES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA256,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(md5),cbc(des))",
+			.cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = DES_BLOCK_SIZE,
+		 .maxauthsize = MD5_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_MD5,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(sha1),cbc(des))",
+			.cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = DES_BLOCK_SIZE,
+		 .maxauthsize = SHA1_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA1,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(sha224),cbc(des))",
+			.cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = DES_BLOCK_SIZE,
+		 .maxauthsize = SHA224_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA224,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(sha256),cbc(des))",
+			.cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = DES_BLOCK_SIZE,
+		 .maxauthsize = SHA256_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA256,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(sha384),cbc(des))",
+			.cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = DES_BLOCK_SIZE,
+		 .maxauthsize = SHA384_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA384,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(sha512),cbc(des))",
+			.cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = DES_BLOCK_SIZE,
+		 .maxauthsize = SHA512_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA512,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+			.cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = DES3_EDE_BLOCK_SIZE,
+		 .maxauthsize = MD5_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_3DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_MD5,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+			.cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = DES3_EDE_BLOCK_SIZE,
+		 .maxauthsize = SHA1_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_3DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA1,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
+			.cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = DES3_EDE_BLOCK_SIZE,
+		 .maxauthsize = SHA224_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_3DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA224,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+			.cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = DES3_EDE_BLOCK_SIZE,
+		 .maxauthsize = SHA256_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_3DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA256,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
+			.cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = DES3_EDE_BLOCK_SIZE,
+		 .maxauthsize = SHA384_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_3DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA384,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AEAD,
+	 .alg.aead = {
+		 .base = {
+			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
+			.cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+		 },
+		 .setkey = aead_authenc_setkey,
+		 .ivsize = DES3_EDE_BLOCK_SIZE,
+		 .maxauthsize = SHA512_DIGEST_SIZE,
+	 },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_3DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA512,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 .auth_first = 0,
+	 },
+
+/* ABLKCIPHER algorithms. */
+	{
+	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	 .alg.crypto = {
+			.cra_name = "ecb(arc4)",
+			.cra_driver_name = "ecb-arc4-iproc",
+			.cra_blocksize = ARC4_BLOCK_SIZE,
+			.cra_ablkcipher = {
+					   .min_keysize = ARC4_MIN_KEY_SIZE,
+					   .max_keysize = ARC4_MAX_KEY_SIZE,
+					   .ivsize = 0,
+					}
+			},
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_RC4,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_NONE,
+		       .mode = HASH_MODE_NONE,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	 .alg.crypto = {
+			.cra_name = "ofb(des)",
+			.cra_driver_name = "ofb-des-iproc",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ablkcipher = {
+					   .min_keysize = DES_KEY_SIZE,
+					   .max_keysize = DES_KEY_SIZE,
+					   .ivsize = DES_BLOCK_SIZE,
+					}
+			},
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_DES,
+			 .mode = CIPHER_MODE_OFB,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_NONE,
+		       .mode = HASH_MODE_NONE,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	 .alg.crypto = {
+			.cra_name = "cbc(des)",
+			.cra_driver_name = "cbc-des-iproc",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ablkcipher = {
+					   .min_keysize = DES_KEY_SIZE,
+					   .max_keysize = DES_KEY_SIZE,
+					   .ivsize = DES_BLOCK_SIZE,
+					}
+			},
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_NONE,
+		       .mode = HASH_MODE_NONE,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	 .alg.crypto = {
+			.cra_name = "ecb(des)",
+			.cra_driver_name = "ecb-des-iproc",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ablkcipher = {
+					   .min_keysize = DES_KEY_SIZE,
+					   .max_keysize = DES_KEY_SIZE,
+					   .ivsize = 0,
+					}
+			},
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_DES,
+			 .mode = CIPHER_MODE_ECB,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_NONE,
+		       .mode = HASH_MODE_NONE,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	 .alg.crypto = {
+			.cra_name = "ofb(des3_ede)",
+			.cra_driver_name = "ofb-des3-iproc",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ablkcipher = {
+					   .min_keysize = DES3_EDE_KEY_SIZE,
+					   .max_keysize = DES3_EDE_KEY_SIZE,
+					   .ivsize = DES3_EDE_BLOCK_SIZE,
+					}
+			},
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_3DES,
+			 .mode = CIPHER_MODE_OFB,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_NONE,
+		       .mode = HASH_MODE_NONE,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	 .alg.crypto = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "cbc-des3-iproc",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ablkcipher = {
+					   .min_keysize = DES3_EDE_KEY_SIZE,
+					   .max_keysize = DES3_EDE_KEY_SIZE,
+					   .ivsize = DES3_EDE_BLOCK_SIZE,
+					}
+			},
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_3DES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_NONE,
+		       .mode = HASH_MODE_NONE,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	 .alg.crypto = {
+			.cra_name = "ecb(des3_ede)",
+			.cra_driver_name = "ecb-des3-iproc",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ablkcipher = {
+					   .min_keysize = DES3_EDE_KEY_SIZE,
+					   .max_keysize = DES3_EDE_KEY_SIZE,
+					   .ivsize = 0,
+					}
+			},
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_3DES,
+			 .mode = CIPHER_MODE_ECB,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_NONE,
+		       .mode = HASH_MODE_NONE,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	 .alg.crypto = {
+			.cra_name = "ofb(aes)",
+			.cra_driver_name = "ofb-aes-iproc",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ablkcipher = {
+					   .min_keysize = AES_MIN_KEY_SIZE,
+					   .max_keysize = AES_MAX_KEY_SIZE,
+					   .ivsize = AES_BLOCK_SIZE,
+					}
+			},
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_AES,
+			 .mode = CIPHER_MODE_OFB,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_NONE,
+		       .mode = HASH_MODE_NONE,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	 .alg.crypto = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "cbc-aes-iproc",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ablkcipher = {
+					   .min_keysize = AES_MIN_KEY_SIZE,
+					   .max_keysize = AES_MAX_KEY_SIZE,
+					   .ivsize = AES_BLOCK_SIZE,
+					}
+			},
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_AES,
+			 .mode = CIPHER_MODE_CBC,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_NONE,
+		       .mode = HASH_MODE_NONE,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	 .alg.crypto = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "ecb-aes-iproc",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ablkcipher = {
+					   .min_keysize = AES_MIN_KEY_SIZE,
+					   .max_keysize = AES_MAX_KEY_SIZE,
+					   .ivsize = 0,
+					}
+			},
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_AES,
+			 .mode = CIPHER_MODE_ECB,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_NONE,
+		       .mode = HASH_MODE_NONE,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	 .alg.crypto = {
+			.cra_name = "ctr(aes)",
+			.cra_driver_name = "ctr-aes-iproc",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ablkcipher = {
+					   /* .geniv = "chainiv", */
+					   .min_keysize = AES_MIN_KEY_SIZE,
+					   .max_keysize = AES_MAX_KEY_SIZE,
+					   .ivsize = AES_BLOCK_SIZE,
+					}
+			},
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_AES,
+			 .mode = CIPHER_MODE_CTR,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_NONE,
+		       .mode = HASH_MODE_NONE,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	 .alg.crypto = {
+			.cra_name = "xts(aes)",
+			.cra_driver_name = "xts-aes-iproc",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ablkcipher = {
+				.min_keysize = 2 * AES_MIN_KEY_SIZE,
+				.max_keysize = 2 * AES_MAX_KEY_SIZE,
+				.ivsize = AES_BLOCK_SIZE,
+				}
+			},
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_AES,
+			 .mode = CIPHER_MODE_XTS,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_NONE,
+		       .mode = HASH_MODE_NONE,
+		       },
+	 },
+
+/* AHASH algorithms. */
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = MD5_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "md5",
+				    .cra_driver_name = "md5-iproc",
+				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
+				    .cra_flags = CRYPTO_ALG_ASYNC,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_MD5,
+		       .mode = HASH_MODE_HASH,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = MD5_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "hmac(md5)",
+				    .cra_driver_name = "hmac-md5-iproc",
+				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_MD5,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 },
+	{.type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA1_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "sha1",
+				    .cra_driver_name = "sha1-iproc",
+				    .cra_blocksize = SHA1_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA1,
+		       .mode = HASH_MODE_HASH,
+		       },
+	 },
+	{.type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA1_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "hmac(sha1)",
+				    .cra_driver_name = "hmac-sha1-iproc",
+				    .cra_blocksize = SHA1_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA1,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 },
+	{.type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+			.halg.digestsize = SHA224_DIGEST_SIZE,
+			.halg.base = {
+				    .cra_name = "sha224",
+				    .cra_driver_name = "sha224-iproc",
+				    .cra_blocksize = SHA224_BLOCK_SIZE,
+			}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA224,
+		       .mode = HASH_MODE_HASH,
+		       },
+	 },
+	{.type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA224_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "hmac(sha224)",
+				    .cra_driver_name = "hmac-sha224-iproc",
+				    .cra_blocksize = SHA224_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA224,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 },
+	{.type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA256_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "sha256",
+				    .cra_driver_name = "sha256-iproc",
+				    .cra_blocksize = SHA256_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA256,
+		       .mode = HASH_MODE_HASH,
+		       },
+	 },
+	{.type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA256_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "hmac(sha256)",
+				    .cra_driver_name = "hmac-sha256-iproc",
+				    .cra_blocksize = SHA256_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA256,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 },
+	{
+	.type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA384_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "sha384",
+				    .cra_driver_name = "sha384-iproc",
+				    .cra_blocksize = SHA384_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA384,
+		       .mode = HASH_MODE_HASH,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA384_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "hmac(sha384)",
+				    .cra_driver_name = "hmac-sha384-iproc",
+				    .cra_blocksize = SHA384_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA384,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA512_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "sha512",
+				    .cra_driver_name = "sha512-iproc",
+				    .cra_blocksize = SHA512_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA512,
+		       .mode = HASH_MODE_HASH,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA512_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "hmac(sha512)",
+				    .cra_driver_name = "hmac-sha512-iproc",
+				    .cra_blocksize = SHA512_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA512,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "sha3-224",
+				    .cra_driver_name = "sha3-224-iproc",
+				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA3_224,
+		       .mode = HASH_MODE_HASH,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "hmac(sha3-224)",
+				    .cra_driver_name = "hmac-sha3-224-iproc",
+				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA3_224,
+		       .mode = HASH_MODE_HMAC
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "sha3-256",
+				    .cra_driver_name = "sha3-256-iproc",
+				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA3_256,
+		       .mode = HASH_MODE_HASH,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "hmac(sha3-256)",
+				    .cra_driver_name = "hmac-sha3-256-iproc",
+				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA3_256,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "sha3-384",
+				    .cra_driver_name = "sha3-384-iproc",
+				    .cra_blocksize = SHA3_384_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA3_384,
+		       .mode = HASH_MODE_HASH,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "hmac(sha3-384)",
+				    .cra_driver_name = "hmac-sha3-384-iproc",
+				    .cra_blocksize = SHA3_384_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA3_384,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "sha3-512",
+				    .cra_driver_name = "sha3-512-iproc",
+				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA3_512,
+		       .mode = HASH_MODE_HASH,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
+		      .halg.base = {
+				    .cra_name = "hmac(sha3-512)",
+				    .cra_driver_name = "hmac-sha3-512-iproc",
+				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_SHA3_512,
+		       .mode = HASH_MODE_HMAC,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = AES_BLOCK_SIZE,
+		      .halg.base = {
+				    .cra_name = "xcbc(aes)",
+				    .cra_driver_name = "xcbc-aes-iproc",
+				    .cra_blocksize = AES_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_AES,
+		       .mode = HASH_MODE_XCBC,
+		       },
+	 },
+	{
+	 .type = CRYPTO_ALG_TYPE_AHASH,
+	 .alg.hash = {
+		      .halg.digestsize = AES_BLOCK_SIZE,
+		      .halg.base = {
+				    .cra_name = "cmac(aes)",
+				    .cra_driver_name = "cmac-aes-iproc",
+				    .cra_blocksize = AES_BLOCK_SIZE,
+				}
+		      },
+	 .cipher_info = {
+			 .alg = CIPHER_ALG_NONE,
+			 .mode = CIPHER_MODE_NONE,
+			 },
+	 .auth_info = {
+		       .alg = HASH_ALG_AES,
+		       .mode = HASH_MODE_CMAC,
+		       },
+	 },
+};
+
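+/**
+ * generic_cra_init() - Transform initialization common to all algorithm
+ * types. Caches the algorithm's cipher and auth parameters in the transform
+ * context and queries the SPU for the maximum payload size.
+ * @tfm:        the crypto transform being initialized
+ * @cipher_alg: driver algorithm entry from driver_algs[]
+ *
+ * Return: 0 always
+ */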
+static int generic_cra_init(struct crypto_tfm *tfm,
+			    struct iproc_alg_s *cipher_alg)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
+	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
+
+	flow_log("%s()\n", __func__);
+
+	ctx->alg = cipher_alg;
+	ctx->cipher = cipher_alg->cipher_info;
+	ctx->auth = cipher_alg->auth_info;
+	ctx->auth_first = cipher_alg->auth_first;
+	ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
+						    ctx->cipher.mode,
+						    blocksize);
+	ctx->fallback_cipher = NULL;
+
+	ctx->enckeylen = 0;
+	ctx->authkeylen = 0;
+
+	atomic_inc(&iproc_priv.stream_count);
+	atomic_inc(&iproc_priv.session_count);
+
+	return 0;
+}
+
+static int ablkcipher_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct iproc_alg_s *cipher_alg;
+
+	flow_log("%s()\n", __func__);
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s);
+
+	cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto);
+	return generic_cra_init(tfm, cipher_alg);
+}
+
+static int ahash_cra_init(struct crypto_tfm *tfm)
+{
+	int err;
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct iproc_alg_s *cipher_alg;
+
+	cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
+				  alg.hash);
+
+	err = generic_cra_init(tfm, cipher_alg);
+	flow_log("%s()\n", __func__);
+
+	/*
+	 * export state size has to be < 512 bytes. So don't include msg bufs
+	 * in state size.
+	 */
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct iproc_reqctx_s));
+
+	return err;
+}
+
+static int aead_cra_init(struct crypto_aead *aead)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
+	struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
+						      alg.aead);
+
+	int err = generic_cra_init(tfm, cipher_alg);
+
+	flow_log("%s()\n", __func__);
+
+	crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
+	ctx->is_esp = false;
+	ctx->salt_len = 0;
+	ctx->salt_offset = 0;
+
+	/* random first IV */
+	get_random_bytes(ctx->iv, MAX_IV_SIZE);
+	flow_dump("  iv: ", ctx->iv, MAX_IV_SIZE);
+
+	if (!err) {
+		if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+			flow_log("%s() creating fallback cipher\n", __func__);
+
+			ctx->fallback_cipher =
+			    crypto_alloc_aead(alg->cra_name, 0,
+					      CRYPTO_ALG_ASYNC |
+					      CRYPTO_ALG_NEED_FALLBACK);
+			if (IS_ERR(ctx->fallback_cipher)) {
+				pr_err("%s() Error: failed to allocate fallback for %s\n",
+				       __func__, alg->cra_name);
+				return PTR_ERR(ctx->fallback_cipher);
+			}
+		}
+	}
+
+	return err;
+}
+
+static void generic_cra_exit(struct crypto_tfm *tfm)
+{
+	atomic_dec(&iproc_priv.session_count);
+}
+
+static void aead_cra_exit(struct crypto_aead *aead)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
+
+	generic_cra_exit(tfm);
+
+	if (ctx->fallback_cipher) {
+		crypto_free_aead(ctx->fallback_cipher);
+		ctx->fallback_cipher = NULL;
+	}
+}
+
+/**
+ * spu_functions_register() - Specify hardware-specific SPU functions based on
+ * SPU type read from device tree.
+ * @dev:	device structure
+ * @spu_type:	SPU hardware generation
+ * @spu_subtype: SPU hardware version
+ */
+static void spu_functions_register(struct device *dev,
+				   enum spu_spu_type spu_type,
+				   enum spu_spu_subtype spu_subtype)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+
+	if (spu_type == SPU_TYPE_SPUM) {
+		dev_dbg(dev, "Registering SPUM functions");
+		spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
+		spu->spu_payload_length = spum_payload_length;
+		spu->spu_response_hdr_len = spum_response_hdr_len;
+		spu->spu_hash_pad_len = spum_hash_pad_len;
+		spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
+		spu->spu_assoc_resp_len = spum_assoc_resp_len;
+		spu->spu_aead_ivlen = spum_aead_ivlen;
+		spu->spu_hash_type = spum_hash_type;
+		spu->spu_digest_size = spum_digest_size;
+		spu->spu_create_request = spum_create_request;
+		spu->spu_cipher_req_init = spum_cipher_req_init;
+		spu->spu_cipher_req_finish = spum_cipher_req_finish;
+		spu->spu_request_pad = spum_request_pad;
+		spu->spu_tx_status_len = spum_tx_status_len;
+		spu->spu_rx_status_len = spum_rx_status_len;
+		spu->spu_status_process = spum_status_process;
+		spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
+		spu->spu_ccm_update_iv = spum_ccm_update_iv;
+		spu->spu_wordalign_padlen = spum_wordalign_padlen;
+		if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
+			spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
+		else
+			spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
+	} else {
+		dev_dbg(dev, "Registering SPU2 functions");
+		spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
+		spu->spu_ctx_max_payload = spu2_ctx_max_payload;
+		spu->spu_payload_length = spu2_payload_length;
+		spu->spu_response_hdr_len = spu2_response_hdr_len;
+		spu->spu_hash_pad_len = spu2_hash_pad_len;
+		spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
+		spu->spu_assoc_resp_len = spu2_assoc_resp_len;
+		spu->spu_aead_ivlen = spu2_aead_ivlen;
+		spu->spu_hash_type = spu2_hash_type;
+		spu->spu_digest_size = spu2_digest_size;
+		spu->spu_create_request = spu2_create_request;
+		spu->spu_cipher_req_init = spu2_cipher_req_init;
+		spu->spu_cipher_req_finish = spu2_cipher_req_finish;
+		spu->spu_request_pad = spu2_request_pad;
+		spu->spu_tx_status_len = spu2_tx_status_len;
+		spu->spu_rx_status_len = spu2_rx_status_len;
+		spu->spu_status_process = spu2_status_process;
+		spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
+		spu->spu_ccm_update_iv = spu2_ccm_update_iv;
+		spu->spu_wordalign_padlen = spu2_wordalign_padlen;
+	}
+}
+
+/**
+ * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox
+ * channel for the SPU being probed.
+ * @dev:  SPU driver device structure
+ *
+ * Return: 0 if successful
+ *	   < 0 otherwise
+ */
+static int spu_mb_init(struct device *dev)
+{
+	struct mbox_client *mcl = &iproc_priv.mcl;
+	int err, i;
+
+	iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
+				  sizeof(struct mbox_chan *), GFP_KERNEL);
+	if (!iproc_priv.mbox)
+		return -ENOMEM;
+
+	mcl->dev = dev;
+	mcl->tx_block = false;
+	mcl->tx_tout = 0;
+	mcl->knows_txdone = true;
+	mcl->rx_callback = spu_rx_callback;
+	mcl->tx_done = NULL;
+
+	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
+		iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
+		if (IS_ERR(iproc_priv.mbox[i])) {
+			err = (int)PTR_ERR(iproc_priv.mbox[i]);
+			dev_err(dev,
+				"Mbox channel %d request failed with err %d",
+				i, err);
+			iproc_priv.mbox[i] = NULL;
+			goto free_channels;
+		}
+	}
+
+	return 0;
+free_channels:
+	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
+		if (iproc_priv.mbox[i])
+			mbox_free_channel(iproc_priv.mbox[i]);
+	}
+
+	return err;
+}
+
+static void spu_mb_release(struct platform_device *pdev)
+{
+	int i;
+
+	for (i = 0; i < iproc_priv.spu.num_chan; i++)
+		mbox_free_channel(iproc_priv.mbox[i]);
+}
+
+static void spu_counters_init(void)
+{
+	int i;
+	int j;
+
+	atomic_set(&iproc_priv.session_count, 0);
+	atomic_set(&iproc_priv.stream_count, 0);
+	atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
+	atomic64_set(&iproc_priv.bytes_in, 0);
+	atomic64_set(&iproc_priv.bytes_out, 0);
+	for (i = 0; i < SPU_OP_NUM; i++) {
+		atomic_set(&iproc_priv.op_counts[i], 0);
+		atomic_set(&iproc_priv.setkey_cnt[i], 0);
+	}
+	for (i = 0; i < CIPHER_ALG_LAST; i++)
+		for (j = 0; j < CIPHER_MODE_LAST; j++)
+			atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
+
+	for (i = 0; i < HASH_ALG_LAST; i++) {
+		atomic_set(&iproc_priv.hash_cnt[i], 0);
+		atomic_set(&iproc_priv.hmac_cnt[i], 0);
+	}
+	for (i = 0; i < AEAD_TYPE_LAST; i++)
+		atomic_set(&iproc_priv.aead_cnt[i], 0);
+
+	atomic_set(&iproc_priv.mb_no_spc, 0);
+	atomic_set(&iproc_priv.mb_send_fail, 0);
+	atomic_set(&iproc_priv.bad_icv, 0);
+}
+
+static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct crypto_alg *crypto = &driver_alg->alg.crypto;
+	int err;
+
+	/* SPU2 does not support RC4 */
+	if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
+	    (spu->spu_type == SPU_TYPE_SPU2))
+		return 0;
+
+	crypto->cra_module = THIS_MODULE;
+	crypto->cra_priority = cipher_pri;
+	crypto->cra_alignmask = 0;
+	crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
+	INIT_LIST_HEAD(&crypto->cra_list);
+
+	crypto->cra_init = ablkcipher_cra_init;
+	crypto->cra_exit = generic_cra_exit;
+	crypto->cra_type = &crypto_ablkcipher_type;
+	crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+	crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
+	crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
+	crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;
+
+	err = crypto_register_alg(crypto);
+	/* Mark alg as having been registered, if successful */
+	if (err == 0)
+		driver_alg->registered = true;
+	pr_debug("  registered ablkcipher %s\n", crypto->cra_driver_name);
+	return err;
+}
+
+static int spu_register_ahash(struct iproc_alg_s *driver_alg)
+{
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct ahash_alg *hash = &driver_alg->alg.hash;
+	int err;
+
+	/* AES-XCBC is the only AES hash type currently supported on SPU-M */
+	if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
+	    (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
+	    (spu->spu_type == SPU_TYPE_SPUM))
+		return 0;
+
+	/* SHA3 algorithm variants are only supported on SPU2 version 2. */
+	if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
+	    (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
+		return 0;
+
+	hash->halg.base.cra_module = THIS_MODULE;
+	hash->halg.base.cra_priority = hash_pri;
+	hash->halg.base.cra_alignmask = 0;
+	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
+	hash->halg.base.cra_init = ahash_cra_init;
+	hash->halg.base.cra_exit = generic_cra_exit;
+	hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+	hash->halg.statesize = sizeof(struct spu_hash_export_s);
+
+	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
+		hash->setkey = ahash_setkey;
+		hash->init = ahash_init;
+		hash->update = ahash_update;
+		hash->final = ahash_final;
+		hash->finup = ahash_finup;
+		hash->digest = ahash_digest;
+	} else {
+		hash->setkey = ahash_hmac_setkey;
+		hash->init = ahash_hmac_init;
+		hash->update = ahash_hmac_update;
+		hash->final = ahash_hmac_final;
+		hash->finup = ahash_hmac_finup;
+		hash->digest = ahash_hmac_digest;
+	}
+	hash->export = ahash_export;
+	hash->import = ahash_import;
+
+	err = crypto_register_ahash(hash);
+	/* Mark alg as having been registered, if successful */
+	if (err == 0)
+		driver_alg->registered = true;
+	pr_debug("  registered ahash %s\n",
+		 hash->halg.base.cra_driver_name);
+	return err;
+}
+
+static int spu_register_aead(struct iproc_alg_s *driver_alg)
+{
+	struct aead_alg *aead = &driver_alg->alg.aead;
+	int err;
+
+	aead->base.cra_module = THIS_MODULE;
+	aead->base.cra_priority = aead_pri;
+	aead->base.cra_alignmask = 0;
+	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
+	INIT_LIST_HEAD(&aead->base.cra_list);
+
+	aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
+	/* setkey set in alg initialization */
+	aead->setauthsize = aead_setauthsize;
+	aead->encrypt = aead_encrypt;
+	aead->decrypt = aead_decrypt;
+	aead->init = aead_cra_init;
+	aead->exit = aead_cra_exit;
+
+	err = crypto_register_aead(aead);
+	/* Mark alg as having been registered, if successful */
+	if (err == 0)
+		driver_alg->registered = true;
+	pr_debug("  registered aead %s\n", aead->base.cra_driver_name);
+	return err;
+}
+
+/* register crypto algorithms the device supports */
+static int spu_algs_register(struct device *dev)
+{
+	int i, j;
+	int err;
+
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		switch (driver_algs[i].type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			err = spu_register_ablkcipher(&driver_algs[i]);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			err = spu_register_ahash(&driver_algs[i]);
+			break;
+		case CRYPTO_ALG_TYPE_AEAD:
+			err = spu_register_aead(&driver_algs[i]);
+			break;
+		default:
+			dev_err(dev,
+				"iproc-crypto: unknown alg type: %d",
+				driver_algs[i].type);
+			err = -EINVAL;
+		}
+
+		if (err) {
+			dev_err(dev, "alg registration failed with error %d\n",
+				err);
+			goto err_algs;
+		}
+	}
+
+	return 0;
+
+err_algs:
+	for (j = 0; j < i; j++) {
+		/* Skip any algorithm not registered */
+		if (!driver_algs[j].registered)
+			continue;
+		switch (driver_algs[j].type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			crypto_unregister_alg(&driver_algs[j].alg.crypto);
+			driver_algs[j].registered = false;
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&driver_algs[j].alg.hash);
+			driver_algs[j].registered = false;
+			break;
+		case CRYPTO_ALG_TYPE_AEAD:
+			crypto_unregister_aead(&driver_algs[j].alg.aead);
+			driver_algs[j].registered = false;
+			break;
+		}
+	}
+	return err;
+}
+
+/* ==================== Kernel Platform API ==================== */
+
+static struct spu_type_subtype spum_ns2_types = {
+	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
+};
+
+static struct spu_type_subtype spum_nsp_types = {
+	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
+};
+
+static struct spu_type_subtype spu2_types = {
+	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
+};
+
+static struct spu_type_subtype spu2_v2_types = {
+	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
+};
+
+static const struct of_device_id bcm_spu_dt_ids[] = {
+	{
+		.compatible = "brcm,spum-crypto",
+		.data = &spum_ns2_types,
+	},
+	{
+		.compatible = "brcm,spum-nsp-crypto",
+		.data = &spum_nsp_types,
+	},
+	{
+		.compatible = "brcm,spu2-crypto",
+		.data = &spu2_types,
+	},
+	{
+		.compatible = "brcm,spu2-v2-crypto",
+		.data = &spu2_v2_types,
+	},
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);
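+
+/*
+ * For reference, a device tree node matched by this table might look like the
+ * sketch below. The unit address, "reg" range and &pdc0 mailbox phandle are
+ * hypothetical; only the compatible string comes from the table above.
+ * spu_dt_read() maps each "reg" entry and counts the "mboxes" entries.
+ *
+ *	crypto@612d0000 {
+ *		compatible = "brcm,spu2-v2-crypto";
+ *		reg = <0x612d0000 0x1000>;
+ *		mboxes = <&pdc0 0>;
+ *	};
+ */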
+
+static int spu_dt_read(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct spu_hw *spu = &iproc_priv.spu;
+	struct resource *spu_ctrl_regs;
+	const struct spu_type_subtype *matched_spu_type;
+	struct device_node *dn = pdev->dev.of_node;
+	int err, i;
+
+	/* Count number of mailbox channels */
+	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
+
+	matched_spu_type = of_device_get_match_data(dev);
+	if (!matched_spu_type) {
+		dev_err(&pdev->dev, "Failed to match device\n");
+		return -ENODEV;
+	}
+
+	spu->spu_type = matched_spu_type->type;
+	spu->spu_subtype = matched_spu_type->subtype;
+
+	for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
+		platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
+
+		spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
+		if (IS_ERR(spu->reg_vbase[i])) {
+			err = PTR_ERR(spu->reg_vbase[i]);
+			dev_err(&pdev->dev, "Failed to map registers: %d\n",
+				err);
+			spu->reg_vbase[i] = NULL;
+			return err;
+		}
+	}
+	spu->num_spu = i;
+	dev_dbg(dev, "Device has %d SPUs", spu->num_spu);
+
+	return 0;
+}
+
+int bcm_spu_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct spu_hw *spu = &iproc_priv.spu;
+	int err = 0;
+
+	iproc_priv.pdev  = pdev;
+	platform_set_drvdata(iproc_priv.pdev,
+			     &iproc_priv);
+
+	err = spu_dt_read(pdev);
+	if (err < 0)
+		goto failure;
+
+	err = spu_mb_init(&pdev->dev);
+	if (err < 0)
+		goto failure;
+
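+	/*
+	 * SPU-M hardware expects an 8-byte BCM header at the start of each SPU
+	 * request message; SPU2 does not use one.
+	 */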
+	if (spu->spu_type == SPU_TYPE_SPUM)
+		iproc_priv.bcm_hdr_len = 8;
+	else if (spu->spu_type == SPU_TYPE_SPU2)
+		iproc_priv.bcm_hdr_len = 0;
+
+	spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);
+
+	spu_counters_init();
+
+	spu_setup_debugfs();
+
+	err = spu_algs_register(dev);
+	if (err < 0)
+		goto fail_reg;
+
+	return 0;
+
+fail_reg:
+	spu_free_debugfs();
+failure:
+	spu_mb_release(pdev);
+	dev_err(dev, "%s failed with error %d.\n", __func__, err);
+
+	return err;
+}
+
+int bcm_spu_remove(struct platform_device *pdev)
+{
+	int i;
+	struct device *dev = &pdev->dev;
+	char *cdn;
+
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		/*
+		 * Not all algorithms were registered, depending on whether
+		 * the hardware is SPU-M or SPU2. So skip any algorithms that
+		 * were not previously registered.
+		 */
+		if (!driver_algs[i].registered)
+			continue;
+
+		switch (driver_algs[i].type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			crypto_unregister_alg(&driver_algs[i].alg.crypto);
+			dev_dbg(dev, "  unregistered cipher %s\n",
+				driver_algs[i].alg.crypto.cra_driver_name);
+			driver_algs[i].registered = false;
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&driver_algs[i].alg.hash);
+			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
+			dev_dbg(dev, "  unregistered hash %s\n", cdn);
+			driver_algs[i].registered = false;
+			break;
+		case CRYPTO_ALG_TYPE_AEAD:
+			crypto_unregister_aead(&driver_algs[i].alg.aead);
+			dev_dbg(dev, "  unregistered aead %s\n",
+				driver_algs[i].alg.aead.base.cra_driver_name);
+			driver_algs[i].registered = false;
+			break;
+		}
+	}
+	spu_free_debugfs();
+	spu_mb_release(pdev);
+	return 0;
+}
+
+/* ===== Kernel Module API ===== */
+
+static struct platform_driver bcm_spu_pdriver = {
+	.driver = {
+		   .name = "brcm-spu-crypto",
+		   .of_match_table = of_match_ptr(bcm_spu_dt_ids),
+		   },
+	.probe = bcm_spu_probe,
+	.remove = bcm_spu_remove,
+};
+module_platform_driver(bcm_spu_pdriver);
+
+MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
new file mode 100644
index 0000000..763c425
--- /dev/null
+++ b/drivers/crypto/bcm/cipher.h
@@ -0,0 +1,483 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#ifndef _CIPHER_H
+#define _CIPHER_H
+
+#include <linux/atomic.h>
+#include <linux/mailbox/brcm-message.h>
+#include <linux/mailbox_client.h>
+#include <crypto/aes.h>
+#include <crypto/internal/hash.h>
+#include <crypto/aead.h>
+#include <crypto/gcm.h>
+#include <crypto/sha.h>
+#include <crypto/sha3.h>
+
+#include "spu.h"
+#include "spum.h"
+#include "spu2.h"
+
+/* Driver supports up to MAX_SPUS SPU blocks */
+#define MAX_SPUS 16
+
+#define ARC4_MIN_KEY_SIZE   1
+#define ARC4_MAX_KEY_SIZE   256
+#define ARC4_BLOCK_SIZE     1
+#define ARC4_STATE_SIZE     4
+
+#define CCM_AES_IV_SIZE    16
+#define CCM_ESP_IV_SIZE     8
+#define RFC4543_ICV_SIZE   16
+
+#define MAX_KEY_SIZE	ARC4_MAX_KEY_SIZE
+#define MAX_IV_SIZE	AES_BLOCK_SIZE
+#define MAX_DIGEST_SIZE	SHA3_512_DIGEST_SIZE
+#define MAX_ASSOC_SIZE	512
+
+/* size of salt value for AES-GCM-ESP and AES-CCM-ESP */
+#define GCM_ESP_SALT_SIZE   4
+#define CCM_ESP_SALT_SIZE   3
+#define MAX_SALT_SIZE       GCM_ESP_SALT_SIZE
+#define GCM_ESP_SALT_OFFSET 0
+#define CCM_ESP_SALT_OFFSET 1
+
+#define GCM_ESP_DIGESTSIZE 16
+
+#define MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+
+/*
+ * Maximum number of bytes from a non-final hash request that can be deferred
+ * until more data is available. With new crypto API framework, this
+ * can be no more than one block of data.
+ */
+#define HASH_CARRY_MAX  MAX_HASH_BLOCK_SIZE
+
+/* Force at least 4-byte alignment of all SPU message fields */
+#define SPU_MSG_ALIGN  4
+
+/* Number of times to resend mailbox message if mb queue is full */
+#define SPU_MB_RETRY_MAX  1000
+
+/* op_counts[] indexes */
+enum op_type {
+	SPU_OP_CIPHER,
+	SPU_OP_HASH,
+	SPU_OP_HMAC,
+	SPU_OP_AEAD,
+	SPU_OP_NUM
+};
+
+enum spu_spu_type {
+	SPU_TYPE_SPUM,
+	SPU_TYPE_SPU2,
+};
+
+/*
+ * SPUM_NS2 and SPUM_NSP are the SPU-M block on Northstar 2 and Northstar Plus,
+ * respectively.
+ */
+enum spu_spu_subtype {
+	SPU_SUBTYPE_SPUM_NS2,
+	SPU_SUBTYPE_SPUM_NSP,
+	SPU_SUBTYPE_SPU2_V1,
+	SPU_SUBTYPE_SPU2_V2
+};
+
+struct spu_type_subtype {
+	enum spu_spu_type type;
+	enum spu_spu_subtype subtype;
+};
+
+struct cipher_op {
+	enum spu_cipher_alg alg;
+	enum spu_cipher_mode mode;
+};
+
+struct auth_op {
+	enum hash_alg alg;
+	enum hash_mode mode;
+};
+
+struct iproc_alg_s {
+	u32 type;
+	union {
+		struct crypto_alg crypto;
+		struct ahash_alg hash;
+		struct aead_alg aead;
+	} alg;
+	struct cipher_op cipher_info;
+	struct auth_op auth_info;
+	bool auth_first;
+	bool registered;
+};
+
+/*
+ * Buffers for a SPU request/reply message pair. All part of one structure to
+ * allow a single alloc per request.
+ */
+struct spu_msg_buf {
+	/* Request message fragments */
+
+	/*
+	 * SPU request message header. For SPU-M, holds MH, EMH, SCTX, BDESC,
+	 * and BD header. For SPU2, holds FMD, OMD.
+	 */
+	u8 bcm_spu_req_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];
+
+	/* IV or counter. Sized to include the salt. Also used for the XTS tweak. */
+	u8 iv_ctr[ALIGN(2 * AES_BLOCK_SIZE, SPU_MSG_ALIGN)];
+
+	/* Hash digest. request and response. */
+	u8 digest[ALIGN(MAX_DIGEST_SIZE, SPU_MSG_ALIGN)];
+
+	/* SPU request message padding */
+	u8 spu_req_pad[ALIGN(SPU_PAD_LEN_MAX, SPU_MSG_ALIGN)];
+
+	/* SPU-M request message STATUS field */
+	u8 tx_stat[ALIGN(SPU_TX_STATUS_LEN, SPU_MSG_ALIGN)];
+
+	/* Response message fragments */
+
+	/* SPU response message header */
+	u8 spu_resp_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];
+
+	/* SPU response message STATUS field padding */
+	u8 rx_stat_pad[ALIGN(SPU_STAT_PAD_MAX, SPU_MSG_ALIGN)];
+
+	/* SPU response message STATUS field */
+	u8 rx_stat[ALIGN(SPU_RX_STATUS_LEN, SPU_MSG_ALIGN)];
+
+	union {
+		/* Buffers only used for ablkcipher */
+		struct {
+			/*
+			 * Field used for either SUPDT when RC4 is used
+			 * -OR- tweak value when XTS/AES is used
+			 */
+			u8 supdt_tweak[ALIGN(SPU_SUPDT_LEN, SPU_MSG_ALIGN)];
+		} c;
+
+		/* Buffers only used for aead */
+		struct {
+			/* SPU response pad for GCM data */
+			u8 gcmpad[ALIGN(AES_BLOCK_SIZE, SPU_MSG_ALIGN)];
+
+			/* SPU request msg padding for GCM AAD */
+			u8 req_aad_pad[ALIGN(SPU_PAD_LEN_MAX, SPU_MSG_ALIGN)];
+
+			/* SPU response data to be discarded */
+			u8 resp_aad[ALIGN(MAX_ASSOC_SIZE + MAX_IV_SIZE,
+					  SPU_MSG_ALIGN)];
+		} a;
+	};
+};
+
+struct iproc_ctx_s {
+	u8 enckey[MAX_KEY_SIZE + ARC4_STATE_SIZE];
+	unsigned int enckeylen;
+
+	u8 authkey[MAX_KEY_SIZE + ARC4_STATE_SIZE];
+	unsigned int authkeylen;
+
+	u8 salt[MAX_SALT_SIZE];
+	unsigned int salt_len;
+	unsigned int salt_offset;
+	u8 iv[MAX_IV_SIZE];
+
+	unsigned int digestsize;
+
+	struct iproc_alg_s *alg;
+	bool is_esp;
+
+	struct cipher_op cipher;
+	enum spu_cipher_type cipher_type;
+
+	struct auth_op auth;
+	bool auth_first;
+
+	/*
+	 * The maximum length in bytes of the payload in a SPU message for this
+	 * context. For SPU-M, the payload is the combination of AAD and data.
+	 * For SPU2, the payload is just data. A value of SPU_MAX_PAYLOAD_INF
+	 * indicates that there is no limit to the length of the SPU message
+	 * payload.
+	 */
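+	/*
+	 * e.g. if max_payload were 64 KiB (hypothetical), a 100 KiB cipher
+	 * request would be submitted as two chunks, each a whole number of
+	 * cipher blocks.
+	 */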
+	unsigned int max_payload;
+
+	struct crypto_aead *fallback_cipher;
+
+	/* auth_type is determined during processing of request */
+
+	u8 ipad[MAX_HASH_BLOCK_SIZE];
+	u8 opad[MAX_HASH_BLOCK_SIZE];
+
+	/*
+	 * Buffer to hold SPU message header template. Template is created at
+	 * setkey time for ablkcipher requests, since most of the fields in the
+	 * header are known at that time. At request time, just fill in a few
+	 * missing pieces related to length of data in the request and IVs, etc.
+	 */
+	u8 bcm_spu_req_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];
+
+	/* Length of SPU request header */
+	u16 spu_req_hdr_len;
+
+	/* Expected length of SPU response header */
+	u16 spu_resp_hdr_len;
+
+	/*
+	 * shash descriptor - needed to perform incremental hashing in
+	 * software, when hw doesn't support it.
+	 */
+	struct shash_desc *shash;
+
+	bool is_rfc4543;	/* RFC 4543 style of GMAC */
+};
+
+/* state from iproc_reqctx_s necessary for hash state export/import */
+struct spu_hash_export_s {
+	unsigned int total_todo;
+	unsigned int total_sent;
+	u8 hash_carry[HASH_CARRY_MAX];
+	unsigned int hash_carry_len;
+	u8 incr_hash[MAX_DIGEST_SIZE];
+	bool is_sw_hmac;
+};
+
+struct iproc_reqctx_s {
+	/* general context */
+	struct crypto_async_request *parent;
+
+	/* only valid after enqueue() */
+	struct iproc_ctx_s *ctx;
+
+	u8 chan_idx;   /* Mailbox channel to be used to submit this request */
+
+	/* total todo, rx'd, and sent for this request */
+	unsigned int total_todo;
+	unsigned int total_received;	/* only valid for ablkcipher */
+	unsigned int total_sent;
+
+	/*
+	 * num bytes sent to hw from the src sg in this request. This can differ
+	 * from total_sent for incremental hashing. total_sent includes previous
+	 * init() and update() data. src_sent does not.
+	 */
+	unsigned int src_sent;
+
+	/*
+	 * For AEAD requests, start of associated data. This will typically
+	 * point to the beginning of the src scatterlist from the request,
+	 * since assoc data is at the beginning of the src scatterlist rather
+	 * than in its own sg.
+	 */
+	struct scatterlist *assoc;
+
+	/*
+	 * scatterlist entry and offset to start of data for next chunk. Crypto
+	 * API src scatterlist for AEAD starts with AAD, if present. For first
+	 * chunk, src_sg is sg entry at beginning of input data (after AAD).
+	 * src_skip begins at the offset in that sg entry where data begins.
+	 */
+	struct scatterlist *src_sg;
+	int src_nents;		/* Number of src entries with data */
+	u32 src_skip;		/* bytes of current sg entry already used */
+
+	/*
+	 * Same for destination. For AEAD, if there is AAD, output data must
+	 * be written at offset following AAD.
+	 */
+	struct scatterlist *dst_sg;
+	int dst_nents;		/* Number of dst entries with data */
+	u32 dst_skip;		/* bytes of current sg entry already written */
+
+	/* Mailbox message used to send this request to PDC driver */
+	struct brcm_message mb_mssg;
+
+	bool bd_suppress;	/* suppress BD field in SPU response? */
+
+	/* cipher context */
+	bool is_encrypt;
+
+	/*
+	 * CBC mode: IV. CTR mode: counter. Otherwise empty. Used as a DMA
+	 * buffer for AEAD requests, so allocate as DMA-able memory. If the IV
+	 * is concatenated with a salt, this includes the salt.
+	 */
+	u8 *iv_ctr;
+	/* Length of IV or counter, in bytes */
+	unsigned int iv_ctr_len;
+
+	/*
+	 * Hash requests can be of any size, whether initial, update, or final.
+	 * A non-final request must be submitted to the SPU as an integral
+	 * number of blocks. This may leave data at the end of the request
+	 * that is not a full block. Since the request is non-final, it cannot
+	 * be padded. So, we write the remainder to this hash_carry buffer and
+	 * hold it until the next request arrives. The carry data is then
+	 * submitted at the beginning of the data in the next SPU msg.
+	 * hash_carry_len is the number of bytes currently in hash_carry. These
+	 * fields are only used for ahash requests.
+	 */
+	u8 hash_carry[HASH_CARRY_MAX];
+	unsigned int hash_carry_len;
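+	/*
+	 * e.g. with SHA-256 (64-byte blocks), a non-final update of 100 bytes
+	 * submits 64 bytes to the SPU and carries the remaining 36 bytes here.
+	 */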
+	unsigned int is_final;	/* is this the final request of the hash op? */
+
+	/*
+	 * Digest from incremental hash is saved here to include in next hash
+	 * operation. Cannot be stored in req->result for truncated hashes,
+	 * since result may be sized for final digest. Cannot be saved in
+	 * msg_buf because that gets deleted between incremental hash ops
+	 * and is not saved as part of export().
+	 */
+	u8 incr_hash[MAX_DIGEST_SIZE];
+
+	/* hmac context */
+	bool is_sw_hmac;
+
+	/* aead context */
+	struct crypto_tfm *old_tfm;
+	crypto_completion_t old_complete;
+	void *old_data;
+
+	gfp_t gfp;
+
+	/* Buffers used to build SPU request and response messages */
+	struct spu_msg_buf msg_buf;
+};
+
+/*
+ * Structure encapsulates a set of function pointers specific to the type of
+ * SPU hardware running. These functions handle creation and parsing of
+ * SPU request and response messages. Also includes hardware-specific
+ * values read from the device tree.
+ */
+struct spu_hw {
+	void (*spu_dump_msg_hdr)(u8 *buf, unsigned int buf_len);
+	u32 (*spu_ctx_max_payload)(enum spu_cipher_alg cipher_alg,
+				   enum spu_cipher_mode cipher_mode,
+				   unsigned int blocksize);
+	u32 (*spu_payload_length)(u8 *spu_hdr);
+	u16 (*spu_response_hdr_len)(u16 auth_key_len, u16 enc_key_len,
+				    bool is_hash);
+	u16 (*spu_hash_pad_len)(enum hash_alg hash_alg,
+				enum hash_mode hash_mode, u32 chunksize,
+				u16 hash_block_size);
+	u32 (*spu_gcm_ccm_pad_len)(enum spu_cipher_mode cipher_mode,
+				   unsigned int data_size);
+	u32 (*spu_assoc_resp_len)(enum spu_cipher_mode cipher_mode,
+				  unsigned int assoc_len,
+				  unsigned int iv_len, bool is_encrypt);
+	u8 (*spu_aead_ivlen)(enum spu_cipher_mode cipher_mode,
+			     u16 iv_len);
+	enum hash_type (*spu_hash_type)(u32 src_sent);
+	u32 (*spu_digest_size)(u32 digest_size, enum hash_alg alg,
+			       enum hash_type);
+	u32 (*spu_create_request)(u8 *spu_hdr,
+				  struct spu_request_opts *req_opts,
+				  struct spu_cipher_parms *cipher_parms,
+				  struct spu_hash_parms *hash_parms,
+				  struct spu_aead_parms *aead_parms,
+				  unsigned int data_size);
+	u16 (*spu_cipher_req_init)(u8 *spu_hdr,
+				   struct spu_cipher_parms *cipher_parms);
+	void (*spu_cipher_req_finish)(u8 *spu_hdr,
+				      u16 spu_req_hdr_len,
+				      unsigned int is_inbound,
+				      struct spu_cipher_parms *cipher_parms,
+				      bool update_key,
+				      unsigned int data_size);
+	void (*spu_request_pad)(u8 *pad_start, u32 gcm_padding,
+				u32 hash_pad_len, enum hash_alg auth_alg,
+				enum hash_mode auth_mode,
+				unsigned int total_sent, u32 status_padding);
+	u8 (*spu_xts_tweak_in_payload)(void);
+	u8 (*spu_tx_status_len)(void);
+	u8 (*spu_rx_status_len)(void);
+	int (*spu_status_process)(u8 *statp);
+	void (*spu_ccm_update_iv)(unsigned int digestsize,
+				  struct spu_cipher_parms *cipher_parms,
+				  unsigned int assoclen, unsigned int chunksize,
+				  bool is_encrypt, bool is_esp);
+	u32 (*spu_wordalign_padlen)(u32 data_size);
+
+	/* The base virtual addresses of the SPU hw register blocks */
+	void __iomem *reg_vbase[MAX_SPUS];
+
+	/* Version of the SPU hardware */
+	enum spu_spu_type spu_type;
+
+	/* Sub-version of the SPU hardware */
+	enum spu_spu_subtype spu_subtype;
+
+	/* The number of SPUs on this platform */
+	u32 num_spu;
+
+	/* The number of SPU channels on this platform */
+	u32 num_chan;
+};
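+
+/*
+ * The rest of the driver invokes these ops through iproc_priv.spu, e.g.
+ * (sketch only, variable names illustrative):
+ *
+ *	struct spu_hw *spu = &iproc_priv.spu;
+ *	u16 len = spu->spu_response_hdr_len(authkeylen, enckeylen, false);
+ *
+ * so the SPU-M and SPU2 message formats are handled behind one interface.
+ */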
+
+struct device_private {
+	struct platform_device *pdev;
+
+	struct spu_hw spu;
+
+	atomic_t session_count;	/* number of streams active */
+	atomic_t stream_count;	/* monotonic counter for streamID's */
+
+	/* Length of BCM header. Set to 0 when hw does not expect a BCM header. */
+	u8 bcm_hdr_len;
+
+	/* The index of the channel to use for the next crypto request */
+	atomic_t next_chan;
+
+	struct dentry *debugfs_dir;
+	struct dentry *debugfs_stats;
+
+	/* Number of request bytes processed and result bytes returned */
+	atomic64_t bytes_in;
+	atomic64_t bytes_out;
+
+	/* Number of operations of each type */
+	atomic_t op_counts[SPU_OP_NUM];
+
+	atomic_t cipher_cnt[CIPHER_ALG_LAST][CIPHER_MODE_LAST];
+	atomic_t hash_cnt[HASH_ALG_LAST];
+	atomic_t hmac_cnt[HASH_ALG_LAST];
+	atomic_t aead_cnt[AEAD_TYPE_LAST];
+
+	/* Number of calls to setkey() for each operation type */
+	atomic_t setkey_cnt[SPU_OP_NUM];
+
+	/* Number of times request was resubmitted because mb was full */
+	atomic_t mb_no_spc;
+
+	/* Number of mailbox send failures */
+	atomic_t mb_send_fail;
+
+	/* Number of ICV check failures for AEAD messages */
+	atomic_t bad_icv;
+
+	struct mbox_client mcl;
+
+	/* Array of mailbox channel pointers, one for each channel */
+	struct mbox_chan **mbox;
+};
+
+extern struct device_private iproc_priv;
+
+#endif
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
new file mode 100644
index 0000000..dbb5c03
--- /dev/null
+++ b/drivers/crypto/bcm/spu.c
@@ -0,0 +1,1251 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include "util.h"
+#include "spu.h"
+#include "spum.h"
+#include "cipher.h"
+
+/* This array is based on the hash algo type supported in spu.h */
+char *tag_to_hash_idx[] = { "none", "md5", "sha1", "sha224", "sha256" };
+
+char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes",
+	"sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" };
+
+char *aead_alg_name[] = { "ccm(aes)", "gcm(aes)", "authenc" };
+
+/* Assumes SPU-M messages are in big-endian format */
+void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
+{
+	u8 *ptr = buf;
+	struct SPUHEADER *spuh = (struct SPUHEADER *)buf;
+	unsigned int hash_key_len = 0;
+	unsigned int hash_state_len = 0;
+	unsigned int cipher_key_len = 0;
+	unsigned int iv_len;
+	u32 pflags;
+	u32 cflags;
+	u32 ecf;
+	u32 cipher_alg;
+	u32 cipher_mode;
+	u32 cipher_type;
+	u32 hash_alg;
+	u32 hash_mode;
+	u32 hash_type;
+	u32 sctx_size;   /* SCTX length in words */
+	u32 sctx_pl_len; /* SCTX payload length in bytes */
+
+	packet_log("\n");
+	packet_log("SPU Message header %p len: %u\n", buf, buf_len);
+
+	/* ========== Decode MH ========== */
+	packet_log("  MH 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+	if (spuh->mh.flags & MH_SCTX_PRES)
+		packet_log("    SCTX  present\n");
+	if (spuh->mh.flags & MH_BDESC_PRES)
+		packet_log("    BDESC present\n");
+	if (spuh->mh.flags & MH_MFM_PRES)
+		packet_log("    MFM   present\n");
+	if (spuh->mh.flags & MH_BD_PRES)
+		packet_log("    BD    present\n");
+	if (spuh->mh.flags & MH_HASH_PRES)
+		packet_log("    HASH  present\n");
+	if (spuh->mh.flags & MH_SUPDT_PRES)
+		packet_log("    SUPDT present\n");
+	packet_log("    Opcode 0x%02x\n", spuh->mh.op_code);
+
+	ptr += sizeof(spuh->mh) + sizeof(spuh->emh);  /* skip emh. unused */
+
+	/* ========== Decode SCTX ========== */
+	if (spuh->mh.flags & MH_SCTX_PRES) {
+		pflags = be32_to_cpu(spuh->sa.proto_flags);
+		packet_log("  SCTX[0] 0x%08x\n", pflags);
+		sctx_size = pflags & SCTX_SIZE;
+		packet_log("    Size %u words\n", sctx_size);
+
+		cflags = be32_to_cpu(spuh->sa.cipher_flags);
+		packet_log("  SCTX[1] 0x%08x\n", cflags);
+		packet_log("    Inbound:%lu (1:decrypt/vrfy 0:encrypt/auth)\n",
+			   (cflags & CIPHER_INBOUND) >> CIPHER_INBOUND_SHIFT);
+		packet_log("    Order:%lu (1:AuthFirst 0:EncFirst)\n",
+			   (cflags & CIPHER_ORDER) >> CIPHER_ORDER_SHIFT);
+		packet_log("    ICV_IS_512:%lx\n",
+			   (cflags & ICV_IS_512) >> ICV_IS_512_SHIFT);
+		cipher_alg = (cflags & CIPHER_ALG) >> CIPHER_ALG_SHIFT;
+		cipher_mode = (cflags & CIPHER_MODE) >> CIPHER_MODE_SHIFT;
+		cipher_type = (cflags & CIPHER_TYPE) >> CIPHER_TYPE_SHIFT;
+		packet_log("    Crypto Alg:%u Mode:%u Type:%u\n",
+			   cipher_alg, cipher_mode, cipher_type);
+		hash_alg = (cflags & HASH_ALG) >> HASH_ALG_SHIFT;
+		hash_mode = (cflags & HASH_MODE) >> HASH_MODE_SHIFT;
+		hash_type = (cflags & HASH_TYPE) >> HASH_TYPE_SHIFT;
+		packet_log("    Hash   Alg:%x Mode:%x Type:%x\n",
+			   hash_alg, hash_mode, hash_type);
+		packet_log("    UPDT_Offset:%u\n", cflags & UPDT_OFST);
+
+		ecf = be32_to_cpu(spuh->sa.ecf);
+		packet_log("  SCTX[2] 0x%08x\n", ecf);
+		packet_log("    WriteICV:%lu CheckICV:%lu ICV_SIZE:%u ",
+			   (ecf & INSERT_ICV) >> INSERT_ICV_SHIFT,
+			   (ecf & CHECK_ICV) >> CHECK_ICV_SHIFT,
+			   (ecf & ICV_SIZE) >> ICV_SIZE_SHIFT);
+		packet_log("BD_SUPPRESS:%lu\n",
+			   (ecf & BD_SUPPRESS) >> BD_SUPPRESS_SHIFT);
+		packet_log("    SCTX_IV:%lu ExplicitIV:%lu GenIV:%lu ",
+			   (ecf & SCTX_IV) >> SCTX_IV_SHIFT,
+			   (ecf & EXPLICIT_IV) >> EXPLICIT_IV_SHIFT,
+			   (ecf & GEN_IV) >> GEN_IV_SHIFT);
+		packet_log("IV_OV_OFST:%lu EXP_IV_SIZE:%u\n",
+			   (ecf & IV_OFFSET) >> IV_OFFSET_SHIFT,
+			   ecf & EXP_IV_SIZE);
+
+		ptr += sizeof(struct SCTX);
+
+		if (hash_alg && hash_mode) {
+			char *name = "NONE";
+
+			switch (hash_alg) {
+			case HASH_ALG_MD5:
+				hash_key_len = 16;
+				name = "MD5";
+				break;
+			case HASH_ALG_SHA1:
+				hash_key_len = 20;
+				name = "SHA1";
+				break;
+			case HASH_ALG_SHA224:
+				hash_key_len = 28;
+				name = "SHA224";
+				break;
+			case HASH_ALG_SHA256:
+				hash_key_len = 32;
+				name = "SHA256";
+				break;
+			case HASH_ALG_SHA384:
+				hash_key_len = 48;
+				name = "SHA384";
+				break;
+			case HASH_ALG_SHA512:
+				hash_key_len = 64;
+				name = "SHA512";
+				break;
+			case HASH_ALG_AES:
+				hash_key_len = 0;
+				name = "AES";
+				break;
+			case HASH_ALG_NONE:
+				break;
+			}
+
+			packet_log("    Auth Key Type:%s Length:%u Bytes\n",
+				   name, hash_key_len);
+			packet_dump("    KEY: ", ptr, hash_key_len);
+			ptr += hash_key_len;
+		} else if ((hash_alg == HASH_ALG_AES) &&
+			   (hash_mode == HASH_MODE_XCBC)) {
+			char *name = "NONE";
+
+			switch (cipher_type) {
+			case CIPHER_TYPE_AES128:
+				hash_key_len = 16;
+				name = "AES128-XCBC";
+				break;
+			case CIPHER_TYPE_AES192:
+				hash_key_len = 24;
+				name = "AES192-XCBC";
+				break;
+			case CIPHER_TYPE_AES256:
+				hash_key_len = 32;
+				name = "AES256-XCBC";
+				break;
+			}
+			packet_log("    Auth Key Type:%s Length:%u Bytes\n",
+				   name, hash_key_len);
+			packet_dump("    KEY: ", ptr, hash_key_len);
+			ptr += hash_key_len;
+		}
+
+		if (hash_alg && (hash_mode == HASH_MODE_NONE) &&
+		    (hash_type == HASH_TYPE_UPDT)) {
+			char *name = "NONE";
+
+			switch (hash_alg) {
+			case HASH_ALG_MD5:
+				hash_state_len = 16;
+				name = "MD5";
+				break;
+			case HASH_ALG_SHA1:
+				hash_state_len = 20;
+				name = "SHA1";
+				break;
+			case HASH_ALG_SHA224:
+				hash_state_len = 32;
+				name = "SHA224";
+				break;
+			case HASH_ALG_SHA256:
+				hash_state_len = 32;
+				name = "SHA256";
+				break;
+			case HASH_ALG_SHA384:
+				hash_state_len = 48;
+				name = "SHA384";
+				break;
+			case HASH_ALG_SHA512:
+				hash_state_len = 64;
+				name = "SHA512";
+				break;
+			case HASH_ALG_AES:
+				hash_state_len = 0;
+				name = "AES";
+				break;
+			case HASH_ALG_NONE:
+				break;
+			}
+
+			packet_log("    Auth State Type:%s Length:%u Bytes\n",
+				   name, hash_state_len);
+			packet_dump("    State: ", ptr, hash_state_len);
+			ptr += hash_state_len;
+		}
+
+		if (cipher_alg) {
+			char *name = "NONE";
+
+			switch (cipher_alg) {
+			case CIPHER_ALG_DES:
+				cipher_key_len = 8;
+				name = "DES";
+				break;
+			case CIPHER_ALG_3DES:
+				cipher_key_len = 24;
+				name = "3DES";
+				break;
+			case CIPHER_ALG_RC4:
+				cipher_key_len = 260;
+				name = "ARC4";
+				break;
+			case CIPHER_ALG_AES:
+				switch (cipher_type) {
+				case CIPHER_TYPE_AES128:
+					cipher_key_len = 16;
+					name = "AES128";
+					break;
+				case CIPHER_TYPE_AES192:
+					cipher_key_len = 24;
+					name = "AES192";
+					break;
+				case CIPHER_TYPE_AES256:
+					cipher_key_len = 32;
+					name = "AES256";
+					break;
+				}
+				break;
+			case CIPHER_ALG_NONE:
+				break;
+			}
+
+			packet_log("    Cipher Key Type:%s Length:%u Bytes\n",
+				   name, cipher_key_len);
+
+			/* XTS has two keys */
+			if (cipher_mode == CIPHER_MODE_XTS) {
+				packet_dump("    KEY2: ", ptr, cipher_key_len);
+				ptr += cipher_key_len;
+				packet_dump("    KEY1: ", ptr, cipher_key_len);
+				ptr += cipher_key_len;
+
+				cipher_key_len *= 2;
+			} else {
+				packet_dump("    KEY: ", ptr, cipher_key_len);
+				ptr += cipher_key_len;
+			}
+
+			if (ecf & SCTX_IV) {
+				sctx_pl_len = sctx_size * sizeof(u32) -
+					sizeof(struct SCTX);
+				iv_len = sctx_pl_len -
+					(hash_key_len + hash_state_len +
+					 cipher_key_len);
+				packet_log("    IV Length:%u Bytes\n", iv_len);
+				packet_dump("    IV: ", ptr, iv_len);
+				ptr += iv_len;
+			}
+		}
+	}
+
+	/* ========== Decode BDESC ========== */
+	if (spuh->mh.flags & MH_BDESC_PRES) {
+#ifdef DEBUG
+		struct BDESC_HEADER *bdesc = (struct BDESC_HEADER *)ptr;
+#endif
+		packet_log("  BDESC[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+		packet_log("    OffsetMAC:%u LengthMAC:%u\n",
+			   be16_to_cpu(bdesc->offset_mac),
+			   be16_to_cpu(bdesc->length_mac));
+		ptr += sizeof(u32);
+
+		packet_log("  BDESC[1] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+		packet_log("    OffsetCrypto:%u LengthCrypto:%u\n",
+			   be16_to_cpu(bdesc->offset_crypto),
+			   be16_to_cpu(bdesc->length_crypto));
+		ptr += sizeof(u32);
+
+		packet_log("  BDESC[2] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+		packet_log("    OffsetICV:%u OffsetIV:%u\n",
+			   be16_to_cpu(bdesc->offset_icv),
+			   be16_to_cpu(bdesc->offset_iv));
+		ptr += sizeof(u32);
+	}
+
+	/* ========== Decode BD ========== */
+	if (spuh->mh.flags & MH_BD_PRES) {
+#ifdef DEBUG
+		struct BD_HEADER *bd = (struct BD_HEADER *)ptr;
+#endif
+		packet_log("  BD[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
+		packet_log("    Size:%ubytes PrevLength:%u\n",
+			   be16_to_cpu(bd->size), be16_to_cpu(bd->prev_length));
+		ptr += 4;
+	}
+
+	/* Double check sanity */
+	if (buf + buf_len != ptr) {
+		packet_log(" Packet parsed incorrectly. ");
+		packet_log("buf:%p buf_len:%u buf+buf_len:%p ptr:%p\n",
+			   buf, buf_len, buf + buf_len, ptr);
+	}
+
+	packet_log("\n");
+}
+
+/**
+ * spum_ns2_ctx_max_payload() - Determine the max length of the payload for a
+ * SPU message for a given cipher and hash alg context.
+ * @cipher_alg:		The cipher algorithm
+ * @cipher_mode:	The cipher mode
+ * @blocksize:		The size of a block of data for this algo
+ *
+ * The max payload must be a multiple of the blocksize so that if a request is
+ * too large to fit in a single SPU message, the request can be broken into
+ * max_payload sized chunks. Each chunk must be a multiple of blocksize.
+ *
+ * Return: Max payload length in bytes
+ */
+u32 spum_ns2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
+			     enum spu_cipher_mode cipher_mode,
+			     unsigned int blocksize)
+{
+	u32 max_payload = SPUM_NS2_MAX_PAYLOAD;
+	u32 excess;
+
+	/* In XTS on SPU-M, we'll need to insert tweak before input data */
+	if (cipher_mode == CIPHER_MODE_XTS)
+		max_payload -= SPU_XTS_TWEAK_SIZE;
+
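+	/*
+	 * e.g. with a 16-byte AES block size, a hypothetical 65530-byte limit
+	 * would be rounded down to 65520 so every chunk is whole blocks.
+	 */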
+	excess = max_payload % blocksize;
+
+	return max_payload - excess;
+}
+
+/**
+ * spum_nsp_ctx_max_payload() - Determine the max length of the payload for a
+ * SPU message for a given cipher and hash alg context.
+ * @cipher_alg:		The cipher algorithm
+ * @cipher_mode:	The cipher mode
+ * @blocksize:		The size of a block of data for this algo
+ *
+ * The max payload must be a multiple of the blocksize so that if a request is
+ * too large to fit in a single SPU message, the request can be broken into
+ * max_payload sized chunks. Each chunk must be a multiple of blocksize.
+ *
+ * Return: Max payload length in bytes
+ */
+u32 spum_nsp_ctx_max_payload(enum spu_cipher_alg cipher_alg,
+			     enum spu_cipher_mode cipher_mode,
+			     unsigned int blocksize)
+{
+	u32 max_payload = SPUM_NSP_MAX_PAYLOAD;
+	u32 excess;
+
+	/* In XTS on SPU-M, we'll need to insert tweak before input data */
+	if (cipher_mode == CIPHER_MODE_XTS)
+		max_payload -= SPU_XTS_TWEAK_SIZE;
+
+	excess = max_payload % blocksize;
+
+	return max_payload - excess;
+}
+
+/**
+ * spum_payload_length() - Given a SPU-M message header, extract the payload
+ * length.
+ * @spu_hdr:	Start of SPU header
+ *
+ * Assumes just MH, EMH, BD (no SCTX, BDESC). Works for response frames.
+ *
+ * Return: payload length in bytes
+ */
+u32 spum_payload_length(u8 *spu_hdr)
+{
+	struct BD_HEADER *bd;
+	u32 pl_len;
+
+	/* Find BD header: skip MH and EMH (8 bytes total) */
+	bd = (struct BD_HEADER *)(spu_hdr + 8);
+	pl_len = be16_to_cpu(bd->size);
+
+	return pl_len;
+}
+
+/**
+ * spum_response_hdr_len() - Given the length of the hash key and encryption
+ * key, determine the expected length of a SPU response header.
+ * @auth_key_len:	authentication key length (bytes)
+ * @enc_key_len:	encryption key length (bytes)
+ * @is_hash:		true if response message is for a hash operation
+ *
+ * Return: length of SPU response header (bytes)
+ */
+u16 spum_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
+{
+	if (is_hash)
+		return SPU_HASH_RESP_HDR_LEN;
+	else
+		return SPU_RESP_HDR_LEN;
+}
+
+/**
+ * spum_hash_pad_len() - Calculate the length of hash padding required to extend
+ * data to a full block size.
+ * @hash_alg:   hash algorithm
+ * @hash_mode:       hash mode
+ * @chunksize:  length of data, in bytes
+ * @hash_block_size:  size of a block of data for hash algorithm
+ *
+ * Reserves space for the 1-byte (0x80) start-of-pad marker and the total
+ * length as a u64.
+ *
+ * Return:  length of hash pad in bytes
+ */
+u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
+		      u32 chunksize, u16 hash_block_size)
+{
+	unsigned int length_len;
+	unsigned int used_space_last_block;
+	int hash_pad_len;
+
+	/* AES-XCBC hash requires just padding to next block boundary */
+	if ((hash_alg == HASH_ALG_AES) && (hash_mode == HASH_MODE_XCBC)) {
+		used_space_last_block = chunksize % hash_block_size;
+		hash_pad_len = hash_block_size - used_space_last_block;
+		if (hash_pad_len >= hash_block_size)
+			hash_pad_len -= hash_block_size;
+		return hash_pad_len;
+	}
+
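+	/*
+	 * Example: for SHA-256 (64-byte block, 8-byte length field) and a
+	 * 100-byte chunk, the pad is one 0x80 byte, 19 zero bytes and the
+	 * 8-byte length, i.e. 28 bytes, bringing the total to 128 bytes.
+	 */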
+	used_space_last_block = chunksize % hash_block_size + 1;
+	if ((hash_alg == HASH_ALG_SHA384) || (hash_alg == HASH_ALG_SHA512))
+		length_len = 2 * sizeof(u64);
+	else
+		length_len = sizeof(u64);
+
+	used_space_last_block += length_len;
+	hash_pad_len = hash_block_size - used_space_last_block;
+	if (hash_pad_len < 0)
+		hash_pad_len += hash_block_size;
+
+	hash_pad_len += 1 + length_len;
+	return hash_pad_len;
+}
+
+/**
+ * spum_gcm_ccm_pad_len() - Determine the required length of GCM or CCM padding.
+ * @cipher_mode:	Algo type
+ * @data_size:		Length of plaintext (bytes)
+ *
+ * Return: Length of padding, in bytes
+ */
+u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
+			 unsigned int data_size)
+{
+	u32 pad_len = 0;
+	u32 m1 = SPU_GCM_CCM_ALIGN - 1;
+
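+	/*
+	 * Example, assuming SPU_GCM_CCM_ALIGN is 16: 20 bytes of GCM plaintext
+	 * need 12 bytes of padding to reach the next 16-byte boundary.
+	 */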
+	if ((cipher_mode == CIPHER_MODE_GCM) ||
+	    (cipher_mode == CIPHER_MODE_CCM))
+		pad_len = ((data_size + m1) & ~m1) - data_size;
+
+	return pad_len;
+}
+
+/**
+ * spum_assoc_resp_len() - Determine the size of the receive buffer required to
+ * catch associated data.
+ * @cipher_mode:	cipher mode
+ * @assoc_len:		length of associated data (bytes)
+ * @iv_len:		length of IV (bytes)
+ * @is_encrypt:		true if encrypting. false if decrypting.
+ *
+ * Return: length of associated data in response message (bytes)
+ */
+u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode,
+			unsigned int assoc_len, unsigned int iv_len,
+			bool is_encrypt)
+{
+	u32 buflen = 0;
+	u32 pad;
+
+	if (assoc_len)
+		buflen = assoc_len;
+
+	if (cipher_mode == CIPHER_MODE_GCM) {
+		/* AAD needs to be padded in responses too */
+		pad = spum_gcm_ccm_pad_len(cipher_mode, buflen);
+		buflen += pad;
+	}
+	if (cipher_mode == CIPHER_MODE_CCM) {
+		/*
+		 * AAD needs to be padded in responses too
+		 * for CCM, len + 2 needs to be 128-bit aligned.
+		 */
+		pad = spum_gcm_ccm_pad_len(cipher_mode, buflen + 2);
+		buflen += pad;
+	}
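+	/*
+	 * e.g. CCM with 8 bytes of AAD (and 16-byte alignment): 8 + 2 rounds
+	 * up to 16, so 6 pad bytes are added and buflen is 14.
+	 */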
+
+	return buflen;
+}
+
+/**
+ * spum_aead_ivlen() - Calculate the length of the AEAD IV to be included
+ * in a SPU request after the AAD and before the payload.
+ * @cipher_mode:  cipher mode
+ * @iv_len:       initialization vector length in bytes
+ *
+ * In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need
+ * to include the IV as a separate field in the SPU request msg.
+ *
+ * Return: Length of AEAD IV in bytes
+ */
+u8 spum_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len)
+{
+	return 0;
+}
+
+/**
+ * spum_hash_type() - Determine the type of hash operation.
+ * @src_sent:  The number of bytes in the current request that have already
+ *             been sent to the SPU to be hashed.
+ *
+ * We do not use HASH_TYPE_FULL for requests that fit in a single SPU message.
+ * Using FULL causes failures (such as when the string to be hashed is empty).
+ * For similar reasons, we never use HASH_TYPE_FIN. Instead, submit messages
+ * as INIT or UPDT and do the hash padding in sw.
+ */
+enum hash_type spum_hash_type(u32 src_sent)
+{
+	return src_sent ? HASH_TYPE_UPDT : HASH_TYPE_INIT;
+}
+
+/**
+ * spum_digest_size() - Determine the size of a hash digest to expect the SPU to
+ * return.
+ * @alg_digest_size: Number of bytes in the final digest for the given algo
+ * @alg:             The hash algorithm
+ * @htype:           Type of hash operation (init, update, full, etc)
+ *
+ * When doing incremental hashing for an algorithm with a truncated hash
+ * (e.g., SHA224), the SPU returns the full digest so that it can be fed back as
+ * a partial result for the next chunk.
+ */
+u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg,
+		     enum hash_type htype)
+{
+	u32 digestsize = alg_digest_size;
+
+	/*
+	 * The SPU returns the complete digest when doing an incremental hash
+	 * with a truncated hash algo.
+	 */
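+	/* e.g. an incremental SHA-224 chunk returns the full 32-byte digest */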
+	if ((htype == HASH_TYPE_INIT) || (htype == HASH_TYPE_UPDT)) {
+		if (alg == HASH_ALG_SHA224)
+			digestsize = SHA256_DIGEST_SIZE;
+		else if (alg == HASH_ALG_SHA384)
+			digestsize = SHA512_DIGEST_SIZE;
+	}
+	return digestsize;
+}
+
+/**
+ * spum_create_request() - Build a SPU request message header, up to and
+ * including the BD header. Construct the message starting at spu_hdr. Caller
+ * should allocate this buffer in DMA-able memory at least SPU_HEADER_ALLOC_LEN
+ * bytes long.
+ * @spu_hdr: Start of buffer where SPU request header is to be written
+ * @req_opts: SPU request message options
+ * @cipher_parms: Parameters related to cipher algorithm
+ * @hash_parms:   Parameters related to hash algorithm
+ * @aead_parms:   Parameters related to AEAD operation
+ * @data_size:    Length of data to be encrypted or authenticated. If AEAD, does
+ *		  not include length of AAD.
+ *
+ * Return: the length of the SPU header in bytes. 0 if an error occurs.
+ */
+u32 spum_create_request(u8 *spu_hdr,
+			struct spu_request_opts *req_opts,
+			struct spu_cipher_parms *cipher_parms,
+			struct spu_hash_parms *hash_parms,
+			struct spu_aead_parms *aead_parms,
+			unsigned int data_size)
+{
+	struct SPUHEADER *spuh;
+	struct BDESC_HEADER *bdesc;
+	struct BD_HEADER *bd;
+
+	u8 *ptr;
+	u32 protocol_bits = 0;
+	u32 cipher_bits = 0;
+	u32 ecf_bits = 0;
+	u8 sctx_words = 0;
+	unsigned int buf_len = 0;
+
+	/* size of the cipher payload */
+	unsigned int cipher_len = hash_parms->prebuf_len + data_size +
+				hash_parms->pad_len;
+
+	/* offset of prebuf or data from end of BD header */
+	unsigned int cipher_offset = aead_parms->assoc_size +
+		aead_parms->iv_len + aead_parms->aad_pad_len;
+
+	/* total size of the DB data (without STAT word padding) */
+	unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size,
+						 aead_parms->iv_len,
+						 hash_parms->prebuf_len,
+						 data_size,
+						 aead_parms->aad_pad_len,
+						 aead_parms->data_pad_len,
+						 hash_parms->pad_len);
+
+	unsigned int auth_offset = 0;
+	unsigned int offset_iv = 0;
+
+	/* size/offset of the auth payload */
+	unsigned int auth_len;
+
+	auth_len = real_db_size;
+
+	if (req_opts->is_aead && req_opts->is_inbound)
+		cipher_len -= hash_parms->digestsize;
+
+	if (req_opts->is_aead && req_opts->is_inbound)
+		auth_len -= hash_parms->digestsize;
+
+	if ((hash_parms->alg == HASH_ALG_AES) &&
+	    (hash_parms->mode == HASH_MODE_XCBC)) {
+		auth_len -= hash_parms->pad_len;
+		cipher_len -= hash_parms->pad_len;
+	}
+
+	flow_log("%s()\n", __func__);
+	flow_log("  in:%u authFirst:%u\n",
+		 req_opts->is_inbound, req_opts->auth_first);
+	flow_log("  %s. cipher alg:%u mode:%u type %u\n",
+		 spu_alg_name(cipher_parms->alg, cipher_parms->mode),
+		 cipher_parms->alg, cipher_parms->mode, cipher_parms->type);
+	flow_log("    key: %d\n", cipher_parms->key_len);
+	flow_dump("    key: ", cipher_parms->key_buf, cipher_parms->key_len);
+	flow_log("    iv: %d\n", cipher_parms->iv_len);
+	flow_dump("    iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
+	flow_log("  auth alg:%u mode:%u type %u\n",
+		 hash_parms->alg, hash_parms->mode, hash_parms->type);
+	flow_log("  digestsize: %u\n", hash_parms->digestsize);
+	flow_log("  authkey: %d\n", hash_parms->key_len);
+	flow_dump("  authkey: ", hash_parms->key_buf, hash_parms->key_len);
+	flow_log("  assoc_size:%u\n", aead_parms->assoc_size);
+	flow_log("  prebuf_len:%u\n", hash_parms->prebuf_len);
+	flow_log("  data_size:%u\n", data_size);
+	flow_log("  hash_pad_len:%u\n", hash_parms->pad_len);
+	flow_log("  real_db_size:%u\n", real_db_size);
+	flow_log(" auth_offset:%u auth_len:%u cipher_offset:%u cipher_len:%u\n",
+		 auth_offset, auth_len, cipher_offset, cipher_len);
+	flow_log("  aead_iv: %u\n", aead_parms->iv_len);
+
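+	/*
+	 * The header built below is laid out as MH | EMH | SCTX (three control
+	 * words, then optional auth key, cipher key and IV) | BDESC | BD
+	 * header. The payload itself is carried separately in the mailbox
+	 * message scatterlists.
+	 */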
+	/* starting out: zero the header (plus some) */
+	ptr = spu_hdr;
+	memset(ptr, 0, sizeof(struct SPUHEADER));
+
+	/* format master header word */
+	/* Do not set the next bit even though the datasheet says to */
+	spuh = (struct SPUHEADER *)ptr;
+	ptr += sizeof(struct SPUHEADER);
+	buf_len += sizeof(struct SPUHEADER);
+
+	spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC;
+	spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES);
+
+	/* Format sctx word 0 (protocol_bits) */
+	sctx_words = 3;		/* size in words */
+
+	/* Format sctx word 1 (cipher_bits) */
+	if (req_opts->is_inbound)
+		cipher_bits |= CIPHER_INBOUND;
+	if (req_opts->auth_first)
+		cipher_bits |= CIPHER_ORDER;
+
+	/* Set the crypto parameters in the cipher.flags */
+	cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT;
+	cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT;
+	cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT;
+
+	/* Set the auth parameters in the cipher.flags */
+	cipher_bits |= hash_parms->alg << HASH_ALG_SHIFT;
+	cipher_bits |= hash_parms->mode << HASH_MODE_SHIFT;
+	cipher_bits |= hash_parms->type << HASH_TYPE_SHIFT;
+
+	/*
+	 * Format sctx extensions if required, and update main fields if
+	 * required.
+	 */
+	if (hash_parms->alg) {
+		/* Write the authentication key material if present */
+		if (hash_parms->key_len) {
+			memcpy(ptr, hash_parms->key_buf, hash_parms->key_len);
+			ptr += hash_parms->key_len;
+			buf_len += hash_parms->key_len;
+			sctx_words += hash_parms->key_len / 4;
+		}
+
+		if ((cipher_parms->mode == CIPHER_MODE_GCM) ||
+		    (cipher_parms->mode == CIPHER_MODE_CCM))
+			/* unpadded length */
+			offset_iv = aead_parms->assoc_size;
+
+		/* if GCM/CCM we need to write ICV into the payload */
+		if (!req_opts->is_inbound) {
+			if ((cipher_parms->mode == CIPHER_MODE_GCM) ||
+			    (cipher_parms->mode == CIPHER_MODE_CCM))
+				ecf_bits |= 1 << INSERT_ICV_SHIFT;
+		} else {
+			ecf_bits |= CHECK_ICV;
+		}
+
+		/* Inform the SPU of the ICV size (in words) */
+		if (hash_parms->digestsize == 64)
+			cipher_bits |= ICV_IS_512;
+		else
+			ecf_bits |=
+			(hash_parms->digestsize / 4) << ICV_SIZE_SHIFT;
+	}
+
+	if (req_opts->bd_suppress)
+		ecf_bits |= BD_SUPPRESS;
+
+	/* copy the encryption keys in the SAD entry */
+	if (cipher_parms->alg) {
+		if (cipher_parms->key_len) {
+			memcpy(ptr, cipher_parms->key_buf,
+			       cipher_parms->key_len);
+			ptr += cipher_parms->key_len;
+			buf_len += cipher_parms->key_len;
+			sctx_words += cipher_parms->key_len / 4;
+		}
+
+		/*
+		 * if encrypting then set IV size, use SCTX IV unless no IV
+		 * given here
+		 */
+		if (cipher_parms->iv_buf && cipher_parms->iv_len) {
+			/* Use SCTX IV */
+			ecf_bits |= SCTX_IV;
+
+			/* cipher iv provided so put it in here */
+			memcpy(ptr, cipher_parms->iv_buf, cipher_parms->iv_len);
+
+			ptr += cipher_parms->iv_len;
+			buf_len += cipher_parms->iv_len;
+			sctx_words += cipher_parms->iv_len / 4;
+		}
+	}
+
+	/*
+	 * RFC4543 (GMAC/ESP) requires data to be sent as part of AAD
+	 * so we need to override the BDESC parameters.
+	 */
+	if (req_opts->is_rfc4543) {
+		if (req_opts->is_inbound)
+			data_size -= hash_parms->digestsize;
+		offset_iv = aead_parms->assoc_size + data_size;
+		cipher_len = 0;
+		cipher_offset = offset_iv;
+		auth_len = cipher_offset + aead_parms->data_pad_len;
+	}
+
+	/* write in the total sctx length now that we know it */
+	protocol_bits |= sctx_words;
+
+	/* Endian adjust the SCTX */
+	spuh->sa.proto_flags = cpu_to_be32(protocol_bits);
+	spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
+	spuh->sa.ecf = cpu_to_be32(ecf_bits);
+
+	/* === create the BDESC section === */
+	bdesc = (struct BDESC_HEADER *)ptr;
+
+	bdesc->offset_mac = cpu_to_be16(auth_offset);
+	bdesc->length_mac = cpu_to_be16(auth_len);
+	bdesc->offset_crypto = cpu_to_be16(cipher_offset);
+	bdesc->length_crypto = cpu_to_be16(cipher_len);
+
+	/*
+	 * CCM in SPU-M requires that ICV not be in same 32-bit word as data or
+	 * padding.  So account for padding as necessary.
+	 */
+	if (cipher_parms->mode == CIPHER_MODE_CCM)
+		auth_len += spum_wordalign_padlen(auth_len);
+
+	bdesc->offset_icv = cpu_to_be16(auth_len);
+	bdesc->offset_iv = cpu_to_be16(offset_iv);
+
+	ptr += sizeof(struct BDESC_HEADER);
+	buf_len += sizeof(struct BDESC_HEADER);
+
+	/* === no MFM section === */
+
+	/* === create the BD section === */
+
+	/* add the BD header */
+	bd = (struct BD_HEADER *)ptr;
+	bd->size = cpu_to_be16(real_db_size);
+	bd->prev_length = 0;
+
+	ptr += sizeof(struct BD_HEADER);
+	buf_len += sizeof(struct BD_HEADER);
+
+	packet_dump("  SPU request header: ", spu_hdr, buf_len);
+
+	return buf_len;
+}
+
+/**
+ * spum_cipher_req_init() - Build a SPU request message header, up to and
+ * including the BD header.
+ * @spu_hdr:      Start of SPU request header (MH)
+ * @cipher_parms: Parameters that describe the cipher request
+ *
+ * Construct the message starting at spu_hdr. Caller should allocate this buffer
+ * in DMA-able memory at least SPU_HEADER_ALLOC_LEN bytes long.
+ *
+ * Return: the length of the SPU header in bytes. 0 if an error occurs.
+ */
+u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
+{
+	struct SPUHEADER *spuh;
+	u32 protocol_bits = 0;
+	u32 cipher_bits = 0;
+	u32 ecf_bits = 0;
+	u8 sctx_words = 0;
+	u8 *ptr = spu_hdr;
+
+	flow_log("%s()\n", __func__);
+	flow_log("  cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
+		 cipher_parms->mode, cipher_parms->type);
+	flow_log("  cipher_iv_len: %u\n", cipher_parms->iv_len);
+	flow_log("    key: %d\n", cipher_parms->key_len);
+	flow_dump("    key: ", cipher_parms->key_buf, cipher_parms->key_len);
+
+	/* starting out: zero the header (plus some) */
+	memset(spu_hdr, 0, sizeof(struct SPUHEADER));
+	ptr += sizeof(struct SPUHEADER);
+
+	/* format master header word */
+	/* Do not set the next bit even though the datasheet says to */
+	spuh = (struct SPUHEADER *)spu_hdr;
+
+	spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC;
+	spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES);
+
+	/* Format sctx word 0 (protocol_bits) */
+	sctx_words = 3;		/* size in words */
+
+	/* reserve space for the cipher key and IV in the SAD entry */
+	if (cipher_parms->alg) {
+		if (cipher_parms->key_len) {
+			ptr += cipher_parms->key_len;
+			sctx_words += cipher_parms->key_len / 4;
+		}
+
+		/*
+		 * if encrypting then set IV size, use SCTX IV unless no IV
+		 * given here
+		 */
+		if (cipher_parms->iv_len) {
+			/* Use SCTX IV */
+			ecf_bits |= SCTX_IV;
+			ptr += cipher_parms->iv_len;
+			sctx_words += cipher_parms->iv_len / 4;
+		}
+	}
+
+	/* Set the crypto parameters in the cipher.flags */
+	cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT;
+	cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT;
+	cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT;
+
+	/* copy the encryption keys in the SAD entry */
+	if (cipher_parms->alg && cipher_parms->key_len)
+		memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len);
+
+	/* write in the total sctx length now that we know it */
+	protocol_bits |= sctx_words;
+
+	/* Endian adjust the SCTX */
+	spuh->sa.proto_flags = cpu_to_be32(protocol_bits);
+	spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
+	spuh->sa.ecf = cpu_to_be32(ecf_bits);
+
+	packet_dump("  SPU request header: ", spu_hdr,
+		    sizeof(struct SPUHEADER));
+
+	return sizeof(struct SPUHEADER) + cipher_parms->key_len +
+		cipher_parms->iv_len + sizeof(struct BDESC_HEADER) +
+		sizeof(struct BD_HEADER);
+}
+
+/**
+ * spum_cipher_req_finish() - Finish building a SPU request message header for a
+ * block cipher request.
+ * @spu_hdr:         Start of the request message header (MH field)
+ * @spu_req_hdr_len: Length in bytes of the SPU request header
+ * @is_inbound:      0 encrypt, 1 decrypt
+ * @cipher_parms:    Parameters describing cipher operation to be performed
+ * @update_key:      If true, rewrite the cipher key in SCTX
+ * @data_size:       Length of the data in the BD field
+ *
+ * Assumes much of the header was already filled in at setkey() time in
+ * spum_cipher_req_init(), which also fills in the encryption key. For RC4, when
+ * submitting a request for a non-first chunk, we use the 260-byte SUPDT field
+ * from the previous response as the key. update_key is true for this case and
+ * unused in all other cases.
+ */
+void spum_cipher_req_finish(u8 *spu_hdr,
+			    u16 spu_req_hdr_len,
+			    unsigned int is_inbound,
+			    struct spu_cipher_parms *cipher_parms,
+			    bool update_key,
+			    unsigned int data_size)
+{
+	struct SPUHEADER *spuh;
+	struct BDESC_HEADER *bdesc;
+	struct BD_HEADER *bd;
+	u8 *bdesc_ptr = spu_hdr + spu_req_hdr_len -
+	    (sizeof(struct BD_HEADER) + sizeof(struct BDESC_HEADER));
+
+	u32 cipher_bits;
+
+	flow_log("%s()\n", __func__);
+	flow_log(" in: %u\n", is_inbound);
+	flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
+		 cipher_parms->type);
+	if (update_key) {
+		flow_log(" cipher key len: %u\n", cipher_parms->key_len);
+		flow_dump("  key: ", cipher_parms->key_buf,
+			  cipher_parms->key_len);
+	}
+
+	/*
+	 * In XTS mode, the API puts the "i" parameter (block tweak) in the IV.
+	 * For SPU-M it belongs at the start of the BD; tx_sg_create() copies it
+	 * there. The IV in the SPU msg for SPU-M should be 0, since that is the
+	 * "j" parameter (block counter within the larger data unit). Given that
+	 * we can send an entire disk block (<= 4KB) in one SPU msg, we don't
+	 * need to use this parameter.
+	 */
+	if (cipher_parms->mode == CIPHER_MODE_XTS)
+		memset(cipher_parms->iv_buf, 0, cipher_parms->iv_len);
+
+	flow_log(" iv len: %d\n", cipher_parms->iv_len);
+	flow_dump("    iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
+	flow_log(" data_size: %u\n", data_size);
+
+	/* format master header word */
+	/* Do not set the next bit even though the datasheet says to */
+	spuh = (struct SPUHEADER *)spu_hdr;
+
+	/* cipher_bits was initialized at setkey time */
+	cipher_bits = be32_to_cpu(spuh->sa.cipher_flags);
+
+	/* Format sctx word 1 (cipher_bits) */
+	if (is_inbound)
+		cipher_bits |= CIPHER_INBOUND;
+	else
+		cipher_bits &= ~CIPHER_INBOUND;
+
+	/* update encryption key for RC4 on non-first chunk */
+	if (update_key) {
+		spuh->sa.cipher_flags |=
+			cipher_parms->type << CIPHER_TYPE_SHIFT;
+		memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len);
+	}
+
+	if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len)
+		/* cipher iv provided so put it in here */
+		memcpy(bdesc_ptr - cipher_parms->iv_len, cipher_parms->iv_buf,
+		       cipher_parms->iv_len);
+
+	spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
+
+	/* === create the BDESC section === */
+	bdesc = (struct BDESC_HEADER *)bdesc_ptr;
+	bdesc->offset_mac = 0;
+	bdesc->length_mac = 0;
+	bdesc->offset_crypto = 0;
+
+	/* XTS mode, data_size needs to include tweak parameter */
+	if (cipher_parms->mode == CIPHER_MODE_XTS)
+		bdesc->length_crypto = cpu_to_be16(data_size +
+						  SPU_XTS_TWEAK_SIZE);
+	else
+		bdesc->length_crypto = cpu_to_be16(data_size);
+
+	bdesc->offset_icv = 0;
+	bdesc->offset_iv = 0;
+
+	/* === no MFM section === */
+
+	/* === create the BD section === */
+	/* add the BD header */
+	bd = (struct BD_HEADER *)(bdesc_ptr + sizeof(struct BDESC_HEADER));
+
+	/* XTS mode, data_size needs to include tweak parameter */
+	if (cipher_parms->mode == CIPHER_MODE_XTS)
+		bd->size = cpu_to_be16(data_size + SPU_XTS_TWEAK_SIZE);
+	else
+		bd->size = cpu_to_be16(data_size);
+
+	bd->prev_length = 0;
+
+	packet_dump("  SPU request header: ", spu_hdr, spu_req_hdr_len);
+}
+
+/**
+ * spum_request_pad() - Create pad bytes at the end of the data.
+ * @pad_start:		Start of buffer where pad bytes are to be written
+ * @gcm_ccm_padding:	length of GCM/CCM padding, in bytes
+ * @hash_pad_len:	Number of bytes of padding to extend data to a full block
+ * @auth_alg:		authentication algorithm
+ * @auth_mode:		authentication mode
+ * @total_sent:		length inserted at end of hash pad
+ * @status_padding:	Number of bytes of padding to align STATUS word
+ *
+ * There may be three forms of pad:
+ *  1. GCM/CCM pad - for GCM/CCM mode ciphers, pad to 16-byte alignment
+ *  2. hash pad - pad to a block length, with 0x80 data terminator and
+ *                size at the end (see the example below)
+ *  3. STAT pad - to ensure the STAT field is 4-byte aligned
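+ *
+ * For example (illustrative), if hash_pad_len is 44 when hashing 20 bytes of
+ * data with SHA-256 (64-byte block), the hash pad written here is the 0x80
+ * terminator, 35 zero bytes, and the 8-byte big-endian bit count (160).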
+ */
+void spum_request_pad(u8 *pad_start,
+		      u32 gcm_ccm_padding,
+		      u32 hash_pad_len,
+		      enum hash_alg auth_alg,
+		      enum hash_mode auth_mode,
+		      unsigned int total_sent, u32 status_padding)
+{
+	u8 *ptr = pad_start;
+
+	/* fix data alignment for GCM/CCM */
+	if (gcm_ccm_padding > 0) {
+		flow_log("  GCM: padding to 16 byte alignment: %u bytes\n",
+			 gcm_ccm_padding);
+		memset(ptr, 0, gcm_ccm_padding);
+		ptr += gcm_ccm_padding;
+	}
+
+	if (hash_pad_len > 0) {
+		/* clear the padding section */
+		memset(ptr, 0, hash_pad_len);
+
+		if ((auth_alg == HASH_ALG_AES) &&
+		    (auth_mode == HASH_MODE_XCBC)) {
+			/* AES/XCBC just requires padding to be 0s */
+			ptr += hash_pad_len;
+		} else {
+			/* terminate the data */
+			*ptr = 0x80;
+			ptr += (hash_pad_len - sizeof(u64));
+
+			/* add the size at the end as required per alg */
+			if (auth_alg == HASH_ALG_MD5)
+				*(u64 *)ptr = cpu_to_le64((u64)total_sent * 8);
+			else		/* SHA1, SHA2-224, SHA2-256 */
+				*(u64 *)ptr = cpu_to_be64((u64)total_sent * 8);
+			ptr += sizeof(u64);
+		}
+	}
+
+	/* pad to a 4-byte alignment for STAT */
+	if (status_padding > 0) {
+		flow_log("  STAT: padding to 4 byte alignment: %u bytes\n",
+			 status_padding);
+
+		memset(ptr, 0, status_padding);
+		ptr += status_padding;
+	}
+}
+
+/**
+ * spum_xts_tweak_in_payload() - Indicate that SPUM DOES place the XTS tweak
+ * field in the packet payload (rather than using IV)
+ *
+ * Return: 1
+ */
+u8 spum_xts_tweak_in_payload(void)
+{
+	return 1;
+}
+
+/**
+ * spum_tx_status_len() - Return the length of the STATUS field in a SPU
+ * request message.
+ *
+ * Return: Length of STATUS field in bytes.
+ */
+u8 spum_tx_status_len(void)
+{
+	return SPU_TX_STATUS_LEN;
+}
+
+/**
+ * spum_rx_status_len() - Return the length of the STATUS field in a SPU
+ * response message.
+ *
+ * Return: Length of STATUS field in bytes.
+ */
+u8 spum_rx_status_len(void)
+{
+	return SPU_RX_STATUS_LEN;
+}
+
+/**
+ * spum_status_process() - Process the status from a SPU response message.
+ * @statp:  start of STATUS word
+ * Return:
+ *   0 - if status is good and response should be processed
+ *   !0 - status indicates an error and response is invalid
+ */
+int spum_status_process(u8 *statp)
+{
+	u32 status;
+
+	status = __be32_to_cpu(*(__be32 *)statp);
+	flow_log("SPU response STATUS %#08x\n", status);
+	if (status & SPU_STATUS_ERROR_FLAG) {
+		pr_err("%s() Warning: Error result from SPU: %#08x\n",
+		       __func__, status);
+		if (status & SPU_STATUS_INVALID_ICV)
+			return SPU_INVALID_ICV;
+		return -EBADMSG;
+	}
+	return 0;
+}
+
+/**
+ * spum_ccm_update_iv() - Update the IV as per the requirements for CCM mode.
+ *
+ * @digestsize:		Digest size of this request
+ * @cipher_parms:	(pointer to) cipher parameters, includes IV buf & IV len
+ * @assoclen:		Length of AAD data
+ * @chunksize:		length of input data to be sent in this req
+ * @is_encrypt:		true if this is an output/encrypt operation
+ * @is_esp:		true if this is an ESP / RFC4309 operation
+ *
+ */
+void spum_ccm_update_iv(unsigned int digestsize,
+			struct spu_cipher_parms *cipher_parms,
+			unsigned int assoclen,
+			unsigned int chunksize,
+			bool is_encrypt,
+			bool is_esp)
+{
+	u8 L;		/* L from CCM algorithm, size of the plaintext length field in bytes */
+	u8 mprime;	/* M' from CCM algo, (M - 2) / 2, where M=authsize */
+	u8 adata;
+
+	if (cipher_parms->iv_len != CCM_AES_IV_SIZE) {
+		pr_err("%s(): Invalid IV len %d for CCM mode, should be %d\n",
+		       __func__, cipher_parms->iv_len, CCM_AES_IV_SIZE);
+		return;
+	}
+
+	/*
+	 * IV needs to be formatted as follows:
+	 *
+	 * |          Byte 0               | Bytes 1 - N | Bytes (N+1) - 15 |
+	 * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | Bits 7 - 0  |    Bits 7 - 0    |
+	 * | 0 |Ad?|(M - 2) / 2|   L - 1   |    Nonce    | Plaintext Length |
+	 *
+	 * Ad? = 1 if AAD present, 0 if not present
+	 * M = size of auth field, 8, 12, or 16 bytes (SPU-M) -or-
+	 *                         4, 6, 8, 10, 12, 14, 16 bytes (SPU2)
+	 * L = Size of Plaintext Length field; Nonce size = 15 - L
+	 *
+	 * It appears that the crypto API already expects the L-1 portion
+	 * to be set in the first byte of the IV, which implicitly determines
+	 * the nonce size, and also fills in the nonce.  But the other bits
+	 * in byte 0 as well as the plaintext length need to be filled in.
+	 *
+	 * In rfc4309/esp mode, L is not already in the supplied IV and
+	 * we need to fill it in, as well as move the IV data to be after
+	 * the salt
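+	 *
+	 * Worked example (for illustration only): with a 16-byte digest
+	 * (M = 16, so M' = 7), AAD present, and L = 4 (11-byte nonce),
+	 * byte 0 becomes 0x40 | (7 << 3) | 3 = 0x7b, and the plaintext
+	 * length is written into bytes 12 - 15.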
+	 */
+	if (is_esp) {
+		L = CCM_ESP_L_VALUE;	/* RFC4309 has fixed L */
+	} else {
+		/* IV byte 0 holds L' = L - 1, so L is L' + 1 */
+		L = ((cipher_parms->iv_buf[0] & CCM_B0_L_PRIME) >>
+		      CCM_B0_L_PRIME_SHIFT) + 1;
+	}
+
+	mprime = (digestsize - 2) >> 1;  /* M' = (M - 2) / 2 */
+	adata = (assoclen > 0);  /* adata = 1 if any associated data */
+
+	cipher_parms->iv_buf[0] = (adata << CCM_B0_ADATA_SHIFT) |
+				  (mprime << CCM_B0_M_PRIME_SHIFT) |
+				  ((L - 1) << CCM_B0_L_PRIME_SHIFT);
+
+	/* Nonce is already filled in by crypto API, and is 15 - L bytes */
+
+	/* Don't include digest in plaintext size when decrypting */
+	if (!is_encrypt)
+		chunksize -= digestsize;
+
+	/* Fill in length of plaintext, formatted to be L bytes long */
+	format_value_ccm(chunksize, &cipher_parms->iv_buf[15 - L + 1], L);
+}
+
+/**
+ * spum_wordalign_padlen() - Given the length of a data field, determine the
+ * padding required to align the data following this field on a 4-byte boundary.
+ * @data_size: length of data field in bytes
+ *
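+ * For example, a 13-byte data field needs 3 bytes of padding so that the
+ * next field starts on a 4-byte boundary.
+ *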
+ * Return: length of status field padding, in bytes
+ */
+u32 spum_wordalign_padlen(u32 data_size)
+{
+	return ((data_size + 3) & ~3) - data_size;
+}
diff --git a/drivers/crypto/bcm/spu.h b/drivers/crypto/bcm/spu.h
new file mode 100644
index 0000000..aa6fc38
--- /dev/null
+++ b/drivers/crypto/bcm/spu.h
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+/*
+ * This file contains the definition of SPU messages. There are currently two
+ * SPU message formats: SPU-M and SPU2. The hardware uses different values to
+ * identify the same things in SPU-M vs SPU2. So this file defines values that
+ * are hardware independent. Software can use these values for any version of
+ * SPU hardware. These values are used in APIs in spu.c. Functions internal to
+ * spu.c and spu2.c convert these to hardware-specific values.
+ */
+
+#ifndef _SPU_H
+#define _SPU_H
+
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <crypto/sha.h>
+
+enum spu_cipher_alg {
+	CIPHER_ALG_NONE = 0x0,
+	CIPHER_ALG_RC4 = 0x1,
+	CIPHER_ALG_DES = 0x2,
+	CIPHER_ALG_3DES = 0x3,
+	CIPHER_ALG_AES = 0x4,
+	CIPHER_ALG_LAST = 0x5
+};
+
+enum spu_cipher_mode {
+	CIPHER_MODE_NONE = 0x0,
+	CIPHER_MODE_ECB = 0x0,
+	CIPHER_MODE_CBC = 0x1,
+	CIPHER_MODE_OFB = 0x2,
+	CIPHER_MODE_CFB = 0x3,
+	CIPHER_MODE_CTR = 0x4,
+	CIPHER_MODE_CCM = 0x5,
+	CIPHER_MODE_GCM = 0x6,
+	CIPHER_MODE_XTS = 0x7,
+	CIPHER_MODE_LAST = 0x8
+};
+
+enum spu_cipher_type {
+	CIPHER_TYPE_NONE = 0x0,
+	CIPHER_TYPE_DES = 0x0,
+	CIPHER_TYPE_3DES = 0x0,
+	CIPHER_TYPE_INIT = 0x0,	/* used for ARC4 */
+	CIPHER_TYPE_AES128 = 0x0,
+	CIPHER_TYPE_AES192 = 0x1,
+	CIPHER_TYPE_UPDT = 0x1,	/* used for ARC4 */
+	CIPHER_TYPE_AES256 = 0x2,
+};
+
+enum hash_alg {
+	HASH_ALG_NONE = 0x0,
+	HASH_ALG_MD5 = 0x1,
+	HASH_ALG_SHA1 = 0x2,
+	HASH_ALG_SHA224 = 0x3,
+	HASH_ALG_SHA256 = 0x4,
+	HASH_ALG_AES = 0x5,
+	HASH_ALG_SHA384 = 0x6,
+	HASH_ALG_SHA512 = 0x7,
+	/* Keep SHA3 algorithms at the end always */
+	HASH_ALG_SHA3_224 = 0x8,
+	HASH_ALG_SHA3_256 = 0x9,
+	HASH_ALG_SHA3_384 = 0xa,
+	HASH_ALG_SHA3_512 = 0xb,
+	HASH_ALG_LAST
+};
+
+enum hash_mode {
+	HASH_MODE_NONE = 0x0,
+	HASH_MODE_HASH = 0x0,
+	HASH_MODE_XCBC = 0x0,
+	HASH_MODE_CMAC = 0x1,
+	HASH_MODE_CTXT = 0x1,
+	HASH_MODE_HMAC = 0x2,
+	HASH_MODE_RABIN = 0x4,
+	HASH_MODE_FHMAC = 0x6,
+	HASH_MODE_CCM = 0x5,
+	HASH_MODE_GCM = 0x6,
+};
+
+enum hash_type {
+	HASH_TYPE_NONE = 0x0,
+	HASH_TYPE_FULL = 0x0,
+	HASH_TYPE_INIT = 0x1,
+	HASH_TYPE_UPDT = 0x2,
+	HASH_TYPE_FIN = 0x3,
+	HASH_TYPE_AES128 = 0x0,
+	HASH_TYPE_AES192 = 0x1,
+	HASH_TYPE_AES256 = 0x2
+};
+
+enum aead_type {
+	AES_CCM,
+	AES_GCM,
+	AUTHENC,
+	AEAD_TYPE_LAST
+};
+
+extern char *hash_alg_name[HASH_ALG_LAST];
+extern char *aead_alg_name[AEAD_TYPE_LAST];
+
+struct spu_request_opts {
+	bool is_inbound;
+	bool auth_first;
+	bool is_aead;
+	bool is_esp;
+	bool bd_suppress;
+	bool is_rfc4543;
+};
+
+struct spu_cipher_parms {
+	enum spu_cipher_alg  alg;
+	enum spu_cipher_mode mode;
+	enum spu_cipher_type type;
+	u8                  *key_buf;
+	u16                  key_len;
+	/* iv_buf and iv_len include salt, if applicable */
+	u8                  *iv_buf;
+	u16                  iv_len;
+};
+
+struct spu_hash_parms {
+	enum hash_alg  alg;
+	enum hash_mode mode;
+	enum hash_type type;
+	u8             digestsize;
+	u8            *key_buf;
+	u16            key_len;
+	u16            prebuf_len;
+	/* length of hash pad. signed, needs to handle roll-overs */
+	int            pad_len;
+};
+
+struct spu_aead_parms {
+	u32 assoc_size;
+	u16 iv_len;      /* length of IV field between assoc data and data */
+	u8  aad_pad_len; /* For AES GCM/CCM, length of padding after AAD */
+	u8  data_pad_len;/* For AES GCM/CCM, length of padding after data */
+	bool return_iv;  /* True if SPU should return an IV */
+	u32 ret_iv_len;  /* Length in bytes of returned IV */
+	u32 ret_iv_off;  /* Offset into full IV if partial IV returned */
+};
+
+/************** SPU sizes ***************/
+
+#define SPU_RX_STATUS_LEN  4
+
+/* Max length of padding for 4-byte alignment of STATUS field */
+#define SPU_STAT_PAD_MAX  4
+
+/* Max length of pad fragment. 4 is for 4-byte alignment of STATUS field */
+#define SPU_PAD_LEN_MAX (SPU_GCM_CCM_ALIGN + MAX_HASH_BLOCK_SIZE + \
+			 SPU_STAT_PAD_MAX)
+
+/* GCM and CCM require 16-byte alignment */
+#define SPU_GCM_CCM_ALIGN 16
+
+/* Length of SUPDT field in SPU response message for RC4 */
+#define SPU_SUPDT_LEN 260
+
+/*
+ * SPU status error codes. These are used as common error codes across all
+ * SPU variants.
+ */
+#define SPU_INVALID_ICV  1
+
+/* Indicates no limit to the length of the payload in a SPU message */
+#define SPU_MAX_PAYLOAD_INF  0xFFFFFFFF
+
+/* Size of XTS tweak ("i" parameter), in bytes */
+#define SPU_XTS_TWEAK_SIZE 16
+
+/* CCM B_0 field definitions, common for SPU-M and SPU2 */
+#define CCM_B0_ADATA		0x40
+#define CCM_B0_ADATA_SHIFT	   6
+#define CCM_B0_M_PRIME		0x38
+#define CCM_B0_M_PRIME_SHIFT	   3
+#define CCM_B0_L_PRIME		0x07
+#define CCM_B0_L_PRIME_SHIFT	   0
+#define CCM_ESP_L_VALUE		   4
+
+/**
+ * spu_req_incl_icv() - Return true if SPU request message should include the
+ * ICV as a separate buffer.
+ * @cipher_mode:  the cipher mode being requested
+ * @is_encrypt:   true if encrypting. false if decrypting.
+ *
+ * Return:  true if ICV to be included as separate buffer
+ */
+static __always_inline  bool spu_req_incl_icv(enum spu_cipher_mode cipher_mode,
+					      bool is_encrypt)
+{
+	if ((cipher_mode == CIPHER_MODE_GCM) && !is_encrypt)
+		return true;
+	if ((cipher_mode == CIPHER_MODE_CCM) && !is_encrypt)
+		return true;
+
+	return false;
+}
+
+static __always_inline u32 spu_real_db_size(u32 assoc_size,
+					    u32 aead_iv_buf_len,
+					    u32 prebuf_len,
+					    u32 data_size,
+					    u32 aad_pad_len,
+					    u32 gcm_pad_len,
+					    u32 hash_pad_len)
+{
+	return assoc_size + aead_iv_buf_len + prebuf_len + data_size +
+	    aad_pad_len + gcm_pad_len + hash_pad_len;
+}
+
+/************** SPU Functions Prototypes **************/
+
+void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len);
+
+u32 spum_ns2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
+			     enum spu_cipher_mode cipher_mode,
+			     unsigned int blocksize);
+u32 spum_nsp_ctx_max_payload(enum spu_cipher_alg cipher_alg,
+			     enum spu_cipher_mode cipher_mode,
+			     unsigned int blocksize);
+u32 spum_payload_length(u8 *spu_hdr);
+u16 spum_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash);
+u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
+		      u32 chunksize, u16 hash_block_size);
+u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
+			 unsigned int data_size);
+u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode,
+			unsigned int assoc_len, unsigned int iv_len,
+			bool is_encrypt);
+u8 spum_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len);
+bool spu_req_incl_icv(enum spu_cipher_mode cipher_mode, bool is_encrypt);
+enum hash_type spum_hash_type(u32 src_sent);
+u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg,
+		     enum hash_type htype);
+
+u32 spum_create_request(u8 *spu_hdr,
+			struct spu_request_opts *req_opts,
+			struct spu_cipher_parms *cipher_parms,
+			struct spu_hash_parms *hash_parms,
+			struct spu_aead_parms *aead_parms,
+			unsigned int data_size);
+
+u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms);
+
+void spum_cipher_req_finish(u8 *spu_hdr,
+			    u16 spu_req_hdr_len,
+			    unsigned int is_inbound,
+			    struct spu_cipher_parms *cipher_parms,
+			    bool update_key,
+			    unsigned int data_size);
+
+void spum_request_pad(u8 *pad_start,
+		      u32 gcm_padding,
+		      u32 hash_pad_len,
+		      enum hash_alg auth_alg,
+		      enum hash_mode auth_mode,
+		      unsigned int total_sent, u32 status_padding);
+
+u8 spum_xts_tweak_in_payload(void);
+u8 spum_tx_status_len(void);
+u8 spum_rx_status_len(void);
+int spum_status_process(u8 *statp);
+
+void spum_ccm_update_iv(unsigned int digestsize,
+			struct spu_cipher_parms *cipher_parms,
+			unsigned int assoclen,
+			unsigned int chunksize,
+			bool is_encrypt,
+			bool is_esp);
+u32 spum_wordalign_padlen(u32 data_size);
+#endif
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
new file mode 100644
index 0000000..bf7ac62
--- /dev/null
+++ b/drivers/crypto/bcm/spu2.c
@@ -0,0 +1,1402 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+/*
+ * This file works with the SPU2 version of the SPU. SPU2 has different message
+ * formats than the previous version of the SPU. All SPU message format
+ * differences should be hidden in the spux.c,h files.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include "util.h"
+#include "spu.h"
+#include "spu2.h"
+
+#define SPU2_TX_STATUS_LEN  0	/* SPU2 has no STATUS in input packet */
+
+/*
+ * Controlled by pkt_stat_cnt field in CRYPTO_SS_SPU0_CORE_SPU2_CONTROL0
+ * register. Defaults to 2.
+ */
+#define SPU2_RX_STATUS_LEN  2
+
+enum spu2_proto_sel {
+	SPU2_PROTO_RESV = 0,
+	SPU2_MACSEC_SECTAG8_ECB = 1,
+	SPU2_MACSEC_SECTAG8_SCB = 2,
+	SPU2_MACSEC_SECTAG16 = 3,
+	SPU2_MACSEC_SECTAG16_8_XPN = 4,
+	SPU2_IPSEC = 5,
+	SPU2_IPSEC_ESN = 6,
+	SPU2_TLS_CIPHER = 7,
+	SPU2_TLS_AEAD = 8,
+	SPU2_DTLS_CIPHER = 9,
+	SPU2_DTLS_AEAD = 10
+};
+
+char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
+	"DES", "3DES"
+};
+
+char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", "XTS",
+	"CCM", "GCM"
+};
+
+char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
+	"Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384",
+	"SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256",
+	"SHA3-384", "SHA3-512"
+};
+
+char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
+	"Rabin", "CCM", "GCM", "Reserved"
+};
+
+static char *spu2_ciph_type_name(enum spu2_cipher_type cipher_type)
+{
+	if (cipher_type >= SPU2_CIPHER_TYPE_LAST)
+		return "Reserved";
+	return spu2_cipher_type_names[cipher_type];
+}
+
+static char *spu2_ciph_mode_name(enum spu2_cipher_mode cipher_mode)
+{
+	if (cipher_mode >= SPU2_CIPHER_MODE_LAST)
+		return "Reserved";
+	return spu2_cipher_mode_names[cipher_mode];
+}
+
+static char *spu2_hash_type_name(enum spu2_hash_type hash_type)
+{
+	if (hash_type >= SPU2_HASH_TYPE_LAST)
+		return "Reserved";
+	return spu2_hash_type_names[hash_type];
+}
+
+static char *spu2_hash_mode_name(enum spu2_hash_mode hash_mode)
+{
+	if (hash_mode >= SPU2_HASH_MODE_LAST)
+		return "Reserved";
+	return spu2_hash_mode_names[hash_mode];
+}
+
+/*
+ * Convert from a software cipher mode value to the corresponding value
+ * for SPU2.
+ */
+static int spu2_cipher_mode_xlate(enum spu_cipher_mode cipher_mode,
+				  enum spu2_cipher_mode *spu2_mode)
+{
+	switch (cipher_mode) {
+	case CIPHER_MODE_ECB:
+		*spu2_mode = SPU2_CIPHER_MODE_ECB;
+		break;
+	case CIPHER_MODE_CBC:
+		*spu2_mode = SPU2_CIPHER_MODE_CBC;
+		break;
+	case CIPHER_MODE_OFB:
+		*spu2_mode = SPU2_CIPHER_MODE_OFB;
+		break;
+	case CIPHER_MODE_CFB:
+		*spu2_mode = SPU2_CIPHER_MODE_CFB;
+		break;
+	case CIPHER_MODE_CTR:
+		*spu2_mode = SPU2_CIPHER_MODE_CTR;
+		break;
+	case CIPHER_MODE_CCM:
+		*spu2_mode = SPU2_CIPHER_MODE_CCM;
+		break;
+	case CIPHER_MODE_GCM:
+		*spu2_mode = SPU2_CIPHER_MODE_GCM;
+		break;
+	case CIPHER_MODE_XTS:
+		*spu2_mode = SPU2_CIPHER_MODE_XTS;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * spu2_cipher_xlate() - Convert a cipher {alg/mode/type} triple to a SPU2
+ * cipher type and mode.
+ * @cipher_alg:  [in]  cipher algorithm value from software enumeration
+ * @cipher_mode: [in]  cipher mode value from software enumeration
+ * @cipher_type: [in]  cipher type value from software enumeration
+ * @spu2_type:   [out] cipher type value used by spu2 hardware
+ * @spu2_mode:   [out] cipher mode value used by spu2 hardware
+ *
+ * Return:  0 if successful
+ */
+static int spu2_cipher_xlate(enum spu_cipher_alg cipher_alg,
+			     enum spu_cipher_mode cipher_mode,
+			     enum spu_cipher_type cipher_type,
+			     enum spu2_cipher_type *spu2_type,
+			     enum spu2_cipher_mode *spu2_mode)
+{
+	int err;
+
+	err = spu2_cipher_mode_xlate(cipher_mode, spu2_mode);
+	if (err) {
+		flow_log("Invalid cipher mode %d\n", cipher_mode);
+		return err;
+	}
+
+	switch (cipher_alg) {
+	case CIPHER_ALG_NONE:
+		*spu2_type = SPU2_CIPHER_TYPE_NONE;
+		break;
+	case CIPHER_ALG_RC4:
+		/* SPU2 does not support RC4 */
+		err = -EINVAL;
+		*spu2_type = SPU2_CIPHER_TYPE_NONE;
+		break;
+	case CIPHER_ALG_DES:
+		*spu2_type = SPU2_CIPHER_TYPE_DES;
+		break;
+	case CIPHER_ALG_3DES:
+		*spu2_type = SPU2_CIPHER_TYPE_3DES;
+		break;
+	case CIPHER_ALG_AES:
+		switch (cipher_type) {
+		case CIPHER_TYPE_AES128:
+			*spu2_type = SPU2_CIPHER_TYPE_AES128;
+			break;
+		case CIPHER_TYPE_AES192:
+			*spu2_type = SPU2_CIPHER_TYPE_AES192;
+			break;
+		case CIPHER_TYPE_AES256:
+			*spu2_type = SPU2_CIPHER_TYPE_AES256;
+			break;
+		default:
+			err = -EINVAL;
+		}
+		break;
+	case CIPHER_ALG_LAST:
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	if (err)
+		flow_log("Invalid cipher alg %d or type %d\n",
+			 cipher_alg, cipher_type);
+	return err;
+}
+
+/*
+ * Convert from a software hash mode value to the corresponding value
+ * for SPU2. Note that HASH_MODE_NONE and HASH_MODE_XCBC have the same value.
+ */
+static int spu2_hash_mode_xlate(enum hash_mode hash_mode,
+				enum spu2_hash_mode *spu2_mode)
+{
+	switch (hash_mode) {
+	case HASH_MODE_XCBC:
+		*spu2_mode = SPU2_HASH_MODE_XCBC_MAC;
+		break;
+	case HASH_MODE_CMAC:
+		*spu2_mode = SPU2_HASH_MODE_CMAC;
+		break;
+	case HASH_MODE_HMAC:
+		*spu2_mode = SPU2_HASH_MODE_HMAC;
+		break;
+	case HASH_MODE_CCM:
+		*spu2_mode = SPU2_HASH_MODE_CCM;
+		break;
+	case HASH_MODE_GCM:
+		*spu2_mode = SPU2_HASH_MODE_GCM;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * spu2_hash_xlate() - Convert a hash {alg/mode/type} triple to a SPU2 hash type
+ * and mode.
+ * @hash_alg:  [in] hash algorithm value from software enumeration
+ * @hash_mode: [in] hash mode value from software enumeration
+ * @hash_type: [in] hash type value from software enumeration
+ * @ciph_type: [in] cipher type value from software enumeration
+ * @spu2_type: [out] hash type value used by SPU2 hardware
+ * @spu2_mode: [out] hash mode value used by SPU2 hardware
+ *
+ * Return:  0 if successful
+ */
+static int
+spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode,
+		enum hash_type hash_type, enum spu_cipher_type ciph_type,
+		enum spu2_hash_type *spu2_type, enum spu2_hash_mode *spu2_mode)
+{
+	int err;
+
+	err = spu2_hash_mode_xlate(hash_mode, spu2_mode);
+	if (err) {
+		flow_log("Invalid hash mode %d\n", hash_mode);
+		return err;
+	}
+
+	switch (hash_alg) {
+	case HASH_ALG_NONE:
+		*spu2_type = SPU2_HASH_TYPE_NONE;
+		break;
+	case HASH_ALG_MD5:
+		*spu2_type = SPU2_HASH_TYPE_MD5;
+		break;
+	case HASH_ALG_SHA1:
+		*spu2_type = SPU2_HASH_TYPE_SHA1;
+		break;
+	case HASH_ALG_SHA224:
+		*spu2_type = SPU2_HASH_TYPE_SHA224;
+		break;
+	case HASH_ALG_SHA256:
+		*spu2_type = SPU2_HASH_TYPE_SHA256;
+		break;
+	case HASH_ALG_SHA384:
+		*spu2_type = SPU2_HASH_TYPE_SHA384;
+		break;
+	case HASH_ALG_SHA512:
+		*spu2_type = SPU2_HASH_TYPE_SHA512;
+		break;
+	case HASH_ALG_AES:
+		switch (ciph_type) {
+		case CIPHER_TYPE_AES128:
+			*spu2_type = SPU2_HASH_TYPE_AES128;
+			break;
+		case CIPHER_TYPE_AES192:
+			*spu2_type = SPU2_HASH_TYPE_AES192;
+			break;
+		case CIPHER_TYPE_AES256:
+			*spu2_type = SPU2_HASH_TYPE_AES256;
+			break;
+		default:
+			err = -EINVAL;
+		}
+		break;
+	case HASH_ALG_SHA3_224:
+		*spu2_type = SPU2_HASH_TYPE_SHA3_224;
+		break;
+	case HASH_ALG_SHA3_256:
+		*spu2_type = SPU2_HASH_TYPE_SHA3_256;
+		break;
+	case HASH_ALG_SHA3_384:
+		*spu2_type = SPU2_HASH_TYPE_SHA3_384;
+		break;
+	case HASH_ALG_SHA3_512:
+		*spu2_type = SPU2_HASH_TYPE_SHA3_512;
+		break;
+	case HASH_ALG_LAST:
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	if (err)
+		flow_log("Invalid hash alg %d or type %d\n",
+			 hash_alg, hash_type);
+	return err;
+}
+
+/* Dump FMD ctrl0. The ctrl0 input is in host byte order */
+static void spu2_dump_fmd_ctrl0(u64 ctrl0)
+{
+	enum spu2_cipher_type ciph_type;
+	enum spu2_cipher_mode ciph_mode;
+	enum spu2_hash_type hash_type;
+	enum spu2_hash_mode hash_mode;
+	char *ciph_name;
+	char *ciph_mode_name;
+	char *hash_name;
+	char *hash_mode_name;
+	u8 cfb;
+	u8 proto;
+
+	packet_log(" FMD CTRL0 %#16llx\n", ctrl0);
+	if (ctrl0 & SPU2_CIPH_ENCRYPT_EN)
+		packet_log("  encrypt\n");
+	else
+		packet_log("  decrypt\n");
+
+	ciph_type = (ctrl0 & SPU2_CIPH_TYPE) >> SPU2_CIPH_TYPE_SHIFT;
+	ciph_name = spu2_ciph_type_name(ciph_type);
+	packet_log("  Cipher type: %s\n", ciph_name);
+
+	if (ciph_type != SPU2_CIPHER_TYPE_NONE) {
+		ciph_mode = (ctrl0 & SPU2_CIPH_MODE) >> SPU2_CIPH_MODE_SHIFT;
+		ciph_mode_name = spu2_ciph_mode_name(ciph_mode);
+		packet_log("  Cipher mode: %s\n", ciph_mode_name);
+	}
+
+	cfb = (ctrl0 & SPU2_CFB_MASK) >> SPU2_CFB_MASK_SHIFT;
+	packet_log("  CFB %#x\n", cfb);
+
+	proto = (ctrl0 & SPU2_PROTO_SEL) >> SPU2_PROTO_SEL_SHIFT;
+	packet_log("  protocol %#x\n", proto);
+
+	if (ctrl0 & SPU2_HASH_FIRST)
+		packet_log("  hash first\n");
+	else
+		packet_log("  cipher first\n");
+
+	if (ctrl0 & SPU2_CHK_TAG)
+		packet_log("  check tag\n");
+
+	hash_type = (ctrl0 & SPU2_HASH_TYPE) >> SPU2_HASH_TYPE_SHIFT;
+	hash_name = spu2_hash_type_name(hash_type);
+	packet_log("  Hash type: %s\n", hash_name);
+
+	if (hash_type != SPU2_HASH_TYPE_NONE) {
+		hash_mode = (ctrl0 & SPU2_HASH_MODE) >> SPU2_HASH_MODE_SHIFT;
+		hash_mode_name = spu2_hash_mode_name(hash_mode);
+		packet_log("  Hash mode: %s\n", hash_mode_name);
+	}
+
+	if (ctrl0 & SPU2_CIPH_PAD_EN) {
+		packet_log("  Cipher pad: %#2llx\n",
+			   (ctrl0 & SPU2_CIPH_PAD) >> SPU2_CIPH_PAD_SHIFT);
+	}
+}
+
+/* Dump FMD ctrl1. The ctrl1 input is in host byte order */
+static void spu2_dump_fmd_ctrl1(u64 ctrl1)
+{
+	u8 hash_key_len;
+	u8 ciph_key_len;
+	u8 ret_iv_len;
+	u8 iv_offset;
+	u8 iv_len;
+	u8 hash_tag_len;
+	u8 ret_md;
+
+	packet_log(" FMD CTRL1 %#16llx\n", ctrl1);
+	if (ctrl1 & SPU2_TAG_LOC)
+		packet_log("  Tag after payload\n");
+
+	packet_log("  Msg includes ");
+	if (ctrl1 & SPU2_HAS_FR_DATA)
+		packet_log("FD ");
+	if (ctrl1 & SPU2_HAS_AAD1)
+		packet_log("AAD1 ");
+	if (ctrl1 & SPU2_HAS_NAAD)
+		packet_log("NAAD ");
+	if (ctrl1 & SPU2_HAS_AAD2)
+		packet_log("AAD2 ");
+	if (ctrl1 & SPU2_HAS_ESN)
+		packet_log("ESN ");
+	packet_log("\n");
+
+	hash_key_len = (ctrl1 & SPU2_HASH_KEY_LEN) >> SPU2_HASH_KEY_LEN_SHIFT;
+	packet_log("  Hash key len %u\n", hash_key_len);
+
+	ciph_key_len = (ctrl1 & SPU2_CIPH_KEY_LEN) >> SPU2_CIPH_KEY_LEN_SHIFT;
+	packet_log("  Cipher key len %u\n", ciph_key_len);
+
+	if (ctrl1 & SPU2_GENIV)
+		packet_log("  Generate IV\n");
+
+	if (ctrl1 & SPU2_HASH_IV)
+		packet_log("  IV included in hash\n");
+
+	if (ctrl1 & SPU2_RET_IV)
+		packet_log("  Return IV in output before payload\n");
+
+	ret_iv_len = (ctrl1 & SPU2_RET_IV_LEN) >> SPU2_RET_IV_LEN_SHIFT;
+	packet_log("  Length of returned IV %u bytes\n",
+		   ret_iv_len ? ret_iv_len : 16);
+
+	iv_offset = (ctrl1 & SPU2_IV_OFFSET) >> SPU2_IV_OFFSET_SHIFT;
+	packet_log("  IV offset %u\n", iv_offset);
+
+	iv_len = (ctrl1 & SPU2_IV_LEN) >> SPU2_IV_LEN_SHIFT;
+	packet_log("  Input IV len %u bytes\n", iv_len);
+
+	hash_tag_len = (ctrl1 & SPU2_HASH_TAG_LEN) >> SPU2_HASH_TAG_LEN_SHIFT;
+	packet_log("  Hash tag length %u bytes\n", hash_tag_len);
+
+	packet_log("  Return ");
+	ret_md = (ctrl1 & SPU2_RETURN_MD) >> SPU2_RETURN_MD_SHIFT;
+	if (ret_md)
+		packet_log("FMD ");
+	if (ret_md == SPU2_RET_FMD_OMD)
+		packet_log("OMD ");
+	else if (ret_md == SPU2_RET_FMD_OMD_IV)
+		packet_log("OMD IV ");
+	if (ctrl1 & SPU2_RETURN_FD)
+		packet_log("FD ");
+	if (ctrl1 & SPU2_RETURN_AAD1)
+		packet_log("AAD1 ");
+	if (ctrl1 & SPU2_RETURN_NAAD)
+		packet_log("NAAD ");
+	if (ctrl1 & SPU2_RETURN_AAD2)
+		packet_log("AAD2 ");
+	if (ctrl1 & SPU2_RETURN_PAY)
+		packet_log("Payload");
+	packet_log("\n");
+}
+
+/* Dump FMD ctrl2. The ctrl2 input is in host byte order */
+static void spu2_dump_fmd_ctrl2(u64 ctrl2)
+{
+	packet_log(" FMD CTRL2 %#16llx\n", ctrl2);
+
+	packet_log("  AAD1 offset %llu length %llu bytes\n",
+		   ctrl2 & SPU2_AAD1_OFFSET,
+		   (ctrl2 & SPU2_AAD1_LEN) >> SPU2_AAD1_LEN_SHIFT);
+	packet_log("  AAD2 offset %llu\n",
+		   (ctrl2 & SPU2_AAD2_OFFSET) >> SPU2_AAD2_OFFSET_SHIFT);
+	packet_log("  Payload offset %llu\n",
+		   (ctrl2 & SPU2_PL_OFFSET) >> SPU2_PL_OFFSET_SHIFT);
+}
+
+/* Dump FMD ctrl3. The ctrl3 input is in host byte order */
+static void spu2_dump_fmd_ctrl3(u64 ctrl3)
+{
+	packet_log(" FMD CTRL3 %#16llx\n", ctrl3);
+
+	packet_log("  Payload length %llu bytes\n", ctrl3 & SPU2_PL_LEN);
+	packet_log("  TLS length %llu bytes\n",
+		   (ctrl3 & SPU2_TLS_LEN) >> SPU2_TLS_LEN_SHIFT);
+}
+
+static void spu2_dump_fmd(struct SPU2_FMD *fmd)
+{
+	spu2_dump_fmd_ctrl0(le64_to_cpu(fmd->ctrl0));
+	spu2_dump_fmd_ctrl1(le64_to_cpu(fmd->ctrl1));
+	spu2_dump_fmd_ctrl2(le64_to_cpu(fmd->ctrl2));
+	spu2_dump_fmd_ctrl3(le64_to_cpu(fmd->ctrl3));
+}
+
+static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len,
+			  u16 hash_iv_len, u16 ciph_iv_len)
+{
+	u8 *ptr = omd;
+
+	packet_log(" OMD:\n");
+
+	if (hash_key_len) {
+		packet_log("  Hash Key Length %u bytes\n", hash_key_len);
+		packet_dump("  KEY: ", ptr, hash_key_len);
+		ptr += hash_key_len;
+	}
+
+	if (ciph_key_len) {
+		packet_log("  Cipher Key Length %u bytes\n", ciph_key_len);
+		packet_dump("  KEY: ", ptr, ciph_key_len);
+		ptr += ciph_key_len;
+	}
+
+	if (hash_iv_len) {
+		packet_log("  Hash IV Length %u bytes\n", hash_iv_len);
+		packet_dump("  hash IV: ", ptr, hash_iv_len);
+		ptr += hash_iv_len;
+	}
+
+	if (ciph_iv_len) {
+		packet_log("  Cipher IV Length %u bytes\n", ciph_iv_len);
+		packet_dump("  cipher IV: ", ptr, ciph_iv_len);
+	}
+}
+
+/* Dump a SPU2 header for debug */
+void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len)
+{
+	struct SPU2_FMD *fmd = (struct SPU2_FMD *)buf;
+	u8 *omd;
+	u64 ctrl1;
+	u16 hash_key_len;
+	u16 ciph_key_len;
+	u16 hash_iv_len;
+	u16 ciph_iv_len;
+	u16 omd_len;
+
+	packet_log("\n");
+	packet_log("SPU2 message header %p len: %u\n", buf, buf_len);
+
+	spu2_dump_fmd(fmd);
+	omd = (u8 *)(fmd + 1);
+
+	ctrl1 = le64_to_cpu(fmd->ctrl1);
+	hash_key_len = (ctrl1 & SPU2_HASH_KEY_LEN) >> SPU2_HASH_KEY_LEN_SHIFT;
+	ciph_key_len = (ctrl1 & SPU2_CIPH_KEY_LEN) >> SPU2_CIPH_KEY_LEN_SHIFT;
+	hash_iv_len = 0;
+	ciph_iv_len = (ctrl1 & SPU2_IV_LEN) >> SPU2_IV_LEN_SHIFT;
+	spu2_dump_omd(omd, hash_key_len, ciph_key_len, hash_iv_len,
+		      ciph_iv_len);
+
+	/* Double check sanity */
+	omd_len = hash_key_len + ciph_key_len + hash_iv_len + ciph_iv_len;
+	if (FMD_SIZE + omd_len != buf_len) {
+		packet_log
+		    (" Packet parsed incorrectly. buf_len %u, sum of MD %zu\n",
+		     buf_len, FMD_SIZE + omd_len);
+	}
+	packet_log("\n");
+}
+
+/**
+ * spu2_fmd_init() - At setkey time, initialize the fixed meta data for
+ * subsequent ablkcipher requests for this context.
+ * @fmd:               Start of FMD field to be written
+ * @spu2_type:         Cipher algorithm
+ * @spu2_mode:         Cipher mode
+ * @cipher_key_len:    Length of cipher key, in bytes
+ * @cipher_iv_len:     Length of cipher initialization vector, in bytes
+ *
+ * Return:  0 (success)
+ */
+static int spu2_fmd_init(struct SPU2_FMD *fmd,
+			 enum spu2_cipher_type spu2_type,
+			 enum spu2_cipher_mode spu2_mode,
+			 u32 cipher_key_len, u32 cipher_iv_len)
+{
+	u64 ctrl0;
+	u64 ctrl1;
+	u64 ctrl2;
+	u64 ctrl3;
+	u32 aad1_offset;
+	u32 aad2_offset;
+	u16 aad1_len = 0;
+	u64 payload_offset;
+
+	ctrl0 = (spu2_type << SPU2_CIPH_TYPE_SHIFT) |
+	    (spu2_mode << SPU2_CIPH_MODE_SHIFT);
+
+	ctrl1 = (cipher_key_len << SPU2_CIPH_KEY_LEN_SHIFT) |
+	    ((u64)cipher_iv_len << SPU2_IV_LEN_SHIFT) |
+	    ((u64)SPU2_RET_FMD_ONLY << SPU2_RETURN_MD_SHIFT) | SPU2_RETURN_PAY;
+
+	/*
+	 * AAD1 offset is from start of FD. FD length is always 0 for this
+	 * driver. So AAD1_offset is always 0.
+	 */
+	aad1_offset = 0;
+	aad2_offset = aad1_offset;
+	payload_offset = 0;
+	ctrl2 = aad1_offset |
+	    (aad1_len << SPU2_AAD1_LEN_SHIFT) |
+	    (aad2_offset << SPU2_AAD2_OFFSET_SHIFT) |
+	    (payload_offset << SPU2_PL_OFFSET_SHIFT);
+
+	ctrl3 = 0;
+
+	fmd->ctrl0 = cpu_to_le64(ctrl0);
+	fmd->ctrl1 = cpu_to_le64(ctrl1);
+	fmd->ctrl2 = cpu_to_le64(ctrl2);
+	fmd->ctrl3 = cpu_to_le64(ctrl3);
+
+	return 0;
+}
+
+/**
+ * spu2_fmd_ctrl0_write() - Write ctrl0 field in fixed metadata (FMD) field of
+ * SPU request packet.
+ * @fmd:            Start of FMD field to be written
+ * @is_inbound:     true if decrypting. false if encrypting.
+ * @auth_first:     true if alg authenticates before encrypting
+ * @protocol:       protocol selector
+ * @cipher_type:    cipher algorithm
+ * @cipher_mode:    cipher mode
+ * @auth_type:      authentication type
+ * @auth_mode:      authentication mode
+ */
+static void spu2_fmd_ctrl0_write(struct SPU2_FMD *fmd,
+				 bool is_inbound, bool auth_first,
+				 enum spu2_proto_sel protocol,
+				 enum spu2_cipher_type cipher_type,
+				 enum spu2_cipher_mode cipher_mode,
+				 enum spu2_hash_type auth_type,
+				 enum spu2_hash_mode auth_mode)
+{
+	u64 ctrl0 = 0;
+
+	if ((cipher_type != SPU2_CIPHER_TYPE_NONE) && !is_inbound)
+		ctrl0 |= SPU2_CIPH_ENCRYPT_EN;
+
+	ctrl0 |= ((u64)cipher_type << SPU2_CIPH_TYPE_SHIFT) |
+	    ((u64)cipher_mode << SPU2_CIPH_MODE_SHIFT);
+
+	if (protocol)
+		ctrl0 |= (u64)protocol << SPU2_PROTO_SEL_SHIFT;
+
+	if (auth_first)
+		ctrl0 |= SPU2_HASH_FIRST;
+
+	if (is_inbound && (auth_type != SPU2_HASH_TYPE_NONE))
+		ctrl0 |= SPU2_CHK_TAG;
+
+	ctrl0 |= (((u64)auth_type << SPU2_HASH_TYPE_SHIFT) |
+		  ((u64)auth_mode << SPU2_HASH_MODE_SHIFT));
+
+	fmd->ctrl0 = cpu_to_le64(ctrl0);
+}
+
+/**
+ * spu2_fmd_ctrl1_write() - Write ctrl1 field in fixed metadata (FMD) field of
+ * SPU request packet.
+ * @fmd:            Start of FMD field to be written
+ * @is_inbound:     true if decrypting. false if encrypting.
+ * @assoc_size:     Length of additional associated data, in bytes
+ * @auth_key_len:   Length of authentication key, in bytes
+ * @cipher_key_len: Length of cipher key, in bytes
+ * @gen_iv:         If true, hw generates IV and returns in response
+ * @hash_iv:        IV participates in hash. Used for IPSEC and TLS.
+ * @return_iv:      Return IV in output packet before payload
+ * @ret_iv_len:     Length of IV returned from SPU, in bytes
+ * @ret_iv_offset:  Offset into full IV of start of returned IV
+ * @cipher_iv_len:  Length of input cipher IV, in bytes
+ * @digest_size:    Length of digest (aka, hash tag or ICV), in bytes
+ * @return_payload: Return payload in SPU response
+ * @return_md:      Return metadata in SPU response
+ *
+ * Packet can have AAD2 w/o AAD1. For algorithms currently supported,
+ * associated data goes in AAD2.
+ */
+static void spu2_fmd_ctrl1_write(struct SPU2_FMD *fmd, bool is_inbound,
+				 u64 assoc_size,
+				 u64 auth_key_len, u64 cipher_key_len,
+				 bool gen_iv, bool hash_iv, bool return_iv,
+				 u64 ret_iv_len, u64 ret_iv_offset,
+				 u64 cipher_iv_len, u64 digest_size,
+				 bool return_payload, bool return_md)
+{
+	u64 ctrl1 = 0;
+
+	if (is_inbound && digest_size)
+		ctrl1 |= SPU2_TAG_LOC;
+
+	if (assoc_size) {
+		ctrl1 |= SPU2_HAS_AAD2;
+		ctrl1 |= SPU2_RETURN_AAD2;  /* need aad2 for gcm aes esp */
+	}
+
+	if (auth_key_len)
+		ctrl1 |= ((auth_key_len << SPU2_HASH_KEY_LEN_SHIFT) &
+			  SPU2_HASH_KEY_LEN);
+
+	if (cipher_key_len)
+		ctrl1 |= ((cipher_key_len << SPU2_CIPH_KEY_LEN_SHIFT) &
+			  SPU2_CIPH_KEY_LEN);
+
+	if (gen_iv)
+		ctrl1 |= SPU2_GENIV;
+
+	if (hash_iv)
+		ctrl1 |= SPU2_HASH_IV;
+
+	if (return_iv) {
+		ctrl1 |= SPU2_RET_IV;
+		ctrl1 |= ret_iv_len << SPU2_RET_IV_LEN_SHIFT;
+		ctrl1 |= ret_iv_offset << SPU2_IV_OFFSET_SHIFT;
+	}
+
+	ctrl1 |= ((cipher_iv_len << SPU2_IV_LEN_SHIFT) & SPU2_IV_LEN);
+
+	if (digest_size)
+		ctrl1 |= ((digest_size << SPU2_HASH_TAG_LEN_SHIFT) &
+			  SPU2_HASH_TAG_LEN);
+
+	/*
+	 * Ask for the output packet to include FMD, but we don't need to get
+	 * keys and IVs back in OMD.
+	 */
+	if (return_md)
+		ctrl1 |= ((u64)SPU2_RET_FMD_ONLY << SPU2_RETURN_MD_SHIFT);
+	else
+		ctrl1 |= ((u64)SPU2_RET_NO_MD << SPU2_RETURN_MD_SHIFT);
+
+	/* Crypto API does not get assoc data back. So no need for AAD2. */
+
+	if (return_payload)
+		ctrl1 |= SPU2_RETURN_PAY;
+
+	fmd->ctrl1 = cpu_to_le64(ctrl1);
+}
+
+/**
+ * spu2_fmd_ctrl2_write() - Set the ctrl2 field in the fixed metadata field of
+ * SPU2 header.
+ * @fmd:            Start of FMD field to be written
+ * @cipher_offset:  Number of bytes from Start of Packet (end of FD field) where
+ *                  data to be encrypted or decrypted begins
+ * @auth_key_len:   Length of authentication key, in bytes
+ * @auth_iv_len:    Length of authentication initialization vector, in bytes
+ * @cipher_key_len: Length of cipher key, in bytes
+ * @cipher_iv_len:  Length of cipher IV, in bytes
+ */
+static void spu2_fmd_ctrl2_write(struct SPU2_FMD *fmd, u64 cipher_offset,
+				 u64 auth_key_len, u64 auth_iv_len,
+				 u64 cipher_key_len, u64 cipher_iv_len)
+{
+	u64 ctrl2;
+	u64 aad1_offset;
+	u64 aad2_offset;
+	u16 aad1_len = 0;
+	u64 payload_offset;
+
+	/* AAD1 offset is from start of FD. FD length always 0. */
+	aad1_offset = 0;
+
+	aad2_offset = aad1_offset;
+	payload_offset = cipher_offset;
+	ctrl2 = aad1_offset |
+	    (aad1_len << SPU2_AAD1_LEN_SHIFT) |
+	    (aad2_offset << SPU2_AAD2_OFFSET_SHIFT) |
+	    (payload_offset << SPU2_PL_OFFSET_SHIFT);
+
+	fmd->ctrl2 = cpu_to_le64(ctrl2);
+}
+
+/**
+ * spu2_fmd_ctrl3_write() - Set the ctrl3 field in FMD
+ * @fmd:          Fixed meta data. First field in SPU2 msg header.
+ * @payload_len:  Length of payload, in bytes
+ */
+static void spu2_fmd_ctrl3_write(struct SPU2_FMD *fmd, u64 payload_len)
+{
+	u64 ctrl3;
+
+	ctrl3 = payload_len & SPU2_PL_LEN;
+
+	fmd->ctrl3 = cpu_to_le64(ctrl3);
+}
+
+/**
+ * spu2_ctx_max_payload() - Determine the maximum length of the payload for a
+ * SPU message for a given cipher and hash alg context.
+ * @cipher_alg:		The cipher algorithm
+ * @cipher_mode:	The cipher mode
+ * @blocksize:		The size of a block of data for this algo
+ *
+ * For SPU2, the hardware generally ignores the PayloadLen field in ctrl3 of
+ * FMD and just keeps computing until it receives a DMA descriptor with the EOF
+ * flag set. So we consider the max payload to be infinite. AES CCM is an
+ * exception.
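+ * For AES CCM, the limit is SPU2_MAX_PAYLOAD rounded down to a whole number
+ * of cipher blocks (a multiple of 16 bytes for AES).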
+ *
+ * Return: Max payload length in bytes
+ */
+u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
+			 enum spu_cipher_mode cipher_mode,
+			 unsigned int blocksize)
+{
+	if ((cipher_alg == CIPHER_ALG_AES) &&
+	    (cipher_mode == CIPHER_MODE_CCM)) {
+		u32 excess = SPU2_MAX_PAYLOAD % blocksize;
+
+		return SPU2_MAX_PAYLOAD - excess;
+	} else {
+		return SPU_MAX_PAYLOAD_INF;
+	}
+}
+
+/**
+ * spu2_payload_length() - Given a SPU2 message header, extract the payload
+ * length.
+ * @spu_hdr:  Start of SPU message header (FMD)
+ *
+ * Return: payload length, in bytes
+ */
+u32 spu2_payload_length(u8 *spu_hdr)
+{
+	struct SPU2_FMD *fmd = (struct SPU2_FMD *)spu_hdr;
+	u32 pl_len;
+	u64 ctrl3;
+
+	ctrl3 = le64_to_cpu(fmd->ctrl3);
+	pl_len = ctrl3 & SPU2_PL_LEN;
+
+	return pl_len;
+}
+
+/**
+ * spu2_response_hdr_len() - Determine the expected length of a SPU response
+ * header.
+ * @auth_key_len:  Length of authentication key, in bytes
+ * @enc_key_len:   Length of encryption key, in bytes
+ * @is_hash:       Unused for SPU2
+ *
+ * For SPU2, includes just FMD. OMD is never requested.
+ *
+ * Return: Length of FMD, in bytes
+ */
+u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
+{
+	return FMD_SIZE;
+}
+
+/**
+ * spu2_hash_pad_len() - Calculate the length of hash padding required to extend
+ * data to a full block size.
+ * @hash_alg:        hash algorithm
+ * @hash_mode:       hash mode
+ * @chunksize:       length of data, in bytes
+ * @hash_block_size: size of a hash block, in bytes
+ *
+ * SPU2 hardware does all hash padding
+ *
+ * Return:  length of hash pad in bytes
+ */
+u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
+		      u32 chunksize, u16 hash_block_size)
+{
+	return 0;
+}
+
+/**
+ * spu2_gcm_ccm_pad_len() - Determine the length of GCM/CCM padding for either
+ * the AAD field or the data.
+ * @cipher_mode:  cipher mode
+ * @data_size:    length of the data field, in bytes
+ *
+ * Return:  0. Unlike SPU-M, SPU2 hardware does any GCM/CCM padding required.
+ */
+u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
+			 unsigned int data_size)
+{
+	return 0;
+}
+
+/**
+ * spu2_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
+ * associated data in a SPU2 output packet.
+ * @cipher_mode:   cipher mode
+ * @assoc_len:     length of additional associated data, in bytes
+ * @iv_len:        length of initialization vector, in bytes
+ * @is_encrypt:    true if encrypting. false if decrypt.
+ *
+ * Return: Length of buffer to catch associated data in response
+ */
+u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode,
+			unsigned int assoc_len, unsigned int iv_len,
+			bool is_encrypt)
+{
+	u32 resp_len = assoc_len;
+
+	if (is_encrypt)
+		/* gcm aes esp has to write 8-byte IV in response */
+		resp_len += iv_len;
+	return resp_len;
+}
+
+/**
+ * spu2_aead_ivlen() - Calculate the length of the AEAD IV to be included
+ * in a SPU request after the AAD and before the payload.
+ * @cipher_mode:  cipher mode
+ * @iv_len:       initialization vector length in bytes
+ *
+ * For SPU2, AEAD IV is included in OMD and does not need to be repeated
+ * prior to the payload.
+ *
+ * Return: Length of AEAD IV in bytes
+ */
+u8 spu2_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len)
+{
+	return 0;
+}
+
+/**
+ * spu2_hash_type() - Determine the type of hash operation.
+ * @src_sent:  The number of bytes in the current request that have already
+ *             been sent to the SPU to be hashed.
+ *
+ * SPU2 always does a FULL hash operation
+ */
+enum hash_type spu2_hash_type(u32 src_sent)
+{
+	return HASH_TYPE_FULL;
+}
+
+/**
+ * spu2_digest_size() - Determine the size of a hash digest to expect the SPU to
+ * return.
+ * @alg_digest_size: Number of bytes in the final digest for the given algo
+ * @alg:             The hash algorithm
+ * @htype:           Type of hash operation (init, update, full, etc)
+ *
+ */
+u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
+		     enum hash_type htype)
+{
+	return alg_digest_size;
+}
+
+/**
+ * spu2_create_request() - Build a SPU2 request message header, including FMD and
+ * OMD.
+ * @spu_hdr: Start of buffer where SPU request header is to be written
+ * @req_opts: SPU request message options
+ * @cipher_parms: Parameters related to cipher algorithm
+ * @hash_parms:   Parameters related to hash algorithm
+ * @aead_parms:   Parameters related to AEAD operation
+ * @data_size:    Length of data to be encrypted or authenticated. If AEAD, does
+ *		  not include length of AAD.
+ *
+ * Construct the message starting at spu_hdr. Caller should allocate this buffer
+ * in DMA-able memory at least SPU_HEADER_ALLOC_LEN bytes long.
+ *
+ * Return: the length of the SPU header in bytes. 0 if an error occurs.
+ */
+u32 spu2_create_request(u8 *spu_hdr,
+			struct spu_request_opts *req_opts,
+			struct spu_cipher_parms *cipher_parms,
+			struct spu_hash_parms *hash_parms,
+			struct spu_aead_parms *aead_parms,
+			unsigned int data_size)
+{
+	struct SPU2_FMD *fmd;
+	u8 *ptr;
+	unsigned int buf_len;
+	int err;
+	enum spu2_cipher_type spu2_ciph_type = SPU2_CIPHER_TYPE_NONE;
+	enum spu2_cipher_mode spu2_ciph_mode;
+	enum spu2_hash_type spu2_auth_type = SPU2_HASH_TYPE_NONE;
+	enum spu2_hash_mode spu2_auth_mode;
+	bool return_md = true;
+	enum spu2_proto_sel proto = SPU2_PROTO_RESV;
+
+	/* size of the payload */
+	unsigned int payload_len =
+	    hash_parms->prebuf_len + data_size + hash_parms->pad_len -
+	    ((req_opts->is_aead && req_opts->is_inbound) ?
+	     hash_parms->digestsize : 0);
+
+	/* offset of prebuf or data from start of AAD2 */
+	unsigned int cipher_offset = aead_parms->assoc_size +
+			aead_parms->aad_pad_len + aead_parms->iv_len;
+
+#ifdef DEBUG
+	/* total size of the data following OMD (without STAT word padding) */
+	unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size,
+						 aead_parms->iv_len,
+						 hash_parms->prebuf_len,
+						 data_size,
+						 aead_parms->aad_pad_len,
+						 aead_parms->data_pad_len,
+						 hash_parms->pad_len);
+#endif
+	unsigned int assoc_size = aead_parms->assoc_size;
+
+	if (req_opts->is_aead &&
+	    (cipher_parms->alg == CIPHER_ALG_AES) &&
+	    (cipher_parms->mode == CIPHER_MODE_GCM))
+		/*
+		 * On SPU 2, aes gcm cipher first on encrypt, auth first on
+		 * decrypt
+		 */
+		req_opts->auth_first = req_opts->is_inbound;
+
+	/* and do opposite for ccm (auth 1st on encrypt) */
+	if (req_opts->is_aead &&
+	    (cipher_parms->alg == CIPHER_ALG_AES) &&
+	    (cipher_parms->mode == CIPHER_MODE_CCM))
+		req_opts->auth_first = !req_opts->is_inbound;
+
+	flow_log("%s()\n", __func__);
+	flow_log("  in:%u authFirst:%u\n",
+		 req_opts->is_inbound, req_opts->auth_first);
+	flow_log("  cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
+		 cipher_parms->mode, cipher_parms->type);
+	flow_log("  is_esp: %s\n", req_opts->is_esp ? "yes" : "no");
+	flow_log("    key: %d\n", cipher_parms->key_len);
+	flow_dump("    key: ", cipher_parms->key_buf, cipher_parms->key_len);
+	flow_log("    iv: %d\n", cipher_parms->iv_len);
+	flow_dump("    iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
+	flow_log("  auth alg:%u mode:%u type %u\n",
+		 hash_parms->alg, hash_parms->mode, hash_parms->type);
+	flow_log("  digestsize: %u\n", hash_parms->digestsize);
+	flow_log("  authkey: %d\n", hash_parms->key_len);
+	flow_dump("  authkey: ", hash_parms->key_buf, hash_parms->key_len);
+	flow_log("  assoc_size:%u\n", assoc_size);
+	flow_log("  prebuf_len:%u\n", hash_parms->prebuf_len);
+	flow_log("  data_size:%u\n", data_size);
+	flow_log("  hash_pad_len:%u\n", hash_parms->pad_len);
+	flow_log("  real_db_size:%u\n", real_db_size);
+	flow_log("  cipher_offset:%u payload_len:%u\n",
+		 cipher_offset, payload_len);
+	flow_log("  aead_iv: %u\n", aead_parms->iv_len);
+
+	/* Convert to spu2 values for cipher alg, hash alg */
+	err = spu2_cipher_xlate(cipher_parms->alg, cipher_parms->mode,
+				cipher_parms->type,
+				&spu2_ciph_type, &spu2_ciph_mode);
+
+	/*
+	 * If we are doing GCM hashing only - either via the rfc4543 transform
+	 * or because we happen to do GCM with AAD only and no payload - we
+	 * need to configure the hardware to use the hash key rather than the
+	 * cipher key and put the data into the payload. This is because,
+	 * unlike SPU-M, SPU2 does not permit running the GCM cipher with a
+	 * zero-length payload.
+	 */
+	if ((req_opts->is_rfc4543) ||
+	    ((spu2_ciph_mode == SPU2_CIPHER_MODE_GCM) &&
+	    (payload_len == 0))) {
+		/* Use hashing (only) and set up hash key */
+		spu2_ciph_type = SPU2_CIPHER_TYPE_NONE;
+		hash_parms->key_len = cipher_parms->key_len;
+		memcpy(hash_parms->key_buf, cipher_parms->key_buf,
+		       cipher_parms->key_len);
+		cipher_parms->key_len = 0;
+
+		if (req_opts->is_rfc4543)
+			payload_len += assoc_size;
+		else
+			payload_len = assoc_size;
+		cipher_offset = 0;
+		assoc_size = 0;
+	}
+
+	if (err)
+		return 0;
+
+	flow_log("spu2 cipher type %s, cipher mode %s\n",
+		 spu2_ciph_type_name(spu2_ciph_type),
+		 spu2_ciph_mode_name(spu2_ciph_mode));
+
+	err = spu2_hash_xlate(hash_parms->alg, hash_parms->mode,
+			      hash_parms->type,
+			      cipher_parms->type,
+			      &spu2_auth_type, &spu2_auth_mode);
+	if (err)
+		return 0;
+
+	flow_log("spu2 hash type %s, hash mode %s\n",
+		 spu2_hash_type_name(spu2_auth_type),
+		 spu2_hash_mode_name(spu2_auth_mode));
+
+	fmd = (struct SPU2_FMD *)spu_hdr;
+
+	spu2_fmd_ctrl0_write(fmd, req_opts->is_inbound, req_opts->auth_first,
+			     proto, spu2_ciph_type, spu2_ciph_mode,
+			     spu2_auth_type, spu2_auth_mode);
+
+	spu2_fmd_ctrl1_write(fmd, req_opts->is_inbound, assoc_size,
+			     hash_parms->key_len, cipher_parms->key_len,
+			     false, false,
+			     aead_parms->return_iv, aead_parms->ret_iv_len,
+			     aead_parms->ret_iv_off,
+			     cipher_parms->iv_len, hash_parms->digestsize,
+			     !req_opts->bd_suppress, return_md);
+
+	spu2_fmd_ctrl2_write(fmd, cipher_offset, hash_parms->key_len, 0,
+			     cipher_parms->key_len, cipher_parms->iv_len);
+
+	spu2_fmd_ctrl3_write(fmd, payload_len);
+
+	ptr = (u8 *)(fmd + 1);
+	buf_len = sizeof(struct SPU2_FMD);
+
+	/* Write OMD */
+	if (hash_parms->key_len) {
+		memcpy(ptr, hash_parms->key_buf, hash_parms->key_len);
+		ptr += hash_parms->key_len;
+		buf_len += hash_parms->key_len;
+	}
+	if (cipher_parms->key_len) {
+		memcpy(ptr, cipher_parms->key_buf, cipher_parms->key_len);
+		ptr += cipher_parms->key_len;
+		buf_len += cipher_parms->key_len;
+	}
+	if (cipher_parms->iv_len) {
+		memcpy(ptr, cipher_parms->iv_buf, cipher_parms->iv_len);
+		ptr += cipher_parms->iv_len;
+		buf_len += cipher_parms->iv_len;
+	}
+
+	packet_dump("  SPU request header: ", spu_hdr, buf_len);
+
+	return buf_len;
+}
+
+/**
+ * spu2_cipher_req_init() - Build an ablkcipher SPU2 request message header,
+ * including FMD and OMD.
+ * @spu_hdr:       Location of start of SPU request (FMD field)
+ * @cipher_parms:  Parameters describing cipher request
+ *
+ * Called at setkey time to initialize a msg header that can be reused for all
+ * subsequent ablkcipher requests. Construct the message starting at spu_hdr.
+ * Caller should allocate this buffer in DMA-able memory at least
+ * SPU_HEADER_ALLOC_LEN bytes long.
+ *
+ * Return: the total length of the SPU header (FMD and OMD) in bytes. 0 if an
+ * error occurs.
+ */
+u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
+{
+	struct SPU2_FMD *fmd;
+	u8 *omd;
+	enum spu2_cipher_type spu2_type = SPU2_CIPHER_TYPE_NONE;
+	enum spu2_cipher_mode spu2_mode;
+	int err;
+
+	flow_log("%s()\n", __func__);
+	flow_log("  cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
+		 cipher_parms->mode, cipher_parms->type);
+	flow_log("  cipher_iv_len: %u\n", cipher_parms->iv_len);
+	flow_log("    key: %d\n", cipher_parms->key_len);
+	flow_dump("    key: ", cipher_parms->key_buf, cipher_parms->key_len);
+
+	/* Convert to spu2 values */
+	err = spu2_cipher_xlate(cipher_parms->alg, cipher_parms->mode,
+				cipher_parms->type, &spu2_type, &spu2_mode);
+	if (err)
+		return 0;
+
+	flow_log("spu2 cipher type %s, cipher mode %s\n",
+		 spu2_ciph_type_name(spu2_type),
+		 spu2_ciph_mode_name(spu2_mode));
+
+	/* Construct the FMD header */
+	fmd = (struct SPU2_FMD *)spu_hdr;
+	err = spu2_fmd_init(fmd, spu2_type, spu2_mode, cipher_parms->key_len,
+			    cipher_parms->iv_len);
+	if (err)
+		return 0;
+
+	/* Write cipher key to OMD */
+	omd = (u8 *)(fmd + 1);
+	if (cipher_parms->key_buf && cipher_parms->key_len)
+		memcpy(omd, cipher_parms->key_buf, cipher_parms->key_len);
+
+	packet_dump("  SPU request header: ", spu_hdr,
+		    FMD_SIZE + cipher_parms->key_len + cipher_parms->iv_len);
+
+	return FMD_SIZE + cipher_parms->key_len + cipher_parms->iv_len;
+}
+
+/**
+ * spu2_cipher_req_finish() - Finish building a SPU request message header for a
+ * block cipher request.
+ * @spu_hdr:         Start of the request message header (MH field)
+ * @spu_req_hdr_len: Length in bytes of the SPU request header
+ * @is_inbound:      0 encrypt, 1 decrypt
+ * @cipher_parms:    Parameters describing cipher operation to be performed
+ * @update_key:      If true, rewrite the cipher key in SCTX
+ * @data_size:       Length of the data in the BD field
+ *
+ * Assumes much of the header was already filled in at setkey() time in
+ * spu2_cipher_req_init(). spu2_cipher_req_init() fills in the encryption key.
+ * For RC4, when submitting a
+ * request for a non-first chunk, we use the 260-byte SUPDT field from the
+ * previous response as the key. update_key is true for this case. Unused in all
+ * other cases.
+ */
+void spu2_cipher_req_finish(u8 *spu_hdr,
+			    u16 spu_req_hdr_len,
+			    unsigned int is_inbound,
+			    struct spu_cipher_parms *cipher_parms,
+			    bool update_key,
+			    unsigned int data_size)
+{
+	struct SPU2_FMD *fmd;
+	u8 *omd;		/* start of optional metadata */
+	u64 ctrl0;
+	u64 ctrl3;
+
+	flow_log("%s()\n", __func__);
+	flow_log(" in: %u\n", is_inbound);
+	flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
+		 cipher_parms->type);
+	if (update_key) {
+		flow_log(" cipher key len: %u\n", cipher_parms->key_len);
+		flow_dump("  key: ", cipher_parms->key_buf,
+			  cipher_parms->key_len);
+	}
+	flow_log(" iv len: %d\n", cipher_parms->iv_len);
+	flow_dump("    iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
+	flow_log(" data_size: %u\n", data_size);
+
+	fmd = (struct SPU2_FMD *)spu_hdr;
+	omd = (u8 *)(fmd + 1);
+
+	/*
+	 * FMD ctrl0 was initialized at setkey time. update it to indicate
+	 * whether we are encrypting or decrypting.
+	 */
+	ctrl0 = le64_to_cpu(fmd->ctrl0);
+	if (is_inbound)
+		ctrl0 &= ~SPU2_CIPH_ENCRYPT_EN;	/* decrypt */
+	else
+		ctrl0 |= SPU2_CIPH_ENCRYPT_EN;	/* encrypt */
+	fmd->ctrl0 = cpu_to_le64(ctrl0);
+
+	if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len) {
+		/* cipher iv provided so put it in here */
+		memcpy(omd + cipher_parms->key_len, cipher_parms->iv_buf,
+		       cipher_parms->iv_len);
+	}
+
+	ctrl3 = le64_to_cpu(fmd->ctrl3);
+	data_size &= SPU2_PL_LEN;
+	ctrl3 |= data_size;
+	fmd->ctrl3 = cpu_to_le64(ctrl3);
+
+	packet_dump("  SPU request header: ", spu_hdr, spu_req_hdr_len);
+}
+
+/**
+ * spu2_request_pad() - Create pad bytes at the end of the data.
+ * @pad_start:      Start of buffer where pad bytes are to be written
+ * @gcm_padding:    Length of GCM padding, in bytes
+ * @hash_pad_len:   Number of bytes of padding to extend data to a full block
+ * @auth_alg:       Authentication algorithm
+ * @auth_mode:      Authentication mode
+ * @total_sent:     Length inserted at end of hash pad
+ * @status_padding: Number of bytes of padding to align STATUS word
+ *
+ * There may be three forms of pad:
+ *  1. GCM pad - for GCM mode ciphers, pad to 16-byte alignment
+ *  2. hash pad - pad to a block length, with 0x80 data terminator and
+ *                size at the end
+ *  3. STAT pad - to ensure the STAT field is 4-byte aligned
+ */
+void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len,
+		      enum hash_alg auth_alg, enum hash_mode auth_mode,
+		      unsigned int total_sent, u32 status_padding)
+{
+	u8 *ptr = pad_start;
+
+	/* fix data alignment for GCM */
+	if (gcm_padding > 0) {
+		flow_log("  GCM: padding to 16 byte alignment: %u bytes\n",
+			 gcm_padding);
+		memset(ptr, 0, gcm_padding);
+		ptr += gcm_padding;
+	}
+
+	if (hash_pad_len > 0) {
+		/* clear the padding section */
+		memset(ptr, 0, hash_pad_len);
+
+		/* terminate the data */
+		*ptr = 0x80;
+		ptr += (hash_pad_len - sizeof(u64));
+
+		/* add the size at the end as required per alg */
+		if (auth_alg == HASH_ALG_MD5)
+			*(u64 *)ptr = cpu_to_le64((u64)total_sent * 8);
+		else		/* SHA1, SHA2-224, SHA2-256 */
+			*(u64 *)ptr = cpu_to_be64((u64)total_sent * 8);
+		ptr += sizeof(u64);
+	}
+
+	/* pad to a 4-byte alignment for STAT */
+	if (status_padding > 0) {
+		flow_log("  STAT: padding to 4 byte alignment: %u bytes\n",
+			 status_padding);
+
+		memset(ptr, 0, status_padding);
+		ptr += status_padding;
+	}
+}
+
+/**
+ * spu2_xts_tweak_in_payload() - Indicate that SPU2 does NOT place the XTS
+ * tweak field in the packet payload (it uses IV instead)
+ *
+ * Return: 0
+ */
+u8 spu2_xts_tweak_in_payload(void)
+{
+	return 0;
+}
+
+/**
+ * spu2_tx_status_len() - Return the length of the STATUS field in a SPU
+ * request message.
+ *
+ * Return: Length of STATUS field in bytes.
+ */
+u8 spu2_tx_status_len(void)
+{
+	return SPU2_TX_STATUS_LEN;
+}
+
+/**
+ * spu2_rx_status_len() - Return the length of the STATUS field in a SPU
+ * response message.
+ *
+ * Return: Length of STATUS field in bytes.
+ */
+u8 spu2_rx_status_len(void)
+{
+	return SPU2_RX_STATUS_LEN;
+}
+
+/**
+ * spu2_status_process() - Process the status from a SPU response message.
+ * @statp:  start of STATUS word
+ *
+ * Return:  0 - if status is good and response should be processed
+ *         !0 - status indicates an error and response is invalid
+ */
+int spu2_status_process(u8 *statp)
+{
+	/* SPU2 status is 2 bytes by default - SPU_RX_STATUS_LEN */
+	u16 status = le16_to_cpu(*(__le16 *)statp);
+
+	if (status == 0)
+		return 0;
+
+	flow_log("rx status is %#x\n", status);
+	if (status == SPU2_INVALID_ICV)
+		return SPU_INVALID_ICV;
+
+	return -EBADMSG;
+}
+
+/**
+ * spu2_ccm_update_iv() - Update the IV as per the requirements for CCM mode.
+ *
+ * @digestsize:		Digest size of this request
+ * @cipher_parms:	(pointer to) cipher parameters, includes IV buf & IV len
+ * @assoclen:		Length of AAD data
+ * @chunksize:		length of input data to be sent in this req
+ * @is_encrypt:		true if this is an output/encrypt operation
+ * @is_esp:		true if this is an ESP / RFC4309 operation
+ *
+ */
+void spu2_ccm_update_iv(unsigned int digestsize,
+			struct spu_cipher_parms *cipher_parms,
+			unsigned int assoclen, unsigned int chunksize,
+			bool is_encrypt, bool is_esp)
+{
+	int L;  /* size of length field, in bytes */
+
+	/*
+	 * In RFC4309 mode, L is fixed at 4 bytes; otherwise, IV from
+	 * testmgr contains (L-1) in bottom 3 bits of first byte,
+	 * per RFC 3610.
+	 */
+	if (is_esp)
+		L = CCM_ESP_L_VALUE;
+	else
+		L = ((cipher_parms->iv_buf[0] & CCM_B0_L_PRIME) >>
+		      CCM_B0_L_PRIME_SHIFT) + 1;
+
+	/* SPU2 wants neither these length bytes nor the first (flags) byte... */
+	cipher_parms->iv_len -= (1 + L);
+	memmove(cipher_parms->iv_buf, &cipher_parms->iv_buf[1],
+		cipher_parms->iv_len);
+}
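+
+/*
+ * Worked example (editor's illustration, not from the original source):
+ * with a 16-byte RFC 3610 IV whose first byte encodes L' = 3 (so L = 4),
+ * the code above drops the leading flags byte and the trailing 4 length
+ * bytes, leaving the 11-byte nonce at the start of iv_buf and iv_len = 11.
+ */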
+
+/**
+ * spu2_wordalign_padlen() - Return the number of padding bytes needed to
+ * word-align the STATUS field. SPU2 does not require this padding.
+ * @data_size: length of data field in bytes
+ *
+ * Return: length of status field padding, in bytes (always 0 on SPU2)
+ */
+u32 spu2_wordalign_padlen(u32 data_size)
+{
+	return 0;
+}
diff --git a/drivers/crypto/bcm/spu2.h b/drivers/crypto/bcm/spu2.h
new file mode 100644
index 0000000..ab1f599
--- /dev/null
+++ b/drivers/crypto/bcm/spu2.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+/*
+ * This file contains SPU message definitions specific to SPU2.
+ */
+
+#ifndef _SPU2_H
+#define _SPU2_H
+
+enum spu2_cipher_type {
+	SPU2_CIPHER_TYPE_NONE = 0x0,
+	SPU2_CIPHER_TYPE_AES128 = 0x1,
+	SPU2_CIPHER_TYPE_AES192 = 0x2,
+	SPU2_CIPHER_TYPE_AES256 = 0x3,
+	SPU2_CIPHER_TYPE_DES = 0x4,
+	SPU2_CIPHER_TYPE_3DES = 0x5,
+	SPU2_CIPHER_TYPE_LAST
+};
+
+enum spu2_cipher_mode {
+	SPU2_CIPHER_MODE_ECB = 0x0,
+	SPU2_CIPHER_MODE_CBC = 0x1,
+	SPU2_CIPHER_MODE_CTR = 0x2,
+	SPU2_CIPHER_MODE_CFB = 0x3,
+	SPU2_CIPHER_MODE_OFB = 0x4,
+	SPU2_CIPHER_MODE_XTS = 0x5,
+	SPU2_CIPHER_MODE_CCM = 0x6,
+	SPU2_CIPHER_MODE_GCM = 0x7,
+	SPU2_CIPHER_MODE_LAST
+};
+
+enum spu2_hash_type {
+	SPU2_HASH_TYPE_NONE = 0x0,
+	SPU2_HASH_TYPE_AES128 = 0x1,
+	SPU2_HASH_TYPE_AES192 = 0x2,
+	SPU2_HASH_TYPE_AES256 = 0x3,
+	SPU2_HASH_TYPE_MD5 = 0x6,
+	SPU2_HASH_TYPE_SHA1 = 0x7,
+	SPU2_HASH_TYPE_SHA224 = 0x8,
+	SPU2_HASH_TYPE_SHA256 = 0x9,
+	SPU2_HASH_TYPE_SHA384 = 0xa,
+	SPU2_HASH_TYPE_SHA512 = 0xb,
+	SPU2_HASH_TYPE_SHA512_224 = 0xc,
+	SPU2_HASH_TYPE_SHA512_256 = 0xd,
+	SPU2_HASH_TYPE_SHA3_224 = 0xe,
+	SPU2_HASH_TYPE_SHA3_256 = 0xf,
+	SPU2_HASH_TYPE_SHA3_384 = 0x10,
+	SPU2_HASH_TYPE_SHA3_512 = 0x11,
+	SPU2_HASH_TYPE_LAST
+};
+
+enum spu2_hash_mode {
+	SPU2_HASH_MODE_CMAC = 0x0,
+	SPU2_HASH_MODE_CBC_MAC = 0x1,
+	SPU2_HASH_MODE_XCBC_MAC = 0x2,
+	SPU2_HASH_MODE_HMAC = 0x3,
+	SPU2_HASH_MODE_RABIN = 0x4,
+	SPU2_HASH_MODE_CCM = 0x5,
+	SPU2_HASH_MODE_GCM = 0x6,
+	SPU2_HASH_MODE_RESERVED = 0x7,
+	SPU2_HASH_MODE_LAST
+};
+
+enum spu2_ret_md_opts {
+	SPU2_RET_NO_MD = 0,	/* return no metadata */
+	SPU2_RET_FMD_OMD = 1,	/* return both FMD and OMD */
+	SPU2_RET_FMD_ONLY = 2,	/* return only FMD */
+	SPU2_RET_FMD_OMD_IV = 3,	/* return FMD and OMD with just IVs */
+};
+
+/* Fixed Metadata format */
+struct SPU2_FMD {
+	u64 ctrl0;
+	u64 ctrl1;
+	u64 ctrl2;
+	u64 ctrl3;
+};
+
+#define FMD_SIZE  sizeof(struct SPU2_FMD)
+
+/* Fixed part of request message header length in bytes. Just FMD. */
+#define SPU2_REQ_FIXED_LEN FMD_SIZE
+#define SPU2_HEADER_ALLOC_LEN (SPU_REQ_FIXED_LEN + \
+				2 * MAX_KEY_SIZE + 2 * MAX_IV_SIZE)
+
+/* FMD ctrl0 field masks */
+#define SPU2_CIPH_ENCRYPT_EN            0x1 /* 0: decrypt, 1: encrypt */
+#define SPU2_CIPH_TYPE                 0xF0 /* one of spu2_cipher_type */
+#define SPU2_CIPH_TYPE_SHIFT              4
+#define SPU2_CIPH_MODE                0xF00 /* one of spu2_cipher_mode */
+#define SPU2_CIPH_MODE_SHIFT              8
+#define SPU2_CFB_MASK                0x7000 /* cipher feedback mask */
+#define SPU2_CFB_MASK_SHIFT              12
+#define SPU2_PROTO_SEL             0xF00000 /* MACsec, IPsec, TLS... */
+#define SPU2_PROTO_SEL_SHIFT             20
+#define SPU2_HASH_FIRST           0x1000000 /* 1: hash input is input pkt
+					     * data
+					     */
+#define SPU2_CHK_TAG              0x2000000 /* 1: check digest provided */
+#define SPU2_HASH_TYPE          0x1F0000000 /* one of spu2_hash_type */
+#define SPU2_HASH_TYPE_SHIFT             28
+#define SPU2_HASH_MODE         0xF000000000 /* one of spu2_hash_mode */
+#define SPU2_HASH_MODE_SHIFT             36
+#define SPU2_CIPH_PAD_EN     0x100000000000 /* 1: Add pad to end of payload for
+					     *    enc
+					     */
+#define SPU2_CIPH_PAD      0xFF000000000000 /* cipher pad value */
+#define SPU2_CIPH_PAD_SHIFT              48
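+
+/*
+ * Example use of the ctrl0 masks above (editor's sketch, not code from the
+ * driver): a field is packed using its _SHIFT value and unpacked with the
+ * mask plus shift, e.g.
+ *
+ *	ctrl0 |= (u64)SPU2_CIPHER_TYPE_AES128 << SPU2_CIPH_TYPE_SHIFT;
+ *	mode = (ctrl0 & SPU2_CIPH_MODE) >> SPU2_CIPH_MODE_SHIFT;
+ */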
+
+/* FMD ctrl1 field masks */
+#define SPU2_TAG_LOC                    0x1 /* 1: end of payload, 0: undef */
+#define SPU2_HAS_FR_DATA                0x2 /* 1: msg has frame data */
+#define SPU2_HAS_AAD1                   0x4 /* 1: msg has AAD1 field */
+#define SPU2_HAS_NAAD                   0x8 /* 1: msg has NAAD field */
+#define SPU2_HAS_AAD2                  0x10 /* 1: msg has AAD2 field */
+#define SPU2_HAS_ESN                   0x20 /* 1: msg has ESN field */
+#define SPU2_HASH_KEY_LEN            0xFF00 /* len of hash key in bytes.
+					     * HMAC only.
+					     */
+#define SPU2_HASH_KEY_LEN_SHIFT           8
+#define SPU2_CIPH_KEY_LEN         0xFF00000 /* len of cipher key in bytes */
+#define SPU2_CIPH_KEY_LEN_SHIFT          20
+#define SPU2_GENIV               0x10000000 /* 1: hw generates IV */
+#define SPU2_HASH_IV             0x20000000 /* 1: IV incl in hash */
+#define SPU2_RET_IV              0x40000000 /* 1: return IV in output msg
+					     *    b4 payload
+					     */
+#define SPU2_RET_IV_LEN         0xF00000000 /* length in bytes of IV returned.
+					     * 0 = 16 bytes
+					     */
+#define SPU2_RET_IV_LEN_SHIFT            32
+#define SPU2_IV_OFFSET         0xF000000000 /* gen IV offset */
+#define SPU2_IV_OFFSET_SHIFT             36
+#define SPU2_IV_LEN          0x1F0000000000 /* length of input IV in bytes */
+#define SPU2_IV_LEN_SHIFT                40
+#define SPU2_HASH_TAG_LEN  0x7F000000000000 /* hash tag length in bytes */
+#define SPU2_HASH_TAG_LEN_SHIFT          48
+#define SPU2_RETURN_MD    0x300000000000000 /* return metadata */
+#define SPU2_RETURN_MD_SHIFT             56
+#define SPU2_RETURN_FD    0x400000000000000
+#define SPU2_RETURN_AAD1  0x800000000000000
+#define SPU2_RETURN_NAAD 0x1000000000000000
+#define SPU2_RETURN_AAD2 0x2000000000000000
+#define SPU2_RETURN_PAY  0x4000000000000000 /* return payload */
+
+/* FMD ctrl2 field masks */
+#define SPU2_AAD1_OFFSET              0xFFF /* byte offset of AAD1 field */
+#define SPU2_AAD1_LEN               0xFF000 /* length of AAD1 in bytes */
+#define SPU2_AAD1_LEN_SHIFT              12
+#define SPU2_AAD2_OFFSET         0xFFF00000 /* byte offset of AAD2 field */
+#define SPU2_AAD2_OFFSET_SHIFT           20
+#define SPU2_PL_OFFSET   0xFFFFFFFF00000000 /* payload offset from AAD2 */
+#define SPU2_PL_OFFSET_SHIFT             32
+
+/* FMD ctrl3 field masks */
+#define SPU2_PL_LEN              0xFFFFFFFF /* payload length in bytes */
+#define SPU2_TLS_LEN         0xFFFF00000000 /* TLS encrypt: cipher len
+					     * TLS decrypt: compressed len
+					     */
+#define SPU2_TLS_LEN_SHIFT               32
+
+/*
+ * Max value that can be represented in the Payload Length field of the
+ * ctrl3 word of FMD.
+ */
+#define SPU2_MAX_PAYLOAD  SPU2_PL_LEN
+
+/* Error values returned in STATUS field of response messages */
+#define SPU2_INVALID_ICV  1
+
+void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len);
+u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
+			 enum spu_cipher_mode cipher_mode,
+			 unsigned int blocksize);
+u32 spu2_payload_length(u8 *spu_hdr);
+u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash);
+u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
+		      u32 chunksize, u16 hash_block_size);
+u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
+			 unsigned int data_size);
+u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode,
+			unsigned int assoc_len, unsigned int iv_len,
+			bool is_encrypt);
+u8 spu2_aead_ivlen(enum spu_cipher_mode cipher_mode,
+		   u16 iv_len);
+enum hash_type spu2_hash_type(u32 src_sent);
+u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
+		     enum hash_type htype);
+u32 spu2_create_request(u8 *spu_hdr,
+			struct spu_request_opts *req_opts,
+			struct spu_cipher_parms *cipher_parms,
+			struct spu_hash_parms *hash_parms,
+			struct spu_aead_parms *aead_parms,
+			unsigned int data_size);
+u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms);
+void spu2_cipher_req_finish(u8 *spu_hdr,
+			    u16 spu_req_hdr_len,
+			    unsigned int is_inbound,
+			    struct spu_cipher_parms *cipher_parms,
+			    bool update_key,
+			    unsigned int data_size);
+void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len,
+		      enum hash_alg auth_alg, enum hash_mode auth_mode,
+		      unsigned int total_sent, u32 status_padding);
+u8 spu2_xts_tweak_in_payload(void);
+u8 spu2_tx_status_len(void);
+u8 spu2_rx_status_len(void);
+int spu2_status_process(u8 *statp);
+void spu2_ccm_update_iv(unsigned int digestsize,
+			struct spu_cipher_parms *cipher_parms,
+			unsigned int assoclen, unsigned int chunksize,
+			bool is_encrypt, bool is_esp);
+u32 spu2_wordalign_padlen(u32 data_size);
+#endif
diff --git a/drivers/crypto/bcm/spum.h b/drivers/crypto/bcm/spum.h
new file mode 100644
index 0000000..d0a5b58
--- /dev/null
+++ b/drivers/crypto/bcm/spum.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+/*
+ * This file contains SPU message definitions specific to SPU-M.
+ */
+
+#ifndef _SPUM_H_
+#define _SPUM_H_
+
+#define SPU_CRYPTO_OPERATION_GENERIC	0x1
+
+/* Length of STATUS field in tx and rx packets */
+#define SPU_TX_STATUS_LEN  4
+
+/* SPU-M error codes */
+#define SPU_STATUS_MASK                 0x0000FF00
+#define SPU_STATUS_SUCCESS              0x00000000
+#define SPU_STATUS_INVALID_ICV          0x00000100
+
+#define SPU_STATUS_ERROR_FLAG           0x00020000
+
+/* Request message. MH + EMH + BDESC + BD header */
+#define SPU_REQ_FIXED_LEN 24
+
+/*
+ * Max length of a SPU message header. Used to allocate a buffer where
+ * the SPU message header is constructed. Can be used for either a SPU-M
+ * header or a SPU2 header.
+ * For SPU-M, sum of the following:
+ *    MH - 4 bytes
+ *    EMH - 4
+ *    SCTX - 3 +
+ *      max auth key len - 64
+ *      max cipher key len - 264 (RC4)
+ *      max IV len - 16
+ *    BDESC - 12
+ *    BD header - 4
+ * Total:  371
+ *
+ * For SPU2, FMD_SIZE (32) plus lengths of hash and cipher keys,
+ * hash and cipher IVs. Since SPU2 does not support RC4, the SPU-M total
+ * above is the larger of the two.
+ */
+#define SPU_HEADER_ALLOC_LEN  (SPU_REQ_FIXED_LEN + MAX_KEY_SIZE + \
+				MAX_KEY_SIZE + MAX_IV_SIZE)
+
+/*
+ * Response message header length. Normally MH, EMH, BD header, but when
+ * BD_SUPPRESS is used for hash requests, there is no BD header.
+ */
+#define SPU_RESP_HDR_LEN 12
+#define SPU_HASH_RESP_HDR_LEN 8
+
+/*
+ * Max value that can be represented in the Payload Length field of the BD
+ * header. This is a 16-bit field.
+ */
+#define SPUM_NS2_MAX_PAYLOAD  (BIT(16) - 1)
+
+/*
+ * NSP SPU is limited to ~9KB because of FA2 FIFO size limitations;
+ * Set MAX_PAYLOAD to 8k to allow for addition of header, digest, etc.
+ * and stay within limitation.
+ */
+
+#define SPUM_NSP_MAX_PAYLOAD	8192
+
+/* Buffer Descriptor Header [BDESC]. SPU in big-endian mode. */
+struct BDESC_HEADER {
+	u16 offset_mac;		/* word 0 [31-16] */
+	u16 length_mac;		/* word 0 [15-0]  */
+	u16 offset_crypto;	/* word 1 [31-16] */
+	u16 length_crypto;	/* word 1 [15-0]  */
+	u16 offset_icv;		/* word 2 [31-16] */
+	u16 offset_iv;		/* word 2 [15-0]  */
+};
+
+/* Buffer Data Header [BD]. SPU in big-endian mode. */
+struct BD_HEADER {
+	u16 size;
+	u16 prev_length;
+};
+
+/* Command Context Header. SPU-M in big endian mode. */
+struct MHEADER {
+	u8 flags;	/* [31:24] */
+	u8 op_code;	/* [23:16] */
+	u16 reserved;	/* [15:0] */
+};
+
+/* MH header flags bits */
+#define MH_SUPDT_PRES   BIT(0)
+#define MH_HASH_PRES    BIT(2)
+#define MH_BD_PRES      BIT(3)
+#define MH_MFM_PRES     BIT(4)
+#define MH_BDESC_PRES   BIT(5)
+#define MH_SCTX_PRES	BIT(7)
+
+/* SCTX word 0 bit offsets and fields masks */
+#define SCTX_SIZE               0x000000FF
+
+/* SCTX word 1 bit shifts and field masks */
+#define  UPDT_OFST              0x000000FF   /* offset of SCTX updateable fld */
+#define  HASH_TYPE              0x00000300   /* hash alg operation type */
+#define  HASH_TYPE_SHIFT                 8
+#define  HASH_MODE              0x00001C00   /* one of spu2_hash_mode */
+#define  HASH_MODE_SHIFT                10
+#define  HASH_ALG               0x0000E000   /* hash algorithm */
+#define  HASH_ALG_SHIFT                 13
+#define  CIPHER_TYPE            0x00030000   /* encryption operation type */
+#define  CIPHER_TYPE_SHIFT              16
+#define  CIPHER_MODE            0x001C0000   /* encryption mode */
+#define  CIPHER_MODE_SHIFT              18
+#define  CIPHER_ALG             0x00E00000   /* encryption algo */
+#define  CIPHER_ALG_SHIFT               21
+#define  ICV_IS_512                BIT(27)
+#define  ICV_IS_512_SHIFT		27
+#define  CIPHER_ORDER               BIT(30)
+#define  CIPHER_ORDER_SHIFT             30
+#define  CIPHER_INBOUND             BIT(31)
+#define  CIPHER_INBOUND_SHIFT           31
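+
+/*
+ * Example use of the SCTX word 1 masks above (illustrative only, ignoring
+ * the byte-order conversion applied when the header is built):
+ *
+ *	alg = (cipher_flags & CIPHER_ALG) >> CIPHER_ALG_SHIFT;
+ *	inbound = !!(cipher_flags & CIPHER_INBOUND);
+ */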
+
+/* SCTX word 2 bit shifts and field masks */
+#define  EXP_IV_SIZE                   0x7
+#define  IV_OFFSET                   BIT(3)
+#define  IV_OFFSET_SHIFT                 3
+#define  GEN_IV                      BIT(5)
+#define  GEN_IV_SHIFT                    5
+#define  EXPLICIT_IV                 BIT(6)
+#define  EXPLICIT_IV_SHIFT               6
+#define  SCTX_IV                     BIT(7)
+#define  SCTX_IV_SHIFT                   7
+#define  ICV_SIZE                   0x0F00
+#define  ICV_SIZE_SHIFT                  8
+#define  CHECK_ICV                  BIT(12)
+#define  CHECK_ICV_SHIFT                12
+#define  INSERT_ICV                 BIT(13)
+#define  INSERT_ICV_SHIFT               13
+#define  BD_SUPPRESS                BIT(19)
+#define  BD_SUPPRESS_SHIFT              19
+
+/* Generic Mode Security Context Structure [SCTX] */
+struct SCTX {
+/* word 0: protocol flags */
+	u32 proto_flags;
+
+/* word 1: cipher flags */
+	u32 cipher_flags;
+
+/* word 2: Extended cipher flags */
+	u32 ecf;
+
+};
+
+struct SPUHEADER {
+	struct MHEADER mh;
+	u32 emh;
+	struct SCTX sa;
+};
+
+#endif /* _SPUM_H_ */
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
new file mode 100644
index 0000000..a912c6a
--- /dev/null
+++ b/drivers/crypto/bcm/util.c
@@ -0,0 +1,580 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#include <linux/debugfs.h>
+
+#include "cipher.h"
+#include "util.h"
+
+/* offset of SPU_OFIFO_CTRL register */
+#define SPU_OFIFO_CTRL      0x40
+#define SPU_FIFO_WATERMARK  0x1FF
+
+/**
+ * spu_sg_at_offset() - Find the scatterlist entry at a given distance from the
+ * start of a scatterlist.
+ * @sg:         [in]  Start of a scatterlist
+ * @skip:       [in]  Distance from the start of the scatterlist, in bytes
+ * @sge:        [out] Scatterlist entry at skip bytes from start
+ * @sge_offset: [out] Number of bytes from start of sge buffer to get to
+ *                    requested distance.
+ *
+ * Return: 0 if entry found at requested distance
+ *         < 0 otherwise
+ */
+int spu_sg_at_offset(struct scatterlist *sg, unsigned int skip,
+		     struct scatterlist **sge, unsigned int *sge_offset)
+{
+	/* byte index from start of sg to the end of the previous entry */
+	unsigned int index = 0;
+	/* byte index from start of sg to the end of the current entry */
+	unsigned int next_index;
+
+	next_index = sg->length;
+	while (next_index <= skip) {
+		sg = sg_next(sg);
+		index = next_index;
+		if (!sg)
+			return -EINVAL;
+		next_index += sg->length;
+	}
+
+	*sge_offset = skip - index;
+	*sge = sg;
+	return 0;
+}
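+
+/*
+ * Illustrative example (not from the original source): for a scatterlist of
+ * two 64-byte entries, spu_sg_at_offset(sg, 80, &sge, &off) returns 0 with
+ * sge pointing at the second entry and off == 16, since the requested
+ * distance falls 16 bytes into that entry.
+ */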
+
+/* Copy len bytes of sg data, starting at offset skip, to a dest buffer */
+void sg_copy_part_to_buf(struct scatterlist *src, u8 *dest,
+			 unsigned int len, unsigned int skip)
+{
+	size_t copied;
+	unsigned int nents = sg_nents(src);
+
+	copied = sg_pcopy_to_buffer(src, nents, dest, len, skip);
+	if (copied != len) {
+		flow_log("%s copied %u bytes of %u requested. ",
+			 __func__, (u32)copied, len);
+		flow_log("sg with %u entries and skip %u\n", nents, skip);
+	}
+}
+
+/*
+ * Copy data into a scatterlist starting at a specified offset in the
+ * scatterlist. Specifically, copy len bytes of data in the buffer src
+ * into the scatterlist dest, starting skip bytes into the scatterlist.
+ */
+void sg_copy_part_from_buf(struct scatterlist *dest, u8 *src,
+			   unsigned int len, unsigned int skip)
+{
+	size_t copied;
+	unsigned int nents = sg_nents(dest);
+
+	copied = sg_pcopy_from_buffer(dest, nents, src, len, skip);
+	if (copied != len) {
+		flow_log("%s copied %u bytes of %u requested. ",
+			 __func__, (u32)copied, len);
+		flow_log("sg with %u entries and skip %u\n", nents, skip);
+	}
+}
+
+/**
+ * spu_sg_count() - Determine number of elements in scatterlist to provide a
+ * specified number of bytes.
+ * @sg_list:  scatterlist to examine
+ * @skip:     byte offset into the scatterlist at which to start counting
+ * @nbytes:   consider elements of scatterlist until reaching this number of
+ *	      bytes
+ *
+ * Return: the number of sg entries contributing to nbytes of data
+ */
+int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes)
+{
+	struct scatterlist *sg;
+	int sg_nents = 0;
+	unsigned int offset;
+
+	if (!sg_list)
+		return 0;
+
+	if (spu_sg_at_offset(sg_list, skip, &sg, &offset) < 0)
+		return 0;
+
+	while (sg && (nbytes > 0)) {
+		sg_nents++;
+		nbytes -= (sg->length - offset);
+		offset = 0;
+		sg = sg_next(sg);
+	}
+	return sg_nents;
+}
+
+/**
+ * spu_msg_sg_add() - Copy scatterlist entries from one sg to another, up to a
+ * given length.
+ * @to_sg:       scatterlist to copy to
+ * @from_sg:     scatterlist to copy from
+ * @from_skip:   number of bytes to skip in from_sg. Non-zero when previous
+ *		 request included part of the buffer in an entry in from_sg.
+ *		 Assumes from_skip < from_sg->length.
+ * @from_nents:  number of entries in from_sg
+ * @length:      number of bytes to copy. May reach this limit before exhausting
+ *		 from_sg.
+ *
+ * Copies the entries themselves, not the data in the entries. Assumes to_sg has
+ * enough entries. Does not limit the size of an individual buffer in to_sg.
+ *
+ * to_sg, from_sg, and from_skip are all updated to the end of the copy.
+ *
+ * Return: Number of bytes copied
+ */
+u32 spu_msg_sg_add(struct scatterlist **to_sg,
+		   struct scatterlist **from_sg, u32 *from_skip,
+		   u8 from_nents, u32 length)
+{
+	struct scatterlist *sg;	/* an entry in from_sg */
+	struct scatterlist *to = *to_sg;
+	struct scatterlist *from = *from_sg;
+	u32 skip = *from_skip;
+	u32 offset;
+	int i;
+	u32 entry_len = 0;
+	u32 frag_len = 0;	/* length of entry added to to_sg */
+	u32 copied = 0;		/* number of bytes copied so far */
+
+	if (length == 0)
+		return 0;
+
+	for_each_sg(from, sg, from_nents, i) {
+		/* number of bytes in this from entry not yet used */
+		entry_len = sg->length - skip;
+		frag_len = min(entry_len, length - copied);
+		offset = sg->offset + skip;
+		if (frag_len)
+			sg_set_page(to++, sg_page(sg), frag_len, offset);
+		copied += frag_len;
+		if (copied == entry_len) {
+			/* used up all of from entry */
+			skip = 0;	/* start at beginning of next entry */
+		}
+		if (copied == length)
+			break;
+	}
+	*to_sg = to;
+	*from_sg = sg;
+	if (frag_len < entry_len)
+		*from_skip = skip + frag_len;
+	else
+		*from_skip = 0;
+
+	return copied;
+}
+
+void add_to_ctr(u8 *ctr_pos, unsigned int increment)
+{
+	__be64 *high_be = (__be64 *)ctr_pos;
+	__be64 *low_be = high_be + 1;
+	u64 orig_low = __be64_to_cpu(*low_be);
+	u64 new_low = orig_low + (u64)increment;
+
+	*low_be = __cpu_to_be64(new_low);
+	if (new_low < orig_low)
+		/* there was a carry from the low 8 bytes */
+		*high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
+}
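+
+/*
+ * Illustrative example (editor's note): add_to_ctr() treats ctr_pos as a
+ * 16-byte big-endian counter. If the low 8 bytes hold 0xFFFFFFFFFFFFFFFF,
+ * add_to_ctr(ctr_pos, 1) wraps the low half to 0 and carries 1 into the
+ * high 8 bytes.
+ */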
+
+struct sdesc {
+	struct shash_desc shash;
+	char ctx[];
+};
+
+/* do a synchronous decrypt operation */
+int do_decrypt(char *alg_name,
+	       void *key_ptr, unsigned int key_len,
+	       void *iv_ptr, void *src_ptr, void *dst_ptr,
+	       unsigned int block_len)
+{
+	struct scatterlist sg_in[1], sg_out[1];
+	struct crypto_blkcipher *tfm =
+	    crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
+	struct blkcipher_desc desc = {.tfm = tfm, .flags = 0 };
+	int ret = 0;
+	void *iv;
+	int ivsize;
+
+	flow_log("%s() name:%s block_len:%u\n", __func__, alg_name, block_len);
+
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	crypto_blkcipher_setkey((void *)tfm, key_ptr, key_len);
+
+	sg_init_table(sg_in, 1);
+	sg_set_buf(sg_in, src_ptr, block_len);
+
+	sg_init_table(sg_out, 1);
+	sg_set_buf(sg_out, dst_ptr, block_len);
+
+	iv = crypto_blkcipher_crt(tfm)->iv;
+	ivsize = crypto_blkcipher_ivsize(tfm);
+	memcpy(iv, iv_ptr, ivsize);
+
+	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, block_len);
+	crypto_free_blkcipher(tfm);
+
+	if (ret < 0)
+		pr_err("aes_decrypt failed %d\n", ret);
+
+	return ret;
+}
+
+/**
+ * do_shash() - Do a synchronous hash operation in software
+ * @name:       The name of the hash algorithm
+ * @result:     Buffer where digest is to be written
+ * @data1:      First part of data to hash. May be NULL.
+ * @data1_len:  Length of data1, in bytes
+ * @data2:      Second part of data to hash. May be NULL.
+ * @data2_len:  Length of data2, in bytes
+ * @key:	Key (if keyed hash)
+ * @key_len:	Length of key, in bytes (or 0 if non-keyed hash)
+ *
+ * Note that the crypto API will not select this driver's own transform because
+ * this driver only registers asynchronous algos.
+ *
+ * Return: 0 if hash successfully stored in result
+ *         < 0 otherwise
+ */
+int do_shash(unsigned char *name, unsigned char *result,
+	     const u8 *data1, unsigned int data1_len,
+	     const u8 *data2, unsigned int data2_len,
+	     const u8 *key, unsigned int key_len)
+{
+	int rc;
+	unsigned int size;
+	struct crypto_shash *hash;
+	struct sdesc *sdesc;
+
+	hash = crypto_alloc_shash(name, 0, 0);
+	if (IS_ERR(hash)) {
+		rc = PTR_ERR(hash);
+		pr_err("%s: Crypto %s allocation error %d\n", __func__, name, rc);
+		return rc;
+	}
+
+	size = sizeof(struct shash_desc) + crypto_shash_descsize(hash);
+	sdesc = kmalloc(size, GFP_KERNEL);
+	if (!sdesc) {
+		rc = -ENOMEM;
+		goto do_shash_err;
+	}
+	sdesc->shash.tfm = hash;
+	sdesc->shash.flags = 0x0;
+
+	if (key_len > 0) {
+		rc = crypto_shash_setkey(hash, key, key_len);
+		if (rc) {
+			pr_err("%s: Could not setkey %s shash\n", __func__, name);
+			goto do_shash_err;
+		}
+	}
+
+	rc = crypto_shash_init(&sdesc->shash);
+	if (rc) {
+		pr_err("%s: Could not init %s shash\n", __func__, name);
+		goto do_shash_err;
+	}
+	rc = crypto_shash_update(&sdesc->shash, data1, data1_len);
+	if (rc) {
+		pr_err("%s: Could not update1\n", __func__);
+		goto do_shash_err;
+	}
+	if (data2 && data2_len) {
+		rc = crypto_shash_update(&sdesc->shash, data2, data2_len);
+		if (rc) {
+			pr_err("%s: Could not update2\n", __func__);
+			goto do_shash_err;
+		}
+	}
+	rc = crypto_shash_final(&sdesc->shash, result);
+	if (rc)
+		pr_err("%s: Could not generate %s hash\n", __func__, name);
+
+do_shash_err:
+	crypto_free_shash(hash);
+	kfree(sdesc);
+
+	return rc;
+}
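+
+/*
+ * Example call (illustrative only; msg and msg_len are hypothetical caller
+ * variables, not names from this driver):
+ *
+ *	u8 digest[SHA256_DIGEST_SIZE];
+ *	int err = do_shash("sha256", digest, msg, msg_len, NULL, 0, NULL, 0);
+ *
+ * computes an unkeyed SHA-256 over a single buffer using a software shash.
+ */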
+
+/* Dump len bytes of a scatterlist starting at skip bytes into the sg */
+void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len)
+{
+	u8 dbuf[16];
+	unsigned int idx = skip;
+	unsigned int num_out = 0;	/* number of bytes dumped so far */
+	unsigned int count;
+
+	if (packet_debug_logging) {
+		while (num_out < len) {
+			count = (len - num_out > 16) ? 16 : len - num_out;
+			sg_copy_part_to_buf(sg, dbuf, count, idx);
+			num_out += count;
+			print_hex_dump(KERN_ALERT, "  sg: ", DUMP_PREFIX_NONE,
+				       4, 1, dbuf, count, false);
+			idx += 16;
+		}
+	}
+	if (debug_logging_sleep)
+		msleep(debug_logging_sleep);
+}
+
+/* Returns the name for a given cipher alg/mode */
+char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
+{
+	switch (alg) {
+	case CIPHER_ALG_RC4:
+		return "rc4";
+	case CIPHER_ALG_AES:
+		switch (mode) {
+		case CIPHER_MODE_CBC:
+			return "cbc(aes)";
+		case CIPHER_MODE_ECB:
+			return "ecb(aes)";
+		case CIPHER_MODE_OFB:
+			return "ofb(aes)";
+		case CIPHER_MODE_CFB:
+			return "cfb(aes)";
+		case CIPHER_MODE_CTR:
+			return "ctr(aes)";
+		case CIPHER_MODE_XTS:
+			return "xts(aes)";
+		case CIPHER_MODE_GCM:
+			return "gcm(aes)";
+		default:
+			return "aes";
+		}
+		break;
+	case CIPHER_ALG_DES:
+		switch (mode) {
+		case CIPHER_MODE_CBC:
+			return "cbc(des)";
+		case CIPHER_MODE_ECB:
+			return "ecb(des)";
+		case CIPHER_MODE_CTR:
+			return "ctr(des)";
+		default:
+			return "des";
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		switch (mode) {
+		case CIPHER_MODE_CBC:
+			return "cbc(des3_ede)";
+		case CIPHER_MODE_ECB:
+			return "ecb(des3_ede)";
+		case CIPHER_MODE_CTR:
+			return "ctr(des3_ede)";
+		default:
+			return "3des";
+		}
+		break;
+	default:
+		return "other";
+	}
+}
+
+static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
+				size_t count, loff_t *offp)
+{
+	struct device_private *ipriv;
+	char *buf;
+	ssize_t ret, out_offset, out_count;
+	int i;
+	u32 fifo_len;
+	u32 spu_ofifo_ctrl;
+	u32 alg;
+	u32 mode;
+	u32 op_cnt;
+
+	out_count = 2048;
+
+	buf = kmalloc(out_count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ipriv = filp->private_data;
+	out_offset = 0;
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Number of SPUs.........%u\n",
+			       ipriv->spu.num_spu);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Current sessions.......%u\n",
+			       atomic_read(&ipriv->session_count));
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Session count..........%u\n",
+			       atomic_read(&ipriv->stream_count));
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Cipher setkey..........%u\n",
+			       atomic_read(&ipriv->setkey_cnt[SPU_OP_CIPHER]));
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Cipher Ops.............%u\n",
+			       atomic_read(&ipriv->op_counts[SPU_OP_CIPHER]));
+	for (alg = 0; alg < CIPHER_ALG_LAST; alg++) {
+		for (mode = 0; mode < CIPHER_MODE_LAST; mode++) {
+			op_cnt = atomic_read(&ipriv->cipher_cnt[alg][mode]);
+			if (op_cnt) {
+				out_offset += snprintf(buf + out_offset,
+						       out_count - out_offset,
+			       "  %-13s%11u\n",
+			       spu_alg_name(alg, mode), op_cnt);
+			}
+		}
+	}
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Hash Ops...............%u\n",
+			       atomic_read(&ipriv->op_counts[SPU_OP_HASH]));
+	for (alg = 0; alg < HASH_ALG_LAST; alg++) {
+		op_cnt = atomic_read(&ipriv->hash_cnt[alg]);
+		if (op_cnt) {
+			out_offset += snprintf(buf + out_offset,
+					       out_count - out_offset,
+		       "  %-13s%11u\n",
+		       hash_alg_name[alg], op_cnt);
+		}
+	}
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "HMAC setkey............%u\n",
+			       atomic_read(&ipriv->setkey_cnt[SPU_OP_HMAC]));
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "HMAC Ops...............%u\n",
+			       atomic_read(&ipriv->op_counts[SPU_OP_HMAC]));
+	for (alg = 0; alg < HASH_ALG_LAST; alg++) {
+		op_cnt = atomic_read(&ipriv->hmac_cnt[alg]);
+		if (op_cnt) {
+			out_offset += snprintf(buf + out_offset,
+					       out_count - out_offset,
+		       "  %-13s%11u\n",
+		       hash_alg_name[alg], op_cnt);
+		}
+	}
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "AEAD setkey............%u\n",
+			       atomic_read(&ipriv->setkey_cnt[SPU_OP_AEAD]));
+
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "AEAD Ops...............%u\n",
+			       atomic_read(&ipriv->op_counts[SPU_OP_AEAD]));
+	for (alg = 0; alg < AEAD_TYPE_LAST; alg++) {
+		op_cnt = atomic_read(&ipriv->aead_cnt[alg]);
+		if (op_cnt) {
+			out_offset += snprintf(buf + out_offset,
+					       out_count - out_offset,
+		       "  %-13s%11u\n",
+		       aead_alg_name[alg], op_cnt);
+		}
+	}
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Bytes of req data......%llu\n",
+			       (u64)atomic64_read(&ipriv->bytes_out));
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Bytes of resp data.....%llu\n",
+			       (u64)atomic64_read(&ipriv->bytes_in));
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Mailbox full...........%u\n",
+			       atomic_read(&ipriv->mb_no_spc));
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Mailbox send failures..%u\n",
+			       atomic_read(&ipriv->mb_send_fail));
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Check ICV errors.......%u\n",
+			       atomic_read(&ipriv->bad_icv));
+	if (ipriv->spu.spu_type == SPU_TYPE_SPUM)
+		for (i = 0; i < ipriv->spu.num_spu; i++) {
+			spu_ofifo_ctrl = ioread32(ipriv->spu.reg_vbase[i] +
+						  SPU_OFIFO_CTRL);
+			fifo_len = spu_ofifo_ctrl & SPU_FIFO_WATERMARK;
+			out_offset += snprintf(buf + out_offset,
+					       out_count - out_offset,
+				       "SPU %d output FIFO high water.....%u\n",
+				       i, fifo_len);
+		}
+
+	if (out_offset > out_count)
+		out_offset = out_count;
+
+	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations spu_debugfs_stats = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = spu_debugfs_read,
+};
+
+/*
+ * Create the debug FS directories. If the top-level directory has not yet
+ * been created, create it now. Create a stats file in this directory for
+ * a SPU.
+ */
+void spu_setup_debugfs(void)
+{
+	if (!debugfs_initialized())
+		return;
+
+	if (!iproc_priv.debugfs_dir)
+		iproc_priv.debugfs_dir = debugfs_create_dir(KBUILD_MODNAME,
+							    NULL);
+
+	if (!iproc_priv.debugfs_stats)
+		/* Create file with permissions S_IRUSR */
+		debugfs_create_file("stats", 0400, iproc_priv.debugfs_dir,
+				    &iproc_priv, &spu_debugfs_stats);
+}
+
+void spu_free_debugfs(void)
+{
+	debugfs_remove_recursive(iproc_priv.debugfs_dir);
+	iproc_priv.debugfs_dir = NULL;
+}
+
+/**
+ * format_value_ccm() - Format a value into a buffer, using a specified number
+ *			of bytes (i.e. maybe writing value X into a 4 byte
+ *			buffer, or maybe into a 12 byte buffer), as per the
+ *			SPU CCM spec.
+ *
+ * @val:		value to write (up to max of unsigned int)
+ * @buf:		(pointer to) buffer to write the value
+ * @len:		number of bytes to use (0 to 255)
+ *
+ */
+void format_value_ccm(unsigned int val, u8 *buf, u8 len)
+{
+	int i;
+
+	/* First clear full output buffer */
+	memset(buf, 0, len);
+
+	/* Then, starting from right side, fill in with data */
+	for (i = 0; i < len; i++) {
+		buf[len - i - 1] = (val >> (8 * i)) & 0xff;
+		if (i >= 3)
+			break;  /* Only handle up to 32 bits of 'val' */
+	}
+}
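+
+/*
+ * Illustrative example (not from the original source):
+ * format_value_ccm(0x0102, buf, 4) fills buf with { 0x00, 0x00, 0x01, 0x02 },
+ * i.e. the value right-aligned and zero-padded to the requested width.
+ */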
diff --git a/drivers/crypto/bcm/util.h b/drivers/crypto/bcm/util.h
new file mode 100644
index 0000000..712e029
--- /dev/null
+++ b/drivers/crypto/bcm/util.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#ifndef _UTIL_H
+#define _UTIL_H
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include "spu.h"
+
+extern int flow_debug_logging;
+extern int packet_debug_logging;
+extern int debug_logging_sleep;
+
+#ifdef DEBUG
+#define flow_log(...)	                \
+	do {	                              \
+		if (flow_debug_logging) {	        \
+			printk(__VA_ARGS__);	          \
+			if (debug_logging_sleep)	      \
+				msleep(debug_logging_sleep);	\
+		}	                                \
+	} while (0)
+#define flow_dump(msg, var, var_len)	   \
+	do {	                                 \
+		if (flow_debug_logging) {	           \
+			print_hex_dump(KERN_ALERT, msg, DUMP_PREFIX_NONE,  \
+					16, 1, var, var_len, false); \
+				if (debug_logging_sleep)	       \
+					msleep(debug_logging_sleep);   \
+		}                                    \
+	} while (0)
+
+#define packet_log(...)               \
+	do {                                \
+		if (packet_debug_logging) {       \
+			printk(__VA_ARGS__);            \
+			if (debug_logging_sleep)        \
+				msleep(debug_logging_sleep);  \
+		}                                 \
+	} while (0)
+#define packet_dump(msg, var, var_len)   \
+	do {                                   \
+		if (packet_debug_logging) {          \
+			print_hex_dump(KERN_ALERT, msg, DUMP_PREFIX_NONE,  \
+					16, 1, var, var_len, false); \
+			if (debug_logging_sleep)           \
+				msleep(debug_logging_sleep);     \
+		}                                    \
+	} while (0)
+
+void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len);
+
+#define dump_sg(sg, skip, len)     __dump_sg(sg, skip, len)
+
+#else /* !DEBUG */
+
+#define flow_log(...) do {} while (0)
+#define flow_dump(msg, var, var_len) do {} while (0)
+#define packet_log(...) do {} while (0)
+#define packet_dump(msg, var, var_len) do {} while (0)
+
+#define dump_sg(sg, skip, len) do {} while (0)
+
+#endif /* DEBUG */
+
+int spu_sg_at_offset(struct scatterlist *sg, unsigned int skip,
+		     struct scatterlist **sge, unsigned int *sge_offset);
+
+/* Copy sg data, from skip, length len, to dest */
+void sg_copy_part_to_buf(struct scatterlist *src, u8 *dest,
+			 unsigned int len, unsigned int skip);
+/* Copy src into scatterlist from offset, length len */
+void sg_copy_part_from_buf(struct scatterlist *dest, u8 *src,
+			   unsigned int len, unsigned int skip);
+
+int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes);
+u32 spu_msg_sg_add(struct scatterlist **to_sg,
+		   struct scatterlist **from_sg, u32 *skip,
+		   u8 from_nents, u32 tot_len);
+
+void add_to_ctr(u8 *ctr_pos, unsigned int increment);
+
+/* do a synchronous decrypt operation */
+int do_decrypt(char *alg_name,
+	       void *key_ptr, unsigned int key_len,
+	       void *iv_ptr, void *src_ptr, void *dst_ptr,
+	       unsigned int block_len);
+
+/* produce a message digest from data of length n bytes */
+int do_shash(unsigned char *name, unsigned char *result,
+	     const u8 *data1, unsigned int data1_len,
+	     const u8 *data2, unsigned int data2_len,
+	     const u8 *key, unsigned int key_len);
+
+char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode);
+
+void spu_setup_debugfs(void);
+void spu_free_debugfs(void);
+void format_value_ccm(unsigned int val, u8 *buf, u8 len);
+
+#endif
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
new file mode 100644
index 0000000..1eb8527
--- /dev/null
+++ b/drivers/crypto/caam/Kconfig
@@ -0,0 +1,154 @@
+config CRYPTO_DEV_FSL_CAAM
+	tristate "Freescale CAAM-Multicore driver backend"
+	depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+	select SOC_BUS
+	help
+	  Enables the driver module for Freescale's Cryptographic Accelerator
+	  and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
+	  This module creates job ring devices, and configures h/w
+	  to operate as a DPAA component automatically, depending
+	  on h/w feature availability.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called caam.
+
+config CRYPTO_DEV_FSL_CAAM_JR
+	tristate "Freescale CAAM Job Ring driver backend"
+	depends on CRYPTO_DEV_FSL_CAAM
+	default y
+	help
+	  Enables the driver module for Job Rings which are part of
+	  Freescale's Cryptographic Accelerator
+	  and Assurance Module (CAAM). This module adds a job ring operation
+	  interface.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called caam_jr.
+
+config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+	int "Job Ring size"
+	depends on CRYPTO_DEV_FSL_CAAM_JR
+	range 2 9
+	default "9"
+	help
+	  Select size of Job Rings as a power of 2, within the
+	  range 2-9 (ring size 4-512).
+	  Examples:
+		2 => 4
+		3 => 8
+		4 => 16
+		5 => 32
+		6 => 64
+		7 => 128
+		8 => 256
+		9 => 512
+
+config CRYPTO_DEV_FSL_CAAM_INTC
+	bool "Job Ring interrupt coalescing"
+	depends on CRYPTO_DEV_FSL_CAAM_JR
+	help
+	  Enable the Job Ring's interrupt coalescing feature.
+
+	  Note: the driver already provides adequate
+	  interrupt coalescing in software.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
+	int "Job Ring interrupt coalescing count threshold"
+	depends on CRYPTO_DEV_FSL_CAAM_INTC
+	range 1 255
+	default 255
+	help
+	  Select number of descriptor completions to queue before
+	  raising an interrupt, in the range 1-255. Note that a selection
+	  of 1 functionally defeats the coalescing feature, and a selection
+	  equal to or greater than the job ring size will force timeouts.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
+	int "Job Ring interrupt coalescing timer threshold"
+	depends on CRYPTO_DEV_FSL_CAAM_INTC
+	range 1 65535
+	default 2048
+	help
+	  Select number of bus clocks/64 to timeout in the case that one or
+	  more descriptor completions are queued without reaching the count
+	  threshold. Range is 1-65535.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+	tristate "Register algorithm implementations with the Crypto API"
+	depends on CRYPTO_DEV_FSL_CAAM_JR
+	default y
+	select CRYPTO_AEAD
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	help
+	  Selecting this will offload crypto for users of the
+	  scatterlist crypto API (such as the Linux native IPsec
+	  stack) to the SEC4 via job ring.
+
+	  To compile this as a module, choose M here: the module
+	  will be called caamalg.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+	tristate "Queue Interface as Crypto API backend"
+	depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
+	default y
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	help
+	  Selecting this will use CAAM Queue Interface (QI) for sending
+	  and receiving crypto jobs to/from CAAM. This gives better performance
+	  than the job ring interface when the number of cores is greater than
+	  the number of job rings assigned to the kernel. The number of portals
+	  assigned to the kernel should also be more than the number of
+	  job rings.
+
+	  To compile this as a module, choose M here: the module
+	  will be called caamalg_qi.
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API
+	tristate "Register hash algorithm implementations with Crypto API"
+	depends on CRYPTO_DEV_FSL_CAAM_JR
+	default y
+	select CRYPTO_HASH
+	help
+	  Selecting this will offload ahash for users of the
+	  scatterlist crypto API to the SEC4 via job ring.
+
+	  To compile this as a module, choose M here: the module
+	  will be called caamhash.
+
+config CRYPTO_DEV_FSL_CAAM_PKC_API
+        tristate "Register public key cryptography implementations with Crypto API"
+	tristate "Register public key cryptography implementations with Crypto API"
+	depends on CRYPTO_DEV_FSL_CAAM_JR
+	default y
+	select CRYPTO_RSA
+	help
+	  Selecting this will allow SEC Public key support for RSA.
+	  Supported cryptographic primitives: encryption, decryption,
+	  signature and verification.
+
+	  To compile this as a module, choose M here: the module
+	  will be called caam_pkc.
+config CRYPTO_DEV_FSL_CAAM_RNG_API
+	tristate "Register caam device for hwrng API"
+	depends on CRYPTO_DEV_FSL_CAAM_JR
+	default y
+	select CRYPTO_RNG
+	select HW_RANDOM
+	help
+	  Selecting this will register the SEC4 hardware rng to
+	  the hw_random API for supplying the kernel entropy pool.
+
+	  To compile this as a module, choose M here: the module
+	  will be called caamrng.
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+	bool "Enable debug output in CAAM driver"
+	depends on CRYPTO_DEV_FSL_CAAM
+	help
+	  Selecting this will enable printing of various debug
+	  information in the CAAM driver.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+	def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
+		      CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
new file mode 100644
index 0000000..cb652ee
--- /dev/null
+++ b/drivers/crypto/caam/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the CAAM backend and dependent components
+#
+ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
+	ccflags-y := -DDEBUG
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
+
+caam-objs := ctrl.o
+caam_jr-objs := jr.o key_gen.o error.o
+caam_pkc-y := caampkc.o pkc_desc.o
+ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
+	ccflags-y += -DCONFIG_CAAM_QI
+	caam-objs += qi.o
+endif
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
new file mode 100644
index 0000000..ec40f99
--- /dev/null
+++ b/drivers/crypto/caam/caamalg.c
@@ -0,0 +1,3561 @@
+/*
+ * caam - Freescale FSL CAAM support for crypto API
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
+ *
+ * Based on talitos crypto API driver.
+ *
+ * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
+ *
+ * ---------------                     ---------------
+ * | JobDesc #1  |-------------------->|  ShareDesc  |
+ * | *(packet 1) |                     |   (PDB)     |
+ * ---------------      |------------->|  (hashKey)  |
+ *       .              |              | (cipherKey) |
+ *       .              |    |-------->| (operation) |
+ * ---------------      |    |         ---------------
+ * | JobDesc #2  |------|    |
+ * | *(packet 2) |           |
+ * ---------------           |
+ *       .                   |
+ *       .                   |
+ * ---------------           |
+ * | JobDesc #3  |------------
+ * | *(packet 3) |
+ * ---------------
+ *
+ * The SharedDesc never changes for a connection unless rekeyed, but
+ * each packet will likely be in a different place. So all we need
+ * to know to process the packet is where the input is, where the
+ * output goes, and what context we want to process with. Context is
+ * in the SharedDesc, packet references in the JobDesc.
+ *
+ * So, a job desc looks like:
+ *
+ * ---------------------
+ * | Header            |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR       |
+ * | (output buffer)   |
+ * | (output length)   |
+ * | SEQ_IN_PTR        |
+ * | (input buffer)    |
+ * | (input length)    |
+ * ---------------------
+ */
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "key_gen.h"
+#include "caamalg_desc.h"
+
+/*
+ * crypto alg
+ */
+#define CAAM_CRA_PRIORITY		3000
+/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
+#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
+					 CTR_RFC3686_NONCE_SIZE + \
+					 SHA512_DIGEST_SIZE * 2)
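+
+/*
+ * Illustrative sizing (editor's note, not in the original source): with
+ * AES_MAX_KEY_SIZE = 32, CTR_RFC3686_NONCE_SIZE = 4 and SHA512_DIGEST_SIZE
+ * = 64, CAAM_MAX_KEY_SIZE works out to 32 + 4 + 2 * 64 = 164 bytes.
+ */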
+
+#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
+#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
+					 CAAM_CMD_SZ * 4)
+#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
+					 CAAM_CMD_SZ * 5)
+
+#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
+#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
+
+#ifdef DEBUG
+/* for print_hex_dumps with line references */
+#define debug(format, arg...) printk(format, arg)
+#else
+#define debug(format, arg...)
+#endif
+
+static struct list_head alg_list;
+
+struct caam_alg_entry {
+	int class1_alg_type;
+	int class2_alg_type;
+	bool rfc3686;
+	bool geniv;
+};
+
+struct caam_aead_alg {
+	struct aead_alg aead;
+	struct caam_alg_entry caam;
+	bool registered;
+};
+
+/*
+ * per-session context
+ */
+struct caam_ctx {
+	u32 sh_desc_enc[DESC_MAX_USED_LEN];
+	u32 sh_desc_dec[DESC_MAX_USED_LEN];
+	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+	u8 key[CAAM_MAX_KEY_SIZE];
+	dma_addr_t sh_desc_enc_dma;
+	dma_addr_t sh_desc_dec_dma;
+	dma_addr_t sh_desc_givenc_dma;
+	dma_addr_t key_dma;
+	enum dma_data_direction dir;
+	struct device *jrdev;
+	struct alginfo adata;
+	struct alginfo cdata;
+	unsigned int authsize;
+};
+
+static int aead_null_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
+			ctx->adata.keylen_pad;
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
+		ctx->adata.key_inline = true;
+		ctx->adata.key_virt = ctx->key;
+	} else {
+		ctx->adata.key_inline = false;
+		ctx->adata.key_dma = ctx->key_dma;
+	}
+
+	/* aead_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
+				    ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
+		ctx->adata.key_inline = true;
+		ctx->adata.key_virt = ctx->key;
+	} else {
+		ctx->adata.key_inline = false;
+		ctx->adata.key_dma = ctx->key_dma;
+	}
+
+	/* aead_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
+				    ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 struct caam_aead_alg, aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	u32 ctx1_iv_off = 0;
+	u32 *desc, *nonce = NULL;
+	u32 inl_mask;
+	unsigned int data_len[2];
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = alg->caam.rfc3686;
+
+	if (!ctx->authsize)
+		return 0;
+
+	/* NULL encryption / decryption */
+	if (!ctx->cdata.keylen)
+		return aead_null_set_sh_desc(aead);
+
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 */
+	if (is_rfc3686) {
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+	}
+
+	data_len[0] = ctx->adata.keylen_pad;
+	data_len[1] = ctx->cdata.keylen;
+
+	if (alg->caam.geniv)
+		goto skip_enc;
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (desc_inline_query(DESC_AEAD_ENC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
+
+	/* aead_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
+			       ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
+			       false, ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+skip_enc:
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (desc_inline_query(DESC_AEAD_DEC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
+
+	/* aead_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
+			       ctx->authsize, alg->caam.geniv, is_rfc3686,
+			       nonce, ctx1_iv_off, false, ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	if (!alg->caam.geniv)
+		goto skip_givenc;
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
+
+	/* aead_givencrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
+				  ctx->authsize, is_rfc3686, nonce,
+				  ctx1_iv_off, false, ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+skip_givenc:
+	return 0;
+}
+
+static int aead_setauthsize(struct crypto_aead *authenc,
+				    unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	aead_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * AES GCM encrypt shared descriptor
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_GCM_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_GCM_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	gcm_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * RFC4106 encrypt shared descriptor
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+				  false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+				  false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	rfc4106_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * RFC4543 encrypt shared descriptor
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+				  false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+				  false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	rfc4543_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int aead_setkey(struct crypto_aead *aead,
+			       const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	struct crypto_authenc_keys keys;
+	int ret = 0;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+		goto badkey;
+
+#ifdef DEBUG
+	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
+	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
+	       keys.authkeylen);
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	/*
+	 * If DKP is supported, use it in the shared descriptor to generate
+	 * the split key.
+	 */
+	if (ctrlpriv->era >= 6) {
+		ctx->adata.keylen = keys.authkeylen;
+		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+						      OP_ALG_ALGSEL_MASK);
+
+		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+			goto badkey;
+
+		memcpy(ctx->key, keys.authkey, keys.authkeylen);
+		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+		       keys.enckeylen);
+		dma_sync_single_for_device(jrdev, ctx->key_dma,
+					   ctx->adata.keylen_pad +
+					   keys.enckeylen, ctx->dir);
+		goto skip_split_key;
+	}
+
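+	/*
+	 * Without DKP (Era < 6), generate the authentication split key up
+	 * front and place it at the start of ctx->key.
+	 */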
+	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
+			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
+			    keys.enckeylen);
+	if (ret)
+		goto badkey;
+
+	/* append the encryption key to the authentication split key */
+	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+				   keys.enckeylen, ctx->dir);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+		       ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
+skip_split_key:
+	ctx->cdata.keylen = keys.enckeylen;
+	memzero_explicit(&keys, sizeof(keys));
+	return aead_set_sh_desc(aead);
+badkey:
+	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	memzero_explicit(&keys, sizeof(keys));
+	return -EINVAL;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+		      const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+	ctx->cdata.keylen = keylen;
+
+	return gcm_set_sh_desc(aead);
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->cdata.keylen = keylen - 4;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   ctx->dir);
+	return rfc4106_set_sh_desc(aead);
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->cdata.keylen = keylen - 4;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   ctx->dir);
+	return rfc4543_set_sh_desc(aead);
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+	const char *alg_name = crypto_tfm_alg_name(tfm);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 *desc;
+	u32 ctx1_iv_off = 0;
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = (ctr_mode &&
+				 (strstr(alg_name, "rfc3686") != NULL));
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128 bits (16 bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 *	| *key = {KEY, NONCE}
+	 */
+	if (is_rfc3686) {
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+		keylen -= CTR_RFC3686_NONCE_SIZE;
+	}
+
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = key;
+	ctx->cdata.key_inline = true;
+
+	/* ablkcipher_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+				     ctx1_iv_off);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/* ablkcipher_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+				     ctx1_iv_off);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/* ablkcipher_givencrypt shared descriptor */
+	desc = ctx->sh_desc_givenc;
+	cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
+					ctx1_iv_off);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+				 const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+
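+	/*
+	 * XTS uses two concatenated AES keys, so only twice the minimum or
+	 * twice the maximum AES key size is accepted.
+	 */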
+	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(ablkcipher,
+					    CRYPTO_TFM_RES_BAD_KEY_LEN);
+		dev_err(jrdev, "key size mismatch\n");
+		return -EINVAL;
+	}
+
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = key;
+	ctx->cdata.key_inline = true;
+
+	/* xts_ablkcipher_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/* xts_ablkcipher_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @src_nents: number of segments in input s/w scatterlist
+ * @dst_nents: number of segments in output s/w scatterlist
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ */
+struct aead_edesc {
+	int src_nents;
+	int dst_nents;
+	int sec4_sg_bytes;
+	dma_addr_t sec4_sg_dma;
+	struct sec4_sg_entry *sec4_sg;
+	u32 hw_desc[];
+};
+
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input s/w scatterlist
+ * @dst_nents: number of segments in output s/w scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @iv_dir: DMA mapping direction for IV
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ *	     and IV
+ */
+struct ablkcipher_edesc {
+	int src_nents;
+	int dst_nents;
+	dma_addr_t iv_dma;
+	enum dma_data_direction iv_dir;
+	int sec4_sg_bytes;
+	dma_addr_t sec4_sg_dma;
+	struct sec4_sg_entry *sec4_sg;
+	u32 hw_desc[];
+};
+
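+/*
+ * Unmap the source/destination scatterlists, the IV and the S/G (link)
+ * table that were DMA-mapped for a request.
+ */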
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+		       struct scatterlist *dst, int src_nents,
+		       int dst_nents,
+		       dma_addr_t iv_dma, int ivsize,
+		       enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
+		       int sec4_sg_bytes)
+{
+	if (dst != src) {
+		if (src_nents)
+			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+	} else {
+		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+	}
+
+	if (iv_dma)
+		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
+	if (sec4_sg_bytes)
+		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
+				 DMA_TO_DEVICE);
+}
+
+static void aead_unmap(struct device *dev,
+		       struct aead_edesc *edesc,
+		       struct aead_request *req)
+{
+	caam_unmap(dev, req->src, req->dst,
+		   edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
+		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+			     struct ablkcipher_edesc *edesc,
+			     struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	caam_unmap(dev, req->src, req->dst,
+		   edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize, edesc->iv_dir,
+		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+}
+
+static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				   void *context)
+{
+	struct aead_request *req = context;
+	struct aead_edesc *edesc;
+
+#ifdef DEBUG
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
+
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	aead_unmap(jrdev, edesc, req);
+
+	kfree(edesc);
+
+	aead_request_complete(req, err);
+}
+
+static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				   void *context)
+{
+	struct aead_request *req = context;
+	struct aead_edesc *edesc;
+
+#ifdef DEBUG
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
+
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	aead_unmap(jrdev, edesc, req);
+
+	/*
+	 * Verify that the hardware ICV check passed, otherwise return
+	 * -EBADMSG.
+	 */
+	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
+		err = -EBADMSG;
+
+	kfree(edesc);
+
+	aead_request_complete(req, err);
+}
+
+static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				   void *context)
+{
+	struct ablkcipher_request *req = context;
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+#ifdef DEBUG
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       edesc->src_nents > 1 ? 100 : ivsize, 1);
+#endif
+	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+
+	ablkcipher_unmap(jrdev, edesc, req);
+
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block. This is used e.g. by the CTS mode.
+	 */
+	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+				 ivsize, 0);
+
+	/* If an IV was generated, copy it into the GIVCIPHER request */
+	if (edesc->iv_dir == DMA_FROM_DEVICE) {
+		u8 *iv;
+		struct skcipher_givcrypt_request *greq;
+
+		greq = container_of(req, struct skcipher_givcrypt_request,
+				    creq);
+		iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
+		     edesc->sec4_sg_bytes;
+		memcpy(greq->giv, iv, ivsize);
+	}
+
+	kfree(edesc);
+
+	ablkcipher_request_complete(req, err);
+}
+
+static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				    void *context)
+{
+	struct ablkcipher_request *req = context;
+	struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+#endif
+	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+
+	ablkcipher_unmap(jrdev, edesc, req);
+	kfree(edesc);
+
+	ablkcipher_request_complete(req, err);
+}
+
+/*
+ * Fill in aead job descriptor
+ */
+static void init_aead_job(struct aead_request *req,
+			  struct aead_edesc *edesc,
+			  bool all_contig, bool encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	int authsize = ctx->authsize;
+	u32 *desc = edesc->hw_desc;
+	u32 out_options, in_options;
+	dma_addr_t dst_dma, src_dma;
+	int len, sec4_sg_index = 0;
+	dma_addr_t ptr;
+	u32 *sh_desc;
+
+	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
+
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+	if (all_contig) {
+		src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
+		in_options = 0;
+	} else {
+		src_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+		in_options = LDST_SGF;
+	}
+
+	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
+			  in_options);
+
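+	/* Default to in-place operation: output reuses the input pointer */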
+	dst_dma = src_dma;
+	out_options = in_options;
+
+	if (unlikely(req->src != req->dst)) {
+		if (edesc->dst_nents == 1) {
+			dst_dma = sg_dma_address(req->dst);
+		} else {
+			dst_dma = edesc->sec4_sg_dma +
+				  sec4_sg_index *
+				  sizeof(struct sec4_sg_entry);
+			out_options = LDST_SGF;
+		}
+	}
+
+	if (encrypt)
+		append_seq_out_ptr(desc, dst_dma,
+				   req->assoclen + req->cryptlen + authsize,
+				   out_options);
+	else
+		append_seq_out_ptr(desc, dst_dma,
+				   req->assoclen + req->cryptlen - authsize,
+				   out_options);
+}
+
+static void init_gcm_job(struct aead_request *req,
+			 struct aead_edesc *edesc,
+			 bool all_contig, bool encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 *desc = edesc->hw_desc;
+	bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
+	unsigned int last;
+
+	init_aead_job(req, edesc, all_contig, encrypt);
+	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+
+	/* BUG: this should not be specific to generic GCM */
+	last = 0;
+	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
+		last = FIFOLD_TYPE_LAST1;
+
+	/* Read GCM IV */
+	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+			 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
+	/* Append Salt */
+	if (!generic_gcm)
+		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
+	/* Append IV */
+	append_data(desc, req->iv, ivsize);
+	/* End of blank commands */
+}
+
+static void init_authenc_job(struct aead_request *req,
+			     struct aead_edesc *edesc,
+			     bool all_contig, bool encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 struct caam_aead_alg, aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = alg->caam.rfc3686;
+	u32 *desc = edesc->hw_desc;
+	u32 ivoffset = 0;
+
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128 bits (16 bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ivoffset = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 */
+	if (is_rfc3686)
+		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
+
+	init_aead_job(req, edesc, all_contig, encrypt);
+
+	/*
+	 * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
+	 * having DPOVRD as destination.
+	 */
+	if (ctrlpriv->era < 3)
+		append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+	else
+		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
+
+	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
+		append_load_as_imm(desc, req->iv, ivsize,
+				   LDST_CLASS_1_CCB |
+				   LDST_SRCDST_BYTE_CONTEXT |
+				   (ivoffset << LDST_OFFSET_SHIFT));
+}
+
+/*
+ * Fill in ablkcipher job descriptor
+ */
+static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+				struct ablkcipher_edesc *edesc,
+				struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 *desc = edesc->hw_desc;
+	u32 out_options = 0;
+	dma_addr_t dst_dma;
+	int len;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+	pr_err("asked=%d, nbytes=%d\n",
+	       (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
+#endif
+	caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
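+	/* Input: IV followed by the payload, via the S/G link table */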
+	append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
+			  LDST_SGF);
+
+	if (likely(req->src == req->dst)) {
+		dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
+		out_options = LDST_SGF;
+	} else {
+		if (edesc->dst_nents == 1) {
+			dst_dma = sg_dma_address(req->dst);
+		} else {
+			dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
+				  sizeof(struct sec4_sg_entry);
+			out_options = LDST_SGF;
+		}
+	}
+	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
+}
+
+/*
+ * Fill in ablkcipher givencrypt job descriptor
+ */
+static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+				    struct ablkcipher_edesc *edesc,
+				    struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 *desc = edesc->hw_desc;
+	u32 in_options;
+	dma_addr_t dst_dma, src_dma;
+	int len, sec4_sg_index = 0;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+#endif
+	caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+	if (edesc->src_nents == 1) {
+		src_dma = sg_dma_address(req->src);
+		in_options = 0;
+	} else {
+		src_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+		in_options = LDST_SGF;
+	}
+	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
+
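+	/* Output: generated IV followed by ciphertext, via the link table */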
+	dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+		  sizeof(struct sec4_sg_entry);
+	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
+}
+
+/*
+ * allocate and map the aead extended descriptor
+ */
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+					   int desc_bytes, bool *all_contig_ptr,
+					   bool encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	struct aead_edesc *edesc;
+	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
+	unsigned int authsize = ctx->authsize;
+
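+	/*
+	 * Count the S/G entries covering assoclen + cryptlen; on encrypt the
+	 * output additionally carries the ICV (authsize bytes), on decrypt it
+	 * is authsize bytes shorter than the input.
+	 */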
+	if (unlikely(req->dst != req->src)) {
+		src_nents = sg_nents_for_len(req->src, req->assoclen +
+					     req->cryptlen);
+		if (unlikely(src_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+				req->assoclen + req->cryptlen);
+			return ERR_PTR(src_nents);
+		}
+
+		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+					     req->cryptlen +
+						(encrypt ? authsize :
+							   (-authsize)));
+		if (unlikely(dst_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+				req->assoclen + req->cryptlen +
+				(encrypt ? authsize : (-authsize)));
+			return ERR_PTR(dst_nents);
+		}
+	} else {
+		src_nents = sg_nents_for_len(req->src, req->assoclen +
+					     req->cryptlen +
+					     (encrypt ? authsize : 0));
+		if (unlikely(src_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+				req->assoclen + req->cryptlen +
+				(encrypt ? authsize : 0));
+			return ERR_PTR(src_nents);
+		}
+	}
+
+	if (likely(req->src == req->dst)) {
+		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	} else {
+		/* Cover also the case of null (zero length) input data */
+		if (src_nents) {
+			mapped_src_nents = dma_map_sg(jrdev, req->src,
+						      src_nents, DMA_TO_DEVICE);
+			if (unlikely(!mapped_src_nents)) {
+				dev_err(jrdev, "unable to map source\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		} else {
+			mapped_src_nents = 0;
+		}
+
+		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(jrdev, "unable to map destination\n");
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
+	sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+			GFP_DMA | flags);
+	if (!edesc) {
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, DMA_NONE, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+			 desc_bytes;
+	*all_contig_ptr = !(mapped_src_nents > 1);
+
+	sec4_sg_index = 0;
+	if (mapped_src_nents > 1) {
+		sg_to_sec4_sg_last(req->src, mapped_src_nents,
+				   edesc->sec4_sg + sec4_sg_index, 0);
+		sec4_sg_index += mapped_src_nents;
+	}
+	if (mapped_dst_nents > 1) {
+		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+				   edesc->sec4_sg + sec4_sg_index, 0);
+	}
+
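+	/* Skip the S/G table mapping when no link table entries were needed */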
+	if (!sec4_sg_bytes)
+		return edesc;
+
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+	return edesc;
+}
+
+static int gcm_encrypt(struct aead_request *req)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool all_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	init_gcm_job(req, edesc, all_contig, true);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_encrypt(req);
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool all_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+				 &all_contig, true);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	init_authenc_job(req, edesc, all_contig, true);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int gcm_decrypt(struct aead_request *req)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool all_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	init_gcm_job(req, edesc, all_contig, false);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_decrypt(req);
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool all_contig;
+	u32 *desc;
+	int ret = 0;
+
+	caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+		     req->assoclen + req->cryptlen, 1);
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+				 &all_contig, false);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	init_authenc_job(req, edesc, all_contig, false);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+/*
+ * allocate and map the ablkcipher extended descriptor
+ */
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+						       *req, int desc_bytes)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma;
+	u8 *iv;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (unlikely(src_nents < 0)) {
+		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+			req->nbytes);
+		return ERR_PTR(src_nents);
+	}
+
+	if (req->dst != req->src) {
+		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+		if (unlikely(dst_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+				req->nbytes);
+			return ERR_PTR(dst_nents);
+		}
+	}
+
+	if (likely(req->src == req->dst)) {
+		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	} else {
+		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+					      DMA_TO_DEVICE);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(jrdev, "unable to map destination\n");
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
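+	/* Input link table: IV entry followed by the source segments */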
+	sec4_sg_ents = 1 + mapped_src_nents;
+	dst_sg_idx = sec4_sg_ents;
+	sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+
+	/*
+	 * allocate space for base edesc and hw desc commands, link tables, IV
+	 */
+	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
+			GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(jrdev, "could not allocate extended descriptor\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, DMA_NONE, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
+	edesc->iv_dir = DMA_TO_DEVICE;
+
+	/* Make sure IV is located in a DMAable area */
+	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+	memcpy(iv, req->info, ivsize);
+
+	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, DMA_NONE, 0, 0);
+		kfree(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+	sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
+
+	if (mapped_dst_nents > 1) {
+		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+				   edesc->sec4_sg + dst_sg_idx, 0);
+	}
+
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
+		kfree(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+		       sec4_sg_bytes, 1);
+#endif
+
+	return edesc;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block.
+	 */
+	scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+				 ivsize, 0);
+
+	/* Create and submit job descriptor */
+	init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
+	desc = edesc->hw_desc;
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+/*
+ * allocate and map the ablkcipher extended descriptor
+ * for ablkcipher givencrypt
+ */
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+				struct skcipher_givcrypt_request *greq,
+				int desc_bytes)
+{
+	struct ablkcipher_request *req = &greq->creq;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma;
+	u8 *iv;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (unlikely(src_nents < 0)) {
+		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+			req->nbytes);
+		return ERR_PTR(src_nents);
+	}
+
+	if (likely(req->src == req->dst)) {
+		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		dst_nents = src_nents;
+		mapped_dst_nents = src_nents;
+	} else {
+		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+					      DMA_TO_DEVICE);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+		if (unlikely(dst_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+				req->nbytes);
+			return ERR_PTR(dst_nents);
+		}
+
+		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(jrdev, "unable to map destination\n");
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
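+	/*
+	 * Output link table: the to-be-generated IV entry followed by the
+	 * destination segments; the source gets entries of its own only when
+	 * it spans more than one segment.
+	 */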
+	sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+	dst_sg_idx = sec4_sg_ents;
+	sec4_sg_ents += 1 + mapped_dst_nents;
+
+	/*
+	 * allocate space for base edesc and hw desc commands, link tables, IV
+	 */
+	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
+			GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(jrdev, "could not allocate extended descriptor\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, DMA_NONE, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
+	edesc->iv_dir = DMA_FROM_DEVICE;
+
+	/* Make sure IV is located in a DMAable area */
+	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, DMA_NONE, 0, 0);
+		kfree(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (mapped_src_nents > 1)
+		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
+				   0);
+
+	dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
+	sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
+			   dst_sg_idx + 1, 0);
+
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
+		kfree(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+	edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+		       sec4_sg_bytes, 1);
+#endif
+
+	return edesc;
+}
+
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+{
+	struct ablkcipher_request *req = &creq->creq;
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
+				edesc, req);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+#define template_aead		template_u.aead
+#define template_ablkcipher	template_u.ablkcipher
+struct caam_alg_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	u32 type;
+	union {
+		struct ablkcipher_alg ablkcipher;
+	} template_u;
+	u32 class1_alg_type;
+	u32 class2_alg_type;
+};
+
+static struct caam_alg_template driver_algs[] = {
+	/* ablkcipher descriptor */
+	{
+		.name = "cbc(aes)",
+		.driver_name = "cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des3_ede)",
+		.driver_name = "cbc-3des-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des)",
+		.driver_name = "cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "ctr(aes)",
+		.driver_name = "ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "chainiv",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.name = "rfc3686(ctr(aes))",
+		.driver_name = "rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = AES_MIN_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.name = "xts(aes)",
+		.driver_name = "xts-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = xts_ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "eseqiv",
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+	},
+};
+
+static struct caam_aead_alg driver_aeads[] = {
+	{
+		.aead = {
+			.base = {
+				.cra_name = "rfc4106(gcm(aes))",
+				.cra_driver_name = "rfc4106-gcm-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc4106_setkey,
+			.setauthsize = rfc4106_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = GCM_RFC4106_IV_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "rfc4543(gcm(aes))",
+				.cra_driver_name = "rfc4543-gcm-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc4543_setkey,
+			.setauthsize = rfc4543_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = GCM_RFC4543_IV_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	/* Galois Counter Mode */
+	{
+		.aead = {
+			.base = {
+				.cra_name = "gcm(aes)",
+				.cra_driver_name = "gcm-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = gcm_setkey,
+			.setauthsize = gcm_setauthsize,
+			.encrypt = gcm_encrypt,
+			.decrypt = gcm_decrypt,
+			.ivsize = GCM_AES_IV_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	/* single-pass ipsec_esp descriptor */
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc("
+					    "hmac(md5),rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-md5-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc("
+					    "hmac(sha1),rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc("
+					    "hmac(sha224),rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc(hmac(sha256),"
+					    "rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc(hmac(sha384),"
+					    "rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc(hmac(sha512),"
+					    "rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+};
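Editor's note on usage (a minimal sketch, not part of the patch): the templates above are registered under their cra_name, so a kernel consumer reaches them through the generic AEAD API and the crypto core selects the CAAM implementation by priority. The sketch assumes a kernel-module caller, that the src/dst scatterlists already begin with assoclen bytes of associated data, and that the authenc() key blob has been prepacked in the RTA layout crypto_aead_setkey() expects for authenc algorithms.

#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_authenc_encrypt(const u8 *key, unsigned int keylen, u8 *iv,
				   struct scatterlist *src,
				   struct scatterlist *dst,
				   unsigned int assoclen, unsigned int cryptlen)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	struct aead_request *req;
	int ret;

	/* cra_name lookup; "authenc-hmac-sha256-cbc-aes-caam" wins on priority */
	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);	/* RTA-packed authenc key */
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);	/* truncated SHA-256 ICV */
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				  CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);

	/* Block until the asynchronous CAAM job completes */
	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}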
+
+struct caam_crypto_alg {
+	struct crypto_alg crypto_alg;
+	struct list_head entry;
+	struct caam_alg_entry caam;
+};
+
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+			    bool uses_dkp)
+{
+	dma_addr_t dma_addr;
+	struct caam_drv_private *priv;
+
+	ctx->jrdev = caam_jr_alloc();
+	if (IS_ERR(ctx->jrdev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(ctx->jrdev);
+	}
+
+	priv = dev_get_drvdata(ctx->jrdev->parent);
+	if (priv->era >= 6 && uses_dkp)
+		ctx->dir = DMA_BIDIRECTIONAL;
+	else
+		ctx->dir = DMA_TO_DEVICE;
+
+	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
+					offsetof(struct caam_ctx,
+						 sh_desc_enc_dma),
+					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+		dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
+		caam_jr_free(ctx->jrdev);
+		return -ENOMEM;
+	}
+
+	ctx->sh_desc_enc_dma = dma_addr;
+	ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
+						   sh_desc_dec);
+	ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
+						      sh_desc_givenc);
+	ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
+
+	/* copy descriptor header template value */
+	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+
+	return 0;
+}
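A note on the mapping above (illustrative sketch below, using a hypothetical struct rather than the driver's caam_ctx): caam_init_common() maps the encrypt/decrypt/givencrypt shared descriptors and the key with a single dma_map_single_attrs() call covering everything up to the first DMA-handle field, then derives each sub-buffer's bus address with offsetof() arithmetic.

#include <linux/dma-mapping.h>

/* Hypothetical layout; only the fields before the first handle are mapped. */
struct example_ctx {
	u32 sh_desc_enc[64];
	u32 sh_desc_dec[64];
	u8 key[128];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t key_dma;
};

static int example_map_ctx(struct device *dev, struct example_ctx *ctx)
{
	dma_addr_t base;

	/* One mapping for the whole DMA-visible prefix of the context */
	base = dma_map_single(dev, ctx->sh_desc_enc,
			      offsetof(struct example_ctx, sh_desc_enc_dma),
			      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, base))
		return -ENOMEM;

	/* Per-field bus addresses fall out of the same offsetof() arithmetic */
	ctx->sh_desc_enc_dma = base;
	ctx->sh_desc_dec_dma = base + offsetof(struct example_ctx, sh_desc_dec);
	ctx->key_dma = base + offsetof(struct example_ctx, key);
	return 0;
}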
+
+static int caam_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct caam_crypto_alg *caam_alg =
+		 container_of(alg, struct caam_crypto_alg, crypto_alg);
+	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	return caam_init_common(ctx, &caam_alg->caam, false);
+}
+
+static int caam_aead_init(struct crypto_aead *tfm)
+{
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct caam_aead_alg *caam_alg =
+		 container_of(alg, struct caam_aead_alg, aead);
+	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+
+	return caam_init_common(ctx, &caam_alg->caam,
+				alg->setkey == aead_setkey);
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
+	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
+			       offsetof(struct caam_ctx, sh_desc_enc_dma),
+			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+	caam_jr_free(ctx->jrdev);
+}
+
+static void caam_cra_exit(struct crypto_tfm *tfm)
+{
+	caam_exit_common(crypto_tfm_ctx(tfm));
+}
+
+static void caam_aead_exit(struct crypto_aead *tfm)
+{
+	caam_exit_common(crypto_aead_ctx(tfm));
+}
+
+static void __exit caam_algapi_exit(void)
+{
+	struct caam_crypto_alg *t_alg, *n;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+		struct caam_aead_alg *t_alg = driver_aeads + i;
+
+		if (t_alg->registered)
+			crypto_unregister_aead(&t_alg->aead);
+	}
+
+	if (!alg_list.next)
+		return;
+
+	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
+		crypto_unregister_alg(&t_alg->crypto_alg);
+		list_del(&t_alg->entry);
+		kfree(t_alg);
+	}
+}
+
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
+					      *template)
+{
+	struct caam_crypto_alg *t_alg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg) {
+		pr_err("failed to allocate t_alg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	alg = &t_alg->crypto_alg;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 template->driver_name);
+	alg->cra_module = THIS_MODULE;
+	alg->cra_init = caam_cra_init;
+	alg->cra_exit = caam_cra_exit;
+	alg->cra_priority = CAAM_CRA_PRIORITY;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_ctxsize = sizeof(struct caam_ctx);
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+			 template->type;
+	switch (template->type) {
+	case CRYPTO_ALG_TYPE_GIVCIPHER:
+		alg->cra_type = &crypto_givcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg->cra_type = &crypto_ablkcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
+	}
+
+	t_alg->caam.class1_alg_type = template->class1_alg_type;
+	t_alg->caam.class2_alg_type = template->class2_alg_type;
+
+	return t_alg;
+}
+
+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
+{
+	struct aead_alg *alg = &t_alg->aead;
+
+	alg->base.cra_module = THIS_MODULE;
+	alg->base.cra_priority = CAAM_CRA_PRIORITY;
+	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+	alg->init = caam_aead_init;
+	alg->exit = caam_aead_exit;
+}
+
+static int __init caam_algapi_init(void)
+{
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	struct caam_drv_private *priv;
+	int i = 0, err = 0;
+	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+	unsigned int md_limit = SHA512_DIGEST_SIZE;
+	bool registered = false;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
+	INIT_LIST_HEAD(&alg_list);
+
+	/*
+	 * Register crypto algorithms the device supports.
+	 * First, detect presence and attributes of DES, AES, and MD blocks.
+	 */
+	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+	/* If MD is present, limit digest size based on LP256 */
+	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+		md_limit = SHA256_DIGEST_SIZE;
+
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct caam_crypto_alg *t_alg;
+		struct caam_alg_template *alg = driver_algs + i;
+		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		/*
+		 * Check support for AES modes not available
+		 * on LP devices.
+		 */
+		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+			if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+			     OP_ALG_AAI_XTS)
+				continue;
+
+		t_alg = caam_alg_alloc(alg);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			pr_warn("%s alg allocation failed\n", alg->driver_name);
+			continue;
+		}
+
+		err = crypto_register_alg(&t_alg->crypto_alg);
+		if (err) {
+			pr_warn("%s alg registration failed\n",
+				t_alg->crypto_alg.cra_driver_name);
+			kfree(t_alg);
+			continue;
+		}
+
+		list_add_tail(&t_alg->entry, &alg_list);
+		registered = true;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+		struct caam_aead_alg *t_alg = driver_aeads + i;
+		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		/*
+		 * Check support for AES algorithms not available
+		 * on LP devices.
+		 */
+		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+			if (alg_aai == OP_ALG_AAI_GCM)
+				continue;
+
+		/*
+		 * Skip algorithms requiring message digests
+		 * if MD or MD size is not supported by device.
+		 */
+		if (c2_alg_sel &&
+		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
+			continue;
+
+		caam_aead_alg_init(t_alg);
+
+		err = crypto_register_aead(&t_alg->aead);
+		if (err) {
+			pr_warn("%s alg registration failed\n",
+				t_alg->aead.base.cra_driver_name);
+			continue;
+		}
+
+		t_alg->registered = true;
+		registered = true;
+	}
+
+	if (registered)
+		pr_info("caam algorithms registered in /proc/crypto\n");
+
+	return err;
+}
+
+module_init(caam_algapi_init);
+module_exit(caam_algapi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for crypto API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
new file mode 100644
index 0000000..a408edd
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -0,0 +1,1547 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamalg_desc.h"
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+}
+
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+	u32 *jump_cmd, *uncond_jump_cmd;
+
+	/* DK bit is valid only for AES */
+	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+		append_operation(desc, type | OP_ALG_AS_INITFINAL |
+				 OP_ALG_DECRYPT);
+		return;
+	}
+
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+	append_operation(desc, type | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT);
+	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
+	append_operation(desc, type | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+	set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/**
+ * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
+ *                               (non-protocol) with no (null) encryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ *         A split key is required for SEC Era < 6; the size of the split key
+ *         is specified in this case. Valid algorithm values - one of
+ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
+ *         with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize, int era)
+{
+	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (era < 6) {
+		if (adata->key_inline)
+			append_key_as_imm(desc, adata->key_virt,
+					  adata->keylen_pad, adata->keylen,
+					  CLASS_2 | KEY_DEST_MDHA_SPLIT |
+					  KEY_ENC);
+		else
+			append_key(desc, adata->key_dma, adata->keylen,
+				   CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	} else {
+		append_proto_dkp(desc, adata);
+	}
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* assoclen + cryptlen = seqinlen */
+	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Prepare to read and write cryptlen + assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * thus need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+				    MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+				     MOVE_DEST_DESCBUF |
+				     MOVE_WAITCOMP |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Class 2 operation */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Read and write cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+		    MOVE_AUX_LS);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "aead null enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
+
+/**
+ * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
+ *                               (non-protocol) with no (null) decryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ *         A split key is required for SEC Era < 6; the size of the split key
+ *         is specified in this case. Valid algorithm values - one of
+ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
+ *         with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize, int era)
+{
+	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (era < 6) {
+		if (adata->key_inline)
+			append_key_as_imm(desc, adata->key_virt,
+					  adata->keylen_pad, adata->keylen,
+					  CLASS_2 | KEY_DEST_MDHA_SPLIT |
+					  KEY_ENC);
+		else
+			append_key(desc, adata->key_dma, adata->keylen,
+				   CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	} else {
+		append_proto_dkp(desc, adata);
+	}
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 2 operation */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* assoclen + cryptlen = seqoutlen */
+	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* Prepare to read and write cryptlen + assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * thus need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+				    MOVE_DEST_MATH2 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+				     MOVE_DEST_DESCBUF |
+				     MOVE_WAITCOMP |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Read and write cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/*
+	 * Insert a NOP here, since we need at least 4 instructions between
+	 * code patching the descriptor buffer and the location being patched.
+	 */
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+		    MOVE_AUX_LS);
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Load ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "aead null dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
+
+static void init_sh_desc_key_aead(u32 * const desc,
+				  struct alginfo * const cdata,
+				  struct alginfo * const adata,
+				  const bool is_rfc3686, u32 *nonce, int era)
+{
+	u32 *key_jump_cmd;
+	unsigned int enckeylen = cdata->keylen;
+
+	/* Note: Context registers are saved. */
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/*
+	 * RFC3686 specific:
+	 *	| key = {AUTH_KEY, ENC_KEY, NONCE}
+	 *	| enckeylen = encryption key size + nonce size
+	 */
+	if (is_rfc3686)
+		enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
+	if (era < 6) {
+		if (adata->key_inline)
+			append_key_as_imm(desc, adata->key_virt,
+					  adata->keylen_pad, adata->keylen,
+					  CLASS_2 | KEY_DEST_MDHA_SPLIT |
+					  KEY_ENC);
+		else
+			append_key(desc, adata->key_dma, adata->keylen,
+				   CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	} else {
+		append_proto_dkp(desc, adata);
+	}
+
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, enckeylen,
+				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686) {
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc,
+			    MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX |
+			    (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+}
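To make the RFC3686 key convention above concrete, here is a small sketch (not driver code) of how a caller splits the combined blob: the last CTR_RFC3686_NONCE_SIZE (4) bytes of the encryption key material are the nonce, so the effective enckeylen shrinks by that amount and the nonce pointer lands on the tail. CTR_RFC3686_NONCE_SIZE comes from crypto/ctr.h.

#include <crypto/ctr.h>
#include <linux/types.h>

/* Split {ENC_KEY, NONCE} as laid out for rfc3686(ctr(aes)) transforms */
static void example_split_rfc3686_key(const u8 *enckey, unsigned int enckeylen,
				       const u8 **nonce,
				       unsigned int *real_enckeylen)
{
	*real_enckeylen = enckeylen - CTR_RFC3686_NONCE_SIZE;
	*nonce = enckey + *real_enckeylen;	/* trailing 4-byte nonce */
}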
+
+/**
+ * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
+ *                          (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions.
+ *         A split key is required for SEC Era < 6; the size of the split key
+ *         is specified in this case. Valid algorithm values - one of
+ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
+ *         with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
+ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int ivsize,
+			    unsigned int icvsize, const bool is_rfc3686,
+			    u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
+			    int era)
+{
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
+
+	/* Class 2 operation */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+				LDST_SRCDST_BYTE_CONTEXT |
+				(ctx1_iv_off << LDST_OFFSET_SHIFT));
+	}
+
+	/* Read and write assoclen bytes */
+	if (is_qi || era < 3) {
+		append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+	} else {
+		append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+		append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+	}
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+				      FIFOLDST_VLF);
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
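As a usage sketch (illustrative only, not the driver's own descriptor-setup path), a job-ring caller with a populated caam_ctx could build its encrypt shared descriptor with the helper above; the ivsize/authsize values, and the assumption that cdata/adata already carry algtype plus key references, are the caller's responsibility.

static void example_build_aead_enc_desc(struct caam_ctx *ctx,
					unsigned int ivsize,
					unsigned int authsize, int era)
{
	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, authsize,
			       false,	/* plain cbc/ctr, no rfc3686 wrap */
			       NULL,	/* therefore no nonce */
			       0,	/* IV at offset 0 in CONTEXT1 */
			       false,	/* job-ring backend, not caam/qi */
			       era);
}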
+
+/**
+ * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
+ *                          (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions.
+ *         A split key is required for SEC Era < 6; the size of the split key
+ *         is specified in this case. Valid algorithm values - one of
+ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
+ *         with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
+ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int ivsize,
+			    unsigned int icvsize, const bool geniv,
+			    const bool is_rfc3686, u32 *nonce,
+			    const u32 ctx1_iv_off, const bool is_qi, int era)
+{
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
+
+	/* Class 2 operation */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		if (!geniv)
+			append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+					LDST_SRCDST_BYTE_CONTEXT |
+					(ctx1_iv_off << LDST_OFFSET_SHIFT));
+	}
+
+	/* Read and write assoclen bytes */
+	if (is_qi || era < 3) {
+		append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+		if (geniv)
+			append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
+						ivsize);
+		else
+			append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
+					CAAM_CMD_SZ);
+	} else {
+		append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+		if (geniv)
+			append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
+						ivsize);
+		else
+			append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
+					CAAM_CMD_SZ);
+	}
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	if (geniv) {
+		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+				LDST_SRCDST_BYTE_CONTEXT |
+				(ctx1_iv_off << LDST_OFFSET_SHIFT));
+		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+	}
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Choose operation */
+	if (ctx1_iv_off)
+		append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+				 OP_ALG_DECRYPT);
+	else
+		append_dec_op1(desc, cdata->algtype);
+
+	/* Read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+	/* Load ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
+
+/**
+ * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
+ *                             (non-protocol) with HW-generated initialization
+ *                             vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions.
+ *         A split key is required for SEC Era < 6; the size of the split key
+ *         is specified in this case. Valid algorithm values - one of
+ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
+ *         with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
+ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+			       struct alginfo *adata, unsigned int ivsize,
+			       unsigned int icvsize, const bool is_rfc3686,
+			       u32 *nonce, const u32 ctx1_iv_off,
+			       const bool is_qi, int era)
+{
+	u32 geniv, moveiv;
+
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+	}
+
+	if (is_rfc3686) {
+		if (is_qi)
+			append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+					LDST_SRCDST_BYTE_CONTEXT |
+					(ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+		goto copy_iv;
+	}
+
+	/* Generate IV */
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_WAITCOMP |
+		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+		    (ivsize << MOVE_LEN_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+copy_iv:
+	/* Copy IV from class 1 context to the output FIFO */
+	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+		    (ivsize << MOVE_LEN_SHIFT));
+
+	/* Return to the encrypt flow: class 2 (authentication) operation */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Read and write assoclen bytes */
+	if (is_qi || era < 3) {
+		append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+	} else {
+		append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+		append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+	}
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	/* Copy iv from outfifo to class 2 fifo */
+	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
+			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Will write ivsize + cryptlen */
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* No need to reload the IV */
+	append_seq_fifo_load(desc, ivsize,
+			     FIFOLD_CLASS_SKIP);
+
+	/* Will read cryptlen */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
+			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "aead givenc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
+
+/**
+ * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int ivsize, unsigned int icvsize,
+			   const bool is_qi)
+{
+	u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
+	    *zero_assoc_jump_cmd2;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* skip key loading if the key is already loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
+					ivsize);
+	} else {
+		append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
+				CAAM_CMD_SZ);
+	}
+
+	/* if assoclen + cryptlen is ZERO, skip to ICV write */
+	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+						 JUMP_COND_MATH_Z);
+
+	if (is_qi)
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+	/* if assoclen is ZERO, skip reading the assoc data */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+					   JUMP_COND_MATH_Z);
+
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* cryptlen = seqinlen - assoclen */
+	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+	/* if cryptlen is ZERO jump to zero-payload commands */
+	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+					    JUMP_COND_MATH_Z);
+
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* write encrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+	/* jump to ICV writing */
+	if (is_qi)
+		append_jump(desc, JUMP_TEST_ALL | 4);
+	else
+		append_jump(desc, JUMP_TEST_ALL | 2);
+
+	/* zero-payload commands */
+	set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+	if (is_qi)
+		/* jump to ICV writing */
+		append_jump(desc, JUMP_TEST_ALL | 2);
+
+	/* There is no input data */
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+	if (is_qi)
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
+				     FIFOLD_TYPE_LAST1);
+
+	/* write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
+
+/**
+ * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int ivsize, unsigned int icvsize,
+			   const bool is_qi)
+{
+	u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL |
+				   JUMP_TEST_ALL | JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
+	/* if assoclen is ZERO, skip reading the assoc data */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+						 JUMP_COND_MATH_Z);
+
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+	/* cryptlen = seqoutlen - assoclen */
+	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* jump to zero-payload command if cryptlen is zero */
+	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+					    JUMP_COND_MATH_Z);
+
+	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* store encrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/* zero-payload command */
+	set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+	/* read ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
+
+/**
+ * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
+ *                             (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		/* Read salt and IV */
+		append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+					cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+					FIFOLD_TYPE_IV);
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
+	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+	/* Skip IV */
+	append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
+
+	/* Will read cryptlen bytes */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* cryptlen = seqoutlen - assoclen */
+	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Write encrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* Read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
+
+/**
+ * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
+ *                             (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 |
+				  KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		/* Read salt and IV */
+		append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+					cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+					FIFOLD_TYPE_IV);
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
+	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+	/* Skip IV */
+	append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
+
+	/* Will read cryptlen bytes */
+	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* Will write cryptlen bytes */
+	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* Store payload data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* Read encrypted data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/* Read ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
+
+/**
+ * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
+ *                             (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi)
+{
+	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	if (is_qi) {
+		/* assoclen is not needed, skip it */
+		append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
+
+		/* Read salt and IV */
+		append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+					cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+					FIFOLD_TYPE_IV);
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
+	/* assoclen + cryptlen = seqinlen */
+	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * so we need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
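+	/*
+	 * Both MOVEs are re-targeted below via set_move_tgt_here(), so that
+	 * the length field of the "Move payload data to OFIFO" command gets
+	 * patched at run time with the value held in MATH3
+	 * (assoclen + cryptlen).
+	 */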
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+				     (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
+
+	/* Will read assoclen + cryptlen bytes */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Will write assoclen + cryptlen bytes */
+	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Read and write assoclen + cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	/* Move payload data to OFIFO */
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
+
+/**
+ * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
+ *                             (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi)
+{
+	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	if (is_qi) {
+		/* assoclen is not needed, skip it */
+		append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
+
+		/* Read salt and IV */
+		append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+					cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+					FIFOLD_TYPE_IV);
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
+	/* assoclen + cryptlen = seqoutlen */
+	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * so we need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+				     (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
+
+	/* Will read assoclen + cryptlen bytes */
+	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* Will write assoclen + cryptlen bytes */
+	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* Store payload data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* In-snoop assoclen + cryptlen data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	/* Move payload data to OFIFO */
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Read ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/**
+ * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load nonce into CONTEXT1 reg */
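+	/*
+	 * The nonce is loaded as immediate data into the output FIFO and then
+	 * MOVEd into CONTEXT1 at byte offset 16, so that once the IV and
+	 * counter are loaded below CONTEXT1[255:128] holds {NONCE, IV, CTR}.
+	 */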
+	if (is_rfc3686) {
+		const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Load iv */
+	append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Load operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
+
+/**
+ * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* load IV */
+	append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Choose operation */
+	if (ctx1_iv_off)
+		append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+				 OP_ALG_DECRYPT);
+	else
+		append_dec_op1(desc, cdata->algtype);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
+
+/**
+ * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
+ *                                   with HW-generated initialization vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ *         with OP_ALG_AAI_CBC.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+				     unsigned int ivsize, const bool is_rfc3686,
+				     const u32 ctx1_iv_off)
+{
+	u32 *key_jump_cmd, geniv;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load Nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Generate IV */
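+	/*
+	 * The info FIFO entry built here requests ivsize random bytes from
+	 * the pad block (NFIFOENTRY_PTYPE_RND) for the DECO; with the
+	 * automatic info FIFO disabled, the generated bytes are then moved
+	 * from the input FIFO into CONTEXT1 at the IV offset.
+	 */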
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
+		(ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
+		    MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Copy generated IV to memory */
+	append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+			 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	if (ctx1_iv_off)
+		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+			    (1 << JUMP_OFFSET_SHIFT));
+
+	/* Load operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
+ *                                    descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+{
+	__be64 sector_size = cpu_to_be64(512);
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 keys only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load sector size with index 40 bytes (0x28) */
+	append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+			   LDST_SRCDST_BYTE_CONTEXT |
+			   (0x28 << LDST_OFFSET_SHIFT));
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/*
+	 * create sequence for loading the sector index
+	 * Upper 8B of IV - will be used as sector index
+	 * Lower 8B of IV - will be discarded
+	 */
+	append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			(0x20 << LDST_OFFSET_SHIFT));
+	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+	/* Load operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
+ *                                    descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+{
+	__be64 sector_size = cpu_to_be64(512);
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load sector size with index 40 bytes (0x28) */
+	append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+			   LDST_SRCDST_BYTE_CONTEXT |
+			   (0x28 << LDST_OFFSET_SHIFT));
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/*
+	 * create sequence for loading the sector index
+	 * Upper 8B of IV - will be used as sector index
+	 * Lower 8B of IV - will be discarded
+	 */
+	append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			(0x20 << LDST_OFFSET_SHIFT));
+	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+	/* Load operation */
+	append_dec_op1(desc, cdata->algtype);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM descriptor support");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
new file mode 100644
index 0000000..a917af5
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#ifndef _CAAMALG_DESC_H_
+#define _CAAMALG_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_QI_AEAD_ENC_LEN		(DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
+#define DESC_QI_AEAD_DEC_LEN		(DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
+#define DESC_QI_AEAD_GIVENC_LEN		(DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
+
+/* Note: Nonce is counted in cdata.keylen */
+#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)
+
+#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_QI_GCM_ENC_LEN		(DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
+#define DESC_QI_GCM_DEC_LEN		(DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4106_ENC_LEN		(DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4106_DEC_LEN		(DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4543_ENC_LEN		(DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4543_DEC_LEN		(DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 15 * CAAM_CMD_SZ)
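+
+/*
+ * These worst-case command counts are checked by users (e.g. caamalg_qi.c)
+ * against the 64-word descriptor buffer (CAAM_DESC_BYTES_MAX, or through
+ * desc_inline_query()) to decide whether keys can be inlined as immediate
+ * data or must be referenced by DMA address.
+ */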
+
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize, int era);
+
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize, int era);
+
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int ivsize,
+			    unsigned int icvsize, const bool is_rfc3686,
+			    u32 *nonce, const u32 ctx1_iv_off,
+			    const bool is_qi, int era);
+
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int ivsize,
+			    unsigned int icvsize, const bool geniv,
+			    const bool is_rfc3686, u32 *nonce,
+			    const u32 ctx1_iv_off, const bool is_qi, int era);
+
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+			       struct alginfo *adata, unsigned int ivsize,
+			       unsigned int icvsize, const bool is_rfc3686,
+			       u32 *nonce, const u32 ctx1_iv_off,
+			       const bool is_qi, int era);
+
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int ivsize, unsigned int icvsize,
+			   const bool is_qi);
+
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int ivsize, unsigned int icvsize,
+			   const bool is_qi);
+
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi);
+
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi);
+
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi);
+
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi);
+
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+				     unsigned int ivsize, const bool is_rfc3686,
+				     const u32 ctx1_iv_off);
+
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
+
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+
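+/*
+ * Typical usage (sketch, mirroring caamalg_qi.c): a setkey/setauthsize
+ * handler fills in struct alginfo and then (re)builds the per-tfm shared
+ * descriptors, e.g.
+ *
+ *	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ *			      ctx->authsize, true);
+ *	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ *			      ctx->authsize, true);
+ */
+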
+#endif /* _CAAMALG_DESC_H_ */
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
new file mode 100644
index 0000000..d7aa7d7
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -0,0 +1,2846 @@
+/*
+ * Freescale (FSL) CAAM support for the crypto API over the QI backend.
+ * Based on caamalg.c
+ *
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ */
+
+#include "compat.h"
+#include "ctrl.h"
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "error.h"
+#include "sg_sw_qm.h"
+#include "key_gen.h"
+#include "qi.h"
+#include "jr.h"
+#include "caamalg_desc.h"
+
+/*
+ * crypto alg
+ */
+#define CAAM_CRA_PRIORITY		2000
+/* max key size is the sum of AES_MAX_KEY_SIZE and the max split key size */
+#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
+					 SHA512_DIGEST_SIZE * 2)
+
+#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
+					 CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
+
+struct caam_alg_entry {
+	int class1_alg_type;
+	int class2_alg_type;
+	bool rfc3686;
+	bool geniv;
+};
+
+struct caam_aead_alg {
+	struct aead_alg aead;
+	struct caam_alg_entry caam;
+	bool registered;
+};
+
+/*
+ * per-session context
+ */
+struct caam_ctx {
+	struct device *jrdev;
+	u32 sh_desc_enc[DESC_MAX_USED_LEN];
+	u32 sh_desc_dec[DESC_MAX_USED_LEN];
+	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+	u8 key[CAAM_MAX_KEY_SIZE];
+	dma_addr_t key_dma;
+	enum dma_data_direction dir;
+	struct alginfo adata;
+	struct alginfo cdata;
+	unsigned int authsize;
+	struct device *qidev;
+	spinlock_t lock;	/* Protects multiple init of driver context */
+	struct caam_drv_ctx *drv_ctx[NUM_OP];
+};
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 typeof(*alg), aead);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 ctx1_iv_off = 0;
+	u32 *nonce = NULL;
+	unsigned int data_len[2];
+	u32 inl_mask;
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = alg->caam.rfc3686;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128 bits (16 bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 */
+	if (is_rfc3686) {
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+	}
+
+	data_len[0] = ctx->adata.keylen_pad;
+	data_len[1] = ctx->cdata.keylen;
+
+	if (alg->caam.geniv)
+		goto skip_enc;
+
+	/* aead_encrypt shared descriptor */
+	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
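+	/*
+	 * inl_mask bit 0 refers to the (split) authentication key, bit 1 to
+	 * the cipher key: keys that fit are inlined in the shared descriptor,
+	 * the others are referenced by DMA address.
+	 */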
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
+
+	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+			       ivsize, ctx->authsize, is_rfc3686, nonce,
+			       ctx1_iv_off, true, ctrlpriv->era);
+
+skip_enc:
+	/* aead_decrypt shared descriptor */
+	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
+
+	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
+			       ivsize, ctx->authsize, alg->caam.geniv,
+			       is_rfc3686, nonce, ctx1_iv_off, true,
+			       ctrlpriv->era);
+
+	if (!alg->caam.geniv)
+		goto skip_givenc;
+
+	/* aead_givencrypt shared descriptor */
+	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
+
+	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+				  ivsize, ctx->authsize, is_rfc3686, nonce,
+				  ctx1_iv_off, true, ctrlpriv->era);
+
+skip_givenc:
+	return 0;
+}
+
+static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	aead_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+		       unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	struct crypto_authenc_keys keys;
+	int ret = 0;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+		goto badkey;
+
+#ifdef DEBUG
+	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+		keys.authkeylen + keys.enckeylen, keys.enckeylen,
+		keys.authkeylen);
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	/*
+	 * If DKP is supported, use it in the shared descriptor to generate
+	 * the split key.
+	 */
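+	/*
+	 * With DKP the plain authentication key is kept in ctx->key and the
+	 * split key is derived by CAAM itself when the shared descriptor
+	 * runs, so gen_split_key() below is skipped.
+	 */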
+	if (ctrlpriv->era >= 6) {
+		ctx->adata.keylen = keys.authkeylen;
+		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+						      OP_ALG_ALGSEL_MASK);
+
+		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+			goto badkey;
+
+		memcpy(ctx->key, keys.authkey, keys.authkeylen);
+		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+		       keys.enckeylen);
+		dma_sync_single_for_device(jrdev, ctx->key_dma,
+					   ctx->adata.keylen_pad +
+					   keys.enckeylen, ctx->dir);
+		goto skip_split_key;
+	}
+
+	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
+			    keys.enckeylen);
+	if (ret)
+		goto badkey;
+
+	/* append the encryption key after the auth split key */
+	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+				   keys.enckeylen, ctx->dir);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+		       ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
+skip_split_key:
+	ctx->cdata.keylen = keys.enckeylen;
+
+	ret = aead_set_sh_desc(aead);
+	if (ret)
+		goto badkey;
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			goto badkey;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			goto badkey;
+		}
+	}
+
+	memzero_explicit(&keys, sizeof(keys));
+	return ret;
+badkey:
+	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	memzero_explicit(&keys, sizeof(keys));
+	return -EINVAL;
+}
+
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
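+	/*
+	 * rem_bytes is the room left in the 64-word descriptor buffer after
+	 * the job descriptor I/O commands and the key; if the worst-case
+	 * shared descriptor still fits, the key is inlined as immediate data,
+	 * otherwise it is referenced by DMA address.
+	 */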
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+			      ctx->authsize, true);
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+			      ctx->authsize, true);
+
+	return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	gcm_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+		      const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	int ret;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+	ctx->cdata.keylen = keylen;
+
+	ret = gcm_set_sh_desc(aead);
+	if (ret)
+		return ret;
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			return ret;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	ctx->cdata.key_virt = ctx->key;
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+				  ctx->authsize, true);
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+				  ctx->authsize, true);
+
+	return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	rfc4106_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	int ret;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->cdata.keylen = keylen - 4;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   ctx->dir);
+
+	ret = rfc4106_set_sh_desc(aead);
+	if (ret)
+		return ret;
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			return ret;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	ctx->cdata.key_virt = ctx->key;
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+				  ctx->authsize, true);
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+				  ctx->authsize, true);
+
+	return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	rfc4543_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	int ret;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->cdata.keylen = keylen - 4;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   ctx->dir);
+
+	ret = rfc4543_set_sh_desc(aead);
+	if (ret)
+		return ret;
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			return ret;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+	const char *alg_name = crypto_tfm_alg_name(tfm);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 ctx1_iv_off = 0;
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+	int ret = 0;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128 bits (16 bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 *	| *key = {KEY, NONCE}
+	 */
+	if (is_rfc3686) {
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+		keylen -= CTR_RFC3686_NONCE_SIZE;
+	}
+
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = key;
+	ctx->cdata.key_inline = true;
+
+	/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
+	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+				     is_rfc3686, ctx1_iv_off);
+	cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+				     is_rfc3686, ctx1_iv_off);
+	cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
+					ivsize, is_rfc3686, ctx1_iv_off);
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			goto badkey;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			goto badkey;
+		}
+	}
+
+	if (ctx->drv_ctx[GIVENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
+					  ctx->sh_desc_givenc);
+		if (ret) {
+			dev_err(jrdev, "driver givenc context update failed\n");
+			goto badkey;
+		}
+	}
+
+	return ret;
+badkey:
+	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+				 const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	int ret = 0;
+
+	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
+		dev_err(jrdev, "key size mismatch\n");
+		goto badkey;
+	}
+
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = key;
+	ctx->cdata.key_inline = true;
+
+	/* xts ablkcipher encrypt, decrypt shared descriptors */
+	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
+	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			goto badkey;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			goto badkey;
+		}
+	}
+
+	return ret;
+badkey:
+	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @assoclen: associated data length, in CAAM endianness
+ * @assoclen_dma: bus physical mapped address of req->assoclen
+ * @drv_req: driver-specific request structure
+ * @sgt: the h/w link table, followed by IV
+ */
+struct aead_edesc {
+	int src_nents;
+	int dst_nents;
+	dma_addr_t iv_dma;
+	int qm_sg_bytes;
+	dma_addr_t qm_sg_dma;
+	unsigned int assoclen;
+	dma_addr_t assoclen_dma;
+	struct caam_drv_req drv_req;
+	struct qm_sg_entry sgt[0];
+};
+
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @drv_req: driver-specific request structure
+ * @sgt: the h/w link table, followed by IV
+ */
+struct ablkcipher_edesc {
+	int src_nents;
+	int dst_nents;
+	dma_addr_t iv_dma;
+	int qm_sg_bytes;
+	dma_addr_t qm_sg_dma;
+	struct caam_drv_req drv_req;
+	struct qm_sg_entry sgt[0];
+};
+
+static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
+					enum optype type)
+{
+	/*
+	 * This function is called on the fast path with values of 'type'
+	 * known at compile time. Invalid arguments are not expected and
+	 * thus no checks are made.
+	 */
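+	/*
+	 * Driver contexts are created lazily, one per operation type; the
+	 * pointer is re-checked under ctx->lock so that only one core ends
+	 * up initializing it.
+	 */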
+	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
+	u32 *desc;
+
+	if (unlikely(!drv_ctx)) {
+		spin_lock(&ctx->lock);
+
+		/* Re-check in case another core already initialized drv_ctx */
+		drv_ctx = ctx->drv_ctx[type];
+		if (!drv_ctx) {
+			int cpu;
+
+			if (type == ENCRYPT)
+				desc = ctx->sh_desc_enc;
+			else if (type == DECRYPT)
+				desc = ctx->sh_desc_dec;
+			else /* (type == GIVENCRYPT) */
+				desc = ctx->sh_desc_givenc;
+
+			cpu = smp_processor_id();
+			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
+			if (likely(!IS_ERR_OR_NULL(drv_ctx)))
+				drv_ctx->op_type = type;
+
+			ctx->drv_ctx[type] = drv_ctx;
+		}
+
+		spin_unlock(&ctx->lock);
+	}
+
+	return drv_ctx;
+}
+
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+		       struct scatterlist *dst, int src_nents,
+		       int dst_nents, dma_addr_t iv_dma, int ivsize,
+		       enum optype op_type, dma_addr_t qm_sg_dma,
+		       int qm_sg_bytes)
+{
+	if (dst != src) {
+		if (src_nents)
+			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+	} else {
+		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+	}
+
+	if (iv_dma)
+		dma_unmap_single(dev, iv_dma, ivsize,
+				 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
+							 DMA_TO_DEVICE);
+	if (qm_sg_bytes)
+		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
+}
+
+static void aead_unmap(struct device *dev,
+		       struct aead_edesc *edesc,
+		       struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	int ivsize = crypto_aead_ivsize(aead);
+
+	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
+		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
+	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+			     struct ablkcipher_edesc *edesc,
+			     struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
+		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
+static void aead_done(struct caam_drv_req *drv_req, u32 status)
+{
+	struct device *qidev;
+	struct aead_edesc *edesc;
+	struct aead_request *aead_req = drv_req->app_ctx;
+	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+	int ecode = 0;
+
+	qidev = caam_ctx->qidev;
+
+	if (unlikely(status)) {
+		u32 ssrc = status & JRSTA_SSRC_MASK;
+		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+
+		caam_jr_strstatus(qidev, status);
+		/*
+		 * Verify that the hardware authentication (ICV) check passed;
+		 * otherwise report -EBADMSG.
+		 */
+		if (ssrc == JRSTA_SSRC_CCB_ERROR &&
+		    err_id == JRSTA_CCBERR_ERRID_ICVCHK)
+			ecode = -EBADMSG;
+		else
+			ecode = -EIO;
+	}
+
+	edesc = container_of(drv_req, typeof(*edesc), drv_req);
+	aead_unmap(qidev, edesc, aead_req);
+
+	aead_request_complete(aead_req, ecode);
+	qi_cache_free(edesc);
+}
+
+/*
+ * allocate and map the aead extended descriptor
+ */
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+					   bool encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 typeof(*alg), aead);
+	struct device *qidev = ctx->qidev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	struct aead_edesc *edesc;
+	dma_addr_t qm_sg_dma, iv_dma = 0;
+	int ivsize = 0;
+	unsigned int authsize = ctx->authsize;
+	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
+	int in_len, out_len;
+	struct qm_sg_entry *sg_table, *fd_sgt;
+	struct caam_drv_ctx *drv_ctx;
+	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+
+	drv_ctx = get_drv_ctx(ctx, op_type);
+	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+		return (struct aead_edesc *)drv_ctx;
+
+	/* allocate space for base edesc, link tables and IV */
+	edesc = qi_cache_alloc(GFP_DMA | flags);
+	if (unlikely(!edesc)) {
+		dev_err(qidev, "could not allocate extended descriptor\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (likely(req->src == req->dst)) {
+		src_nents = sg_nents_for_len(req->src, req->assoclen +
+					     req->cryptlen +
+						(encrypt ? authsize : 0));
+		if (unlikely(src_nents < 0)) {
+			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+				req->assoclen + req->cryptlen +
+				(encrypt ? authsize : 0));
+			qi_cache_free(edesc);
+			return ERR_PTR(src_nents);
+		}
+
+		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(qidev, "unable to map source\n");
+			qi_cache_free(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
+	} else {
+		src_nents = sg_nents_for_len(req->src, req->assoclen +
+					     req->cryptlen);
+		if (unlikely(src_nents < 0)) {
+			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+				req->assoclen + req->cryptlen);
+			qi_cache_free(edesc);
+			return ERR_PTR(src_nents);
+		}
+
+		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+					     req->cryptlen +
+					     (encrypt ? authsize :
+							(-authsize)));
+		if (unlikely(dst_nents < 0)) {
+			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+				req->assoclen + req->cryptlen +
+				(encrypt ? authsize : (-authsize)));
+			qi_cache_free(edesc);
+			return ERR_PTR(dst_nents);
+		}
+
+		if (src_nents) {
+			mapped_src_nents = dma_map_sg(qidev, req->src,
+						      src_nents, DMA_TO_DEVICE);
+			if (unlikely(!mapped_src_nents)) {
+				dev_err(qidev, "unable to map source\n");
+				qi_cache_free(edesc);
+				return ERR_PTR(-ENOMEM);
+			}
+		} else {
+			mapped_src_nents = 0;
+		}
+
+		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(qidev, "unable to map destination\n");
+			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+			qi_cache_free(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
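+	/*
+	 * Note: an IV entry is added to the S/G table only when the caller
+	 * provides the IV (non-geniv algorithms) or, for RFC3686, on
+	 * encryption; for geniv algorithms the shared descriptor takes care
+	 * of the IV, so ivsize stays 0 here.
+	 */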
+	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
+		ivsize = crypto_aead_ivsize(aead);
+
+	/*
+	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+	 * Input is not contiguous.
+	 */
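+	/*
+	 * Entry count, as computed below: one entry for the DMA-mapped
+	 * assoclen value, one for the IV when present, one per mapped source
+	 * segment, plus the destination segments when the output scatterlist
+	 * maps to more than one segment and therefore needs table entries of
+	 * its own.
+	 */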
+	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
+		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+	sg_table = &edesc->sgt[0];
+	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+		     CAAM_QI_MEMCACHE_SIZE)) {
+		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+			qm_sg_ents, ivsize);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (ivsize) {
+		u8 *iv = (u8 *)(sg_table + qm_sg_ents);
+
+		/* Make sure IV is located in a DMAable area */
+		memcpy(iv, req->iv, ivsize);
+
+		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+		if (dma_mapping_error(qidev, iv_dma)) {
+			dev_err(qidev, "unable to map IV\n");
+			caam_unmap(qidev, req->src, req->dst, src_nents,
+				   dst_nents, 0, 0, 0, 0, 0);
+			qi_cache_free(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->iv_dma = iv_dma;
+	edesc->drv_req.app_ctx = req;
+	edesc->drv_req.cbk = aead_done;
+	edesc->drv_req.drv_ctx = drv_ctx;
+
+	edesc->assoclen = cpu_to_caam32(req->assoclen);
+	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
+					     DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
+		dev_err(qidev, "unable to map assoclen\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, op_type, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
+	qm_sg_index++;
+	if (ivsize) {
+		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
+		qm_sg_index++;
+	}
+	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+	qm_sg_index += mapped_src_nents;
+
+	if (mapped_dst_nents > 1)
+		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+				 qm_sg_index, 0);
+
+	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, qm_sg_dma)) {
+		dev_err(qidev, "unable to map S/G table\n");
+		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, op_type, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->qm_sg_dma = qm_sg_dma;
+	edesc->qm_sg_bytes = qm_sg_bytes;
+
+	out_len = req->assoclen + req->cryptlen +
+		  (encrypt ? ctx->authsize : (-ctx->authsize));
+	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
+
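+	/*
+	 * fd_sgt[1] is the input entry and points at the whole qm S/G table
+	 * (assoclen, optional IV, source). fd_sgt[0] is the output entry and
+	 * points either at the destination buffer directly (single mapped
+	 * segment) or at the part of the table that covers the destination.
+	 */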
+	fd_sgt = &edesc->drv_req.fd_sgt[0];
+	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
+	if (req->dst == req->src) {
+		if (mapped_src_nents == 1)
+			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
+					 out_len, 0);
+		else
+			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
+					     (1 + !!ivsize) * sizeof(*sg_table),
+					     out_len, 0);
+	} else if (mapped_dst_nents == 1) {
+		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
+				 0);
+	} else {
+		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
+				     qm_sg_index, out_len, 0);
+	}
+
+	return edesc;
+}
+
+static inline int aead_crypt(struct aead_request *req, bool encrypt)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	int ret;
+
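+	/*
+	 * caam_congested is set by the QI backend when the CAAM queues signal
+	 * congestion; back off and let the caller retry instead of enqueueing
+	 * more work.
+	 */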
+	if (unlikely(caam_congested))
+		return -EAGAIN;
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, encrypt);
+	if (IS_ERR_OR_NULL(edesc))
+		return PTR_ERR(edesc);
+
+	/* Submit the request to CAAM via the QI interface */
+	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(ctx->qidev, edesc, req);
+		qi_cache_free(edesc);
+	}
+
+	return ret;
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+	return aead_crypt(req, true);
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+	return aead_crypt(req, false);
+}
+
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return aead_crypt(req, true);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return aead_crypt(req, false);
+}
+
+static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+{
+	struct ablkcipher_edesc *edesc;
+	struct ablkcipher_request *req = drv_req->app_ctx;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *qidev = caam_ctx->qidev;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+#ifdef DEBUG
+	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
+#endif
+
+	edesc = container_of(drv_req, typeof(*edesc), drv_req);
+
+	if (status)
+		caam_jr_strstatus(qidev, status);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       edesc->src_nents > 1 ? 100 : ivsize, 1);
+	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+	ablkcipher_unmap(qidev, edesc, req);
+
+	/* If an initial IV was generated, copy it into the GIVCIPHER request */
+	if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
+		u8 *iv;
+		struct skcipher_givcrypt_request *greq;
+
+		greq = container_of(req, struct skcipher_givcrypt_request,
+				    creq);
+		iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
+		memcpy(greq->giv, iv, ivsize);
+	}
+
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block. This is used e.g. by the CTS mode.
+	 */
+	if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
+		scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+					 ivsize, ivsize, 0);
+
+	qi_cache_free(edesc);
+	ablkcipher_request_complete(req, status);
+}
+
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+						       *req, bool encrypt)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *qidev = ctx->qidev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma;
+	u8 *iv;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+	struct qm_sg_entry *sg_table, *fd_sgt;
+	struct caam_drv_ctx *drv_ctx;
+	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+
+	drv_ctx = get_drv_ctx(ctx, op_type);
+	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+		return (struct ablkcipher_edesc *)drv_ctx;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (unlikely(src_nents < 0)) {
+		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+			req->nbytes);
+		return ERR_PTR(src_nents);
+	}
+
+	if (unlikely(req->src != req->dst)) {
+		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+		if (unlikely(dst_nents < 0)) {
+			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+				req->nbytes);
+			return ERR_PTR(dst_nents);
+		}
+
+		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+					      DMA_TO_DEVICE);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(qidev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(qidev, "unable to map destination\n");
+			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
+	} else {
+		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(qidev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
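+	/*
+	 * Input S/G table layout: the IV entry followed by the source
+	 * segments; destination segments are appended at dst_sg_idx only when
+	 * the output scatterlist maps to more than one segment.
+	 */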
+	qm_sg_ents = 1 + mapped_src_nents;
+	dst_sg_idx = qm_sg_ents;
+
+	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+			qm_sg_ents, ivsize);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* allocate space for base edesc, link tables and IV */
+	edesc = qi_cache_alloc(GFP_DMA | flags);
+	if (unlikely(!edesc)) {
+		dev_err(qidev, "could not allocate extended descriptor\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Make sure IV is located in a DMAable area */
+	sg_table = &edesc->sgt[0];
+	iv = (u8 *)(sg_table + qm_sg_ents);
+	memcpy(iv, req->info, ivsize);
+
+	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, iv_dma)) {
+		dev_err(qidev, "unable to map IV\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->iv_dma = iv_dma;
+	edesc->qm_sg_bytes = qm_sg_bytes;
+	edesc->drv_req.app_ctx = req;
+	edesc->drv_req.cbk = ablkcipher_done;
+	edesc->drv_req.drv_ctx = drv_ctx;
+
+	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+
+	if (mapped_dst_nents > 1)
+		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+				 dst_sg_idx, 0);
+
+	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
+		dev_err(qidev, "unable to map S/G table\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, op_type, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fd_sgt = &edesc->drv_req.fd_sgt[0];
+
+	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
+				  ivsize + req->nbytes, 0);
+
+	if (req->src == req->dst) {
+		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
+				     sizeof(*sg_table), req->nbytes, 0);
+	} else if (mapped_dst_nents > 1) {
+		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+				     sizeof(*sg_table), req->nbytes, 0);
+	} else {
+		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
+				 req->nbytes, 0);
+	}
+
+	return edesc;
+}
+
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+	struct skcipher_givcrypt_request *creq)
+{
+	struct ablkcipher_request *req = &creq->creq;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *qidev = ctx->qidev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma;
+	u8 *iv;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	struct qm_sg_entry *sg_table, *fd_sgt;
+	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+	struct caam_drv_ctx *drv_ctx;
+
+	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
+	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+		return (struct ablkcipher_edesc *)drv_ctx;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (unlikely(src_nents < 0)) {
+		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+			req->nbytes);
+		return ERR_PTR(src_nents);
+	}
+
+	if (unlikely(req->src != req->dst)) {
+		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+		if (unlikely(dst_nents < 0)) {
+			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+				req->nbytes);
+			return ERR_PTR(dst_nents);
+		}
+
+		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+					      DMA_TO_DEVICE);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(qidev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(qidev, "unable to map destination\n");
+			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
+	} else {
+		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(qidev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		dst_nents = src_nents;
+		mapped_dst_nents = src_nents;
+	}
+
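+	/*
+	 * For givencrypt the table layout differs: the source segments come
+	 * first (only when there is more than one), followed by the generated
+	 * IV and then the destination segments, so that the output frame entry
+	 * can cover IV and destination contiguously.
+	 */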
+	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+	dst_sg_idx = qm_sg_ents;
+
+	qm_sg_ents += 1 + mapped_dst_nents;
+	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+			qm_sg_ents, ivsize);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* allocate space for base edesc, link tables and IV */
+	edesc = qi_cache_alloc(GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(qidev, "could not allocate extended descriptor\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Make sure IV is located in a DMAable area */
+	sg_table = &edesc->sgt[0];
+	iv = (u8 *)(sg_table + qm_sg_ents);
+	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
+	if (dma_mapping_error(qidev, iv_dma)) {
+		dev_err(qidev, "unable to map IV\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->iv_dma = iv_dma;
+	edesc->qm_sg_bytes = qm_sg_bytes;
+	edesc->drv_req.app_ctx = req;
+	edesc->drv_req.cbk = ablkcipher_done;
+	edesc->drv_req.drv_ctx = drv_ctx;
+
+	if (mapped_src_nents > 1)
+		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
+
+	dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+	sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
+			 0);
+
+	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
+		dev_err(qidev, "unable to map S/G table\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fd_sgt = &edesc->drv_req.fd_sgt[0];
+
+	if (mapped_src_nents > 1)
+		dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
+				     0);
+	else
+		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
+				 req->nbytes, 0);
+
+	dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+			     sizeof(*sg_table), ivsize + req->nbytes, 0);
+
+	return edesc;
+}
+
+static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	int ret;
+
+	if (unlikely(caam_congested))
+		return -EAGAIN;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(req, encrypt);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block. For decryption this has to be done before the
+	 * request is submitted, since an in-place operation would overwrite
+	 * the ciphertext.
+	 */
+	if (!encrypt)
+		scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+					 ivsize, ivsize, 0);
+
+	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(ctx->qidev, edesc, req);
+		qi_cache_free(edesc);
+	}
+
+	return ret;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	return ablkcipher_crypt(req, true);
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	return ablkcipher_crypt(req, false);
+}
+
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+{
+	struct ablkcipher_request *req = &creq->creq;
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	int ret;
+
+	if (unlikely(caam_congested))
+		return -EAGAIN;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_giv_edesc_alloc(creq);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(ctx->qidev, edesc, req);
+		qi_cache_free(edesc);
+	}
+
+	return ret;
+}
+
+#define template_ablkcipher	template_u.ablkcipher
+struct caam_alg_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	u32 type;
+	union {
+		struct ablkcipher_alg ablkcipher;
+	} template_u;
+	u32 class1_alg_type;
+	u32 class2_alg_type;
+};
+
+static struct caam_alg_template driver_algs[] = {
+	/* ablkcipher descriptor */
+	{
+		.name = "cbc(aes)",
+		.driver_name = "cbc-aes-caam-qi",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des3_ede)",
+		.driver_name = "cbc-3des-caam-qi",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des)",
+		.driver_name = "cbc-des-caam-qi",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "ctr(aes)",
+		.driver_name = "ctr-aes-caam-qi",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "chainiv",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.name = "rfc3686(ctr(aes))",
+		.driver_name = "rfc3686-ctr-aes-caam-qi",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = AES_MIN_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.name = "xts(aes)",
+		.driver_name = "xts-aes-caam-qi",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = xts_ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "eseqiv",
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+	},
+};
+
+static struct caam_aead_alg driver_aeads[] = {
+	{
+		.aead = {
+			.base = {
+				.cra_name = "rfc4106(gcm(aes))",
+				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc4106_setkey,
+			.setauthsize = rfc4106_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = 8,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "rfc4543(gcm(aes))",
+				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc4543_setkey,
+			.setauthsize = rfc4543_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = 8,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	/* Galois Counter Mode */
+	{
+		.aead = {
+			.base = {
+				.cra_name = "gcm(aes)",
+				.cra_driver_name = "gcm-aes-caam-qi",
+				.cra_blocksize = 1,
+			},
+			.setkey = gcm_setkey,
+			.setauthsize = gcm_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = 12,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		}
+	},
+	/* single-pass ipsec_esp descriptor */
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-cbc-aes-"
+						   "caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-cbc-aes-"
+						   "caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-cbc-aes-"
+						   "caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-cbc-des-"
+						   "caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-cbc-des-"
+						   "caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-cbc-des-"
+						   "caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-cbc-des-"
+						   "caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+};
+
+struct caam_crypto_alg {
+	struct list_head entry;
+	struct crypto_alg crypto_alg;
+	struct caam_alg_entry caam;
+};
+
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+			    bool uses_dkp)
+{
+	struct caam_drv_private *priv;
+
+	/*
+	 * distribute tfms across job rings to ensure in-order
+	 * crypto request processing per tfm
+	 */
+	ctx->jrdev = caam_jr_alloc();
+	if (IS_ERR(ctx->jrdev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(ctx->jrdev);
+	}
+
+	priv = dev_get_drvdata(ctx->jrdev->parent);
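+	/*
+	 * On Era 6+ devices the DKP (Derived Key Protocol) writes the derived
+	 * split key back into ctx->key, so the key buffer has to be mapped
+	 * bidirectionally in that case.
+	 */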
+	if (priv->era >= 6 && uses_dkp)
+		ctx->dir = DMA_BIDIRECTIONAL;
+	else
+		ctx->dir = DMA_TO_DEVICE;
+
+	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
+				      ctx->dir);
+	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
+		dev_err(ctx->jrdev, "unable to map key\n");
+		caam_jr_free(ctx->jrdev);
+		return -ENOMEM;
+	}
+
+	/* copy descriptor header template value */
+	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+
+	ctx->qidev = priv->qidev;
+
+	spin_lock_init(&ctx->lock);
+	ctx->drv_ctx[ENCRYPT] = NULL;
+	ctx->drv_ctx[DECRYPT] = NULL;
+	ctx->drv_ctx[GIVENCRYPT] = NULL;
+
+	return 0;
+}
+
+static int caam_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+							crypto_alg);
+	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	return caam_init_common(ctx, &caam_alg->caam, false);
+}
+
+static int caam_aead_init(struct crypto_aead *tfm)
+{
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+						      aead);
+	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+
+	return caam_init_common(ctx, &caam_alg->caam,
+				alg->setkey == aead_setkey);
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
+	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
+	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
+	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
+
+	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
+
+	caam_jr_free(ctx->jrdev);
+}
+
+static void caam_cra_exit(struct crypto_tfm *tfm)
+{
+	caam_exit_common(crypto_tfm_ctx(tfm));
+}
+
+static void caam_aead_exit(struct crypto_aead *tfm)
+{
+	caam_exit_common(crypto_aead_ctx(tfm));
+}
+
+static struct list_head alg_list;
+static void __exit caam_qi_algapi_exit(void)
+{
+	struct caam_crypto_alg *t_alg, *n;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+		struct caam_aead_alg *t_alg = driver_aeads + i;
+
+		if (t_alg->registered)
+			crypto_unregister_aead(&t_alg->aead);
+	}
+
+	if (!alg_list.next)
+		return;
+
+	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
+		crypto_unregister_alg(&t_alg->crypto_alg);
+		list_del(&t_alg->entry);
+		kfree(t_alg);
+	}
+}
+
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
+					      *template)
+{
+	struct caam_crypto_alg *t_alg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg)
+		return ERR_PTR(-ENOMEM);
+
+	alg = &t_alg->crypto_alg;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 template->driver_name);
+	alg->cra_module = THIS_MODULE;
+	alg->cra_init = caam_cra_init;
+	alg->cra_exit = caam_cra_exit;
+	alg->cra_priority = CAAM_CRA_PRIORITY;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_ctxsize = sizeof(struct caam_ctx);
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+			 template->type;
+	switch (template->type) {
+	case CRYPTO_ALG_TYPE_GIVCIPHER:
+		alg->cra_type = &crypto_givcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg->cra_type = &crypto_ablkcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
+	}
+
+	t_alg->caam.class1_alg_type = template->class1_alg_type;
+	t_alg->caam.class2_alg_type = template->class2_alg_type;
+
+	return t_alg;
+}
+
+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
+{
+	struct aead_alg *alg = &t_alg->aead;
+
+	alg->base.cra_module = THIS_MODULE;
+	alg->base.cra_priority = CAAM_CRA_PRIORITY;
+	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+	alg->init = caam_aead_init;
+	alg->exit = caam_aead_exit;
+}
+
+static int __init caam_qi_algapi_init(void)
+{
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	struct caam_drv_private *priv;
+	int i = 0, err = 0;
+	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+	unsigned int md_limit = SHA512_DIGEST_SIZE;
+	bool registered = false;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	of_node_put(dev_node);
+	if (!pdev)
+		return -ENODEV;
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv || !priv->qi_present)
+		return -ENODEV;
+
+	if (caam_dpaa2) {
+		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
+		return -ENODEV;
+	}
+
+	INIT_LIST_HEAD(&alg_list);
+
+	/*
+	 * Register crypto algorithms the device supports.
+	 * First, detect presence and attributes of DES, AES, and MD blocks.
+	 */
+	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+	/* If MD is present, limit digest size based on LP256 */
+	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+		md_limit = SHA256_DIGEST_SIZE;
+
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct caam_crypto_alg *t_alg;
+		struct caam_alg_template *alg = driver_algs + i;
+		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		t_alg = caam_alg_alloc(alg);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			dev_warn(priv->qidev, "%s alg allocation failed\n",
+				 alg->driver_name);
+			continue;
+		}
+
+		err = crypto_register_alg(&t_alg->crypto_alg);
+		if (err) {
+			dev_warn(priv->qidev, "%s alg registration failed\n",
+				 t_alg->crypto_alg.cra_driver_name);
+			kfree(t_alg);
+			continue;
+		}
+
+		list_add_tail(&t_alg->entry, &alg_list);
+		registered = true;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+		struct caam_aead_alg *t_alg = driver_aeads + i;
+		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		/*
+		 * Check support for AES algorithms not available
+		 * on LP devices.
+		 */
+		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
+		    (alg_aai == OP_ALG_AAI_GCM))
+			continue;
+
+		/*
+		 * Skip algorithms requiring message digests
+		 * if MD or MD size is not supported by device.
+		 */
+		if (c2_alg_sel &&
+		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
+			continue;
+
+		caam_aead_alg_init(t_alg);
+
+		err = crypto_register_aead(&t_alg->aead);
+		if (err) {
+			pr_warn("%s alg registration failed\n",
+				t_alg->aead.base.cra_driver_name);
+			continue;
+		}
+
+		t_alg->registered = true;
+		registered = true;
+	}
+
+	if (registered)
+		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
+
+	return err;
+}
+
+module_init(caam_qi_algapi_init);
+module_exit(caam_qi_algapi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
+MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
new file mode 100644
index 0000000..43975ab
--- /dev/null
+++ b/drivers/crypto/caam/caamhash.c
@@ -0,0 +1,1962 @@
+/*
+ * caam - Freescale FSL CAAM support for ahash functions of crypto API
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+ * relationship of the digest job descriptor (or the first job descriptor
+ * after init) to the shared descriptors:
+ *
+ * ---------------                     ---------------
+ * | JobDesc #1  |-------------------->|  ShareDesc  |
+ * | *(packet 1) |                     |  (hashKey)  |
+ * ---------------                     | (operation) |
+ *                                     ---------------
+ *
+ * relationship of subsequent job descriptors to shared descriptors:
+ *
+ * ---------------                     ---------------
+ * | JobDesc #2  |-------------------->|  ShareDesc  |
+ * | *(packet 2) |      |------------->|  (hashKey)  |
+ * ---------------      |    |-------->| (operation) |
+ *       .              |    |         | (load ctx2) |
+ *       .              |    |         ---------------
+ * ---------------      |    |
+ * | JobDesc #3  |------|    |
+ * | *(packet 3) |           |
+ * ---------------           |
+ *       .                   |
+ *       .                   |
+ * ---------------           |
+ * | JobDesc #4  |------------
+ * | *(packet 4) |
+ * ---------------
+ *
+ * The SharedDesc never changes for a connection unless rekeyed, but
+ * each packet will likely be in a different place. So all we need
+ * to know to process the packet is where the input is, where the
+ * output goes, and what context we want to process with. Context is
+ * in the SharedDesc, packet references in the JobDesc.
+ *
+ * So, a job desc looks like:
+ *
+ * ---------------------
+ * | Header            |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR       |
+ * | (output buffer)   |
+ * | (output length)   |
+ * | SEQ_IN_PTR        |
+ * | (input buffer)    |
+ * | (input length)    |
+ * ---------------------
+ */
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "key_gen.h"
+
+#define CAAM_CRA_PRIORITY		3000
+
+/* max hash key is max split key size */
+#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
+
+#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
+#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
+
+/* length of descriptors text */
+#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+
+#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
+					 CAAM_MAX_HASH_KEY_SIZE)
+#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
+
+/* caam context sizes for hashes: running digest + 8 */
+#define HASH_MSG_LEN			8
+#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
+
+#ifdef DEBUG
+/* for print_hex_dumps with line references */
+#define debug(format, arg...) printk(format, arg)
+#else
+#define debug(format, arg...)
+#endif
+
+static struct list_head hash_list;
+
+/* ahash per-session context */
+struct caam_hash_ctx {
+	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
+	dma_addr_t sh_desc_update_first_dma;
+	dma_addr_t sh_desc_fin_dma;
+	dma_addr_t sh_desc_digest_dma;
+	enum dma_data_direction dir;
+	struct device *jrdev;
+	u8 key[CAAM_MAX_HASH_KEY_SIZE];
+	int ctx_len;
+	struct alginfo adata;
+};
+
+/* ahash state */
+struct caam_hash_state {
+	dma_addr_t buf_dma;
+	dma_addr_t ctx_dma;
+	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+	int buflen_0;
+	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+	int buflen_1;
+	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
+	int (*update)(struct ahash_request *req);
+	int (*final)(struct ahash_request *req);
+	int (*finup)(struct ahash_request *req);
+	int current_buf;
+};
+
+struct caam_export_state {
+	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
+	u8 caam_ctx[MAX_CTX_LEN];
+	int buflen;
+	int (*update)(struct ahash_request *req);
+	int (*final)(struct ahash_request *req);
+	int (*finup)(struct ahash_request *req);
+};
+
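+/*
+ * Buffer handling helpers: caam_hash_state keeps two bounce buffers and
+ * ping-pongs between them, so the buffer used by the job in flight stays
+ * untouched while leftover bytes for the next request fill the other one.
+ */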
+static inline void switch_buf(struct caam_hash_state *state)
+{
+	state->current_buf ^= 1;
+}
+
+static inline u8 *current_buf(struct caam_hash_state *state)
+{
+	return state->current_buf ? state->buf_1 : state->buf_0;
+}
+
+static inline u8 *alt_buf(struct caam_hash_state *state)
+{
+	return state->current_buf ? state->buf_0 : state->buf_1;
+}
+
+static inline int *current_buflen(struct caam_hash_state *state)
+{
+	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
+}
+
+static inline int *alt_buflen(struct caam_hash_state *state)
+{
+	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
+}
+
+/* Common job descriptor seq in/out ptr routines */
+
+/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
+static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
+				      struct caam_hash_state *state,
+				      int ctx_len)
+{
+	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
+					ctx_len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, state->ctx_dma)) {
+		dev_err(jrdev, "unable to map ctx\n");
+		state->ctx_dma = 0;
+		return -ENOMEM;
+	}
+
+	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
+
+	return 0;
+}
+
+/* Map req->result, and append seq_out_ptr command that points to it */
+static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
+						u8 *result, int digestsize)
+{
+	dma_addr_t dst_dma;
+
+	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
+	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+
+	return dst_dma;
+}
+
+/* Map current buffer in state (if length > 0) and put it in link table */
+static inline int buf_map_to_sec4_sg(struct device *jrdev,
+				     struct sec4_sg_entry *sec4_sg,
+				     struct caam_hash_state *state)
+{
+	int buflen = *current_buflen(state);
+
+	if (!buflen)
+		return 0;
+
+	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
+					DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, state->buf_dma)) {
+		dev_err(jrdev, "unable to map buf\n");
+		state->buf_dma = 0;
+		return -ENOMEM;
+	}
+
+	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
+
+	return 0;
+}
+
+/* Map state->caam_ctx, and add it to link table */
+static inline int ctx_map_to_sec4_sg(struct device *jrdev,
+				     struct caam_hash_state *state, int ctx_len,
+				     struct sec4_sg_entry *sec4_sg, u32 flag)
+{
+	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+	if (dma_mapping_error(jrdev, state->ctx_dma)) {
+		dev_err(jrdev, "unable to map ctx\n");
+		state->ctx_dma = 0;
+		return -ENOMEM;
+	}
+
+	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
+
+	return 0;
+}
+
+/*
+ * For ahash update, final and finup (import_ctx = true)
+ *     import context, read and write to seqout
+ * For ahash first and digest (import_ctx = false)
+ *     read and write to seqout
+ */
+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
+				     struct caam_hash_ctx *ctx, bool import_ctx,
+				     int era)
+{
+	u32 op = ctx->adata.algtype;
+	u32 *skip_key_load;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Append key if it has been set; ahash update excluded */
+	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
+		/* Skip key loading if already shared */
+		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_SHRD);
+
+		if (era < 6)
+			append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+					  ctx->adata.keylen, CLASS_2 |
+					  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+		else
+			append_proto_dkp(desc, &ctx->adata);
+
+		set_jump_tgt_here(desc, skip_key_load);
+
+		op |= OP_ALG_AAI_HMAC_PRECOMP;
+	}
+
+	/* If needed, import context from software */
+	if (import_ctx)
+		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
+				LDST_SRCDST_BYTE_CONTEXT);
+
+	/* Class 2 operation */
+	append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+	/*
+	 * Load from buf and/or src and write to req->result or state->context
+	 * Calculate remaining bytes to read
+	 */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	/* Read remaining bytes */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+			     FIFOLD_TYPE_MSG | KEY_VLF);
+	/* Store class2 context bytes */
+	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+}
+
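+/*
+ * (Re)generate the update, update_first, final and digest shared descriptors
+ * for this tfm and sync them to the device. Called at init and from setkey.
+ */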
+static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	u32 *desc;
+
+	ctx->adata.key_virt = ctx->key;
+
+	/* ahash_update shared descriptor */
+	desc = ctx->sh_desc_update;
+	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
+			  ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+				   desc_bytes(desc), ctx->dir);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ahash update shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	/* ahash_update_first shared descriptor */
+	desc = ctx->sh_desc_update_first;
+	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
+			  ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+				   desc_bytes(desc), ctx->dir);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ahash update first shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	/* ahash_final shared descriptor */
+	desc = ctx->sh_desc_fin;
+	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
+			  ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+				   desc_bytes(desc), ctx->dir);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/* ahash_digest shared descriptor */
+	desc = ctx->sh_desc_digest;
+	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
+			  ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+				   desc_bytes(desc), ctx->dir);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ahash digest shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	return 0;
+}
+
+/* Digest the key if its length exceeds the algorithm's block size */
+static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+			   u32 *keylen, u8 *key_out, u32 digestsize)
+{
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+	struct split_key_result result;
+	dma_addr_t src_dma, dst_dma;
+	int ret;
+
+	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+	if (!desc) {
+		dev_err(jrdev, "unable to allocate key input memory\n");
+		return -ENOMEM;
+	}
+
+	init_job_desc(desc, 0);
+
+	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
+				 DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, src_dma)) {
+		dev_err(jrdev, "unable to map key input memory\n");
+		kfree(desc);
+		return -ENOMEM;
+	}
+	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
+				 DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, dst_dma)) {
+		dev_err(jrdev, "unable to map key output memory\n");
+		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+		kfree(desc);
+		return -ENOMEM;
+	}
+
+	/* Job descriptor to perform unkeyed hash on key_in */
+	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
+			 OP_ALG_AS_INITFINAL);
+	append_seq_in_ptr(desc, src_dma, *keylen, 0);
+	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
+			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
+	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	result.err = 0;
+	init_completion(&result.completion);
+
+	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+	if (!ret) {
+		/* in progress */
+		wait_for_completion(&result.completion);
+		ret = result.err;
+#ifdef DEBUG
+		print_hex_dump(KERN_ERR,
+			       "digested key@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+			       digestsize, 1);
+#endif
+	}
+	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+
+	*keylen = digestsize;
+
+	kfree(desc);
+
+	return ret;
+}
+
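+/*
+ * Install an HMAC key: keys longer than the block size are first hashed down
+ * to digest size; the result is then either copied verbatim (DKP-capable
+ * devices, era >= 6) or turned into an MDHA split key, after which the
+ * shared descriptors are rebuilt.
+ */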
+static int ahash_setkey(struct crypto_ahash *ahash,
+			const u8 *key, unsigned int keylen)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+	int ret;
+	u8 *hashed_key = NULL;
+
+#ifdef DEBUG
+	printk(KERN_ERR "keylen %d\n", keylen);
+#endif
+
+	if (keylen > blocksize) {
+		hashed_key = kmalloc_array(digestsize,
+					   sizeof(*hashed_key),
+					   GFP_KERNEL | GFP_DMA);
+		if (!hashed_key)
+			return -ENOMEM;
+		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
+				      digestsize);
+		if (ret)
+			goto bad_free_key;
+		key = hashed_key;
+	}
+
+	/*
+	 * If DKP is supported, use it in the shared descriptor to generate
+	 * the split key.
+	 */
+	if (ctrlpriv->era >= 6) {
+		ctx->adata.key_inline = true;
+		ctx->adata.keylen = keylen;
+		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+						      OP_ALG_ALGSEL_MASK);
+
+		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
+			goto bad_free_key;
+
+		memcpy(ctx->key, key, keylen);
+	} else {
+		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
+				    keylen, CAAM_MAX_HASH_KEY_SIZE);
+		if (ret)
+			goto bad_free_key;
+	}
+
+	kfree(hashed_key);
+	return ahash_set_sh_desc(ahash);
+ bad_free_key:
+	kfree(hashed_key);
+	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+/*
+ * ahash_edesc - s/w-extended ahash descriptor
+ * @dst_dma: physically mapped address of req->result
+ * @sec4_sg_dma: physically mapped address of h/w link table
+ * @src_nents: number of segments in input scatterlist
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ * @sec4_sg: h/w link table
+ */
+struct ahash_edesc {
+	dma_addr_t dst_dma;
+	dma_addr_t sec4_sg_dma;
+	int src_nents;
+	int sec4_sg_bytes;
+	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
+	struct sec4_sg_entry sec4_sg[0];
+};
+
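+/* Release DMA mappings taken for an extended descriptor and its request */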
+static inline void ahash_unmap(struct device *dev,
+			struct ahash_edesc *edesc,
+			struct ahash_request *req, int dst_len)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	if (edesc->src_nents)
+		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+	if (edesc->dst_dma)
+		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
+
+	if (edesc->sec4_sg_bytes)
+		dma_unmap_single(dev, edesc->sec4_sg_dma,
+				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
+
+	if (state->buf_dma) {
+		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+				 DMA_TO_DEVICE);
+		state->buf_dma = 0;
+	}
+}
+
+static inline void ahash_unmap_ctx(struct device *dev,
+			struct ahash_edesc *edesc,
+			struct ahash_request *req, int dst_len, u32 flag)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	if (state->ctx_dma) {
+		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+		state->ctx_dma = 0;
+	}
+	ahash_unmap(dev, edesc, req, dst_len);
+}
+
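+/* Completion callback for jobs that write the final digest to req->result */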
+static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
+		       void *context)
+{
+	struct ahash_request *req = context;
+	struct ahash_edesc *edesc;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	int digestsize = crypto_ahash_digestsize(ahash);
+#ifdef DEBUG
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	ahash_unmap(jrdev, edesc, req, digestsize);
+	kfree(edesc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+		       ctx->ctx_len, 1);
+	if (req->result)
+		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+			       digestsize, 1);
+#endif
+
+	req->base.complete(&req->base, err);
+}
+
+static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
+			    void *context)
+{
+	struct ahash_request *req = context;
+	struct ahash_edesc *edesc;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+#ifdef DEBUG
+	int digestsize = crypto_ahash_digestsize(ahash);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+	switch_buf(state);
+	kfree(edesc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+		       ctx->ctx_len, 1);
+	if (req->result)
+		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+			       digestsize, 1);
+#endif
+
+	req->base.complete(&req->base, err);
+}
+
+static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
+			       void *context)
+{
+	struct ahash_request *req = context;
+	struct ahash_edesc *edesc;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	int digestsize = crypto_ahash_digestsize(ahash);
+#ifdef DEBUG
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
+	kfree(edesc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+		       ctx->ctx_len, 1);
+	if (req->result)
+		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+			       digestsize, 1);
+#endif
+
+	req->base.complete(&req->base, err);
+}
+
+static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
+			       void *context)
+{
+	struct ahash_request *req = context;
+	struct ahash_edesc *edesc;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+#ifdef DEBUG
+	int digestsize = crypto_ahash_digestsize(ahash);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+	switch_buf(state);
+	kfree(edesc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+		       ctx->ctx_len, 1);
+	if (req->result)
+		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+			       digestsize, 1);
+#endif
+
+	req->base.complete(&req->base, err);
+}
+
+/*
+ * Allocate an extended descriptor, which contains the hardware descriptor
+ * and space for hardware scatter table containing sg_num entries.
+ */
+static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
+					     int sg_num, u32 *sh_desc,
+					     dma_addr_t sh_desc_dma,
+					     gfp_t flags)
+{
+	struct ahash_edesc *edesc;
+	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
+
+	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
+		return NULL;
+	}
+
+	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
+			     HDR_SHARE_DEFER | HDR_REVERSE);
+
+	return edesc;
+}
+
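+/*
+ * Append the SEQ IN PTR command for the source data: point directly at a
+ * single mapped segment, or build a sec4 link table when several entries
+ * (or a leading buffer entry) are needed.
+ */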
+static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
+			       struct ahash_edesc *edesc,
+			       struct ahash_request *req, int nents,
+			       unsigned int first_sg,
+			       unsigned int first_bytes, size_t to_hash)
+{
+	dma_addr_t src_dma;
+	u32 options;
+
+	if (nents > 1 || first_sg) {
+		struct sec4_sg_entry *sg = edesc->sec4_sg;
+		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
+
+		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+
+		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
+		if (dma_mapping_error(ctx->jrdev, src_dma)) {
+			dev_err(ctx->jrdev, "unable to map S/G table\n");
+			return -ENOMEM;
+		}
+
+		edesc->sec4_sg_bytes = sgsize;
+		edesc->sec4_sg_dma = src_dma;
+		options = LDST_SGF;
+	} else {
+		src_dma = sg_dma_address(req->src);
+		options = 0;
+	}
+
+	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
+			  options);
+
+	return 0;
+}
+
+/* submit update job descriptor */
+static int ahash_update_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	u8 *buf = current_buf(state);
+	int *buflen = current_buflen(state);
+	u8 *next_buf = alt_buf(state);
+	int *next_buflen = alt_buflen(state), last_buflen;
+	int in_len = *buflen + req->nbytes, to_hash;
+	u32 *desc;
+	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
+	struct ahash_edesc *edesc;
+	int ret = 0;
+
+	last_buflen = *next_buflen;
+	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+	to_hash = in_len - *next_buflen;
+
+	if (to_hash) {
+		src_nents = sg_nents_for_len(req->src,
+					     req->nbytes - (*next_buflen));
+		if (src_nents < 0) {
+			dev_err(jrdev, "Invalid number of src SG.\n");
+			return src_nents;
+		}
+
+		if (src_nents) {
+			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+						  DMA_TO_DEVICE);
+			if (!mapped_nents) {
+				dev_err(jrdev, "unable to DMA map source\n");
+				return -ENOMEM;
+			}
+		} else {
+			mapped_nents = 0;
+		}
+
+		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
+		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
+				 sizeof(struct sec4_sg_entry);
+
+		/*
+		 * allocate space for base edesc and hw desc commands,
+		 * link tables
+		 */
+		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+					  ctx->sh_desc_update,
+					  ctx->sh_desc_update_dma, flags);
+		if (!edesc) {
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+			return -ENOMEM;
+		}
+
+		edesc->src_nents = src_nents;
+		edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
+		if (ret)
+			goto unmap_ctx;
+
+		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+		if (ret)
+			goto unmap_ctx;
+
+		if (mapped_nents) {
+			sg_to_sec4_sg_last(req->src, mapped_nents,
+					   edesc->sec4_sg + sec4_sg_src_index,
+					   0);
+			if (*next_buflen)
+				scatterwalk_map_and_copy(next_buf, req->src,
+							 to_hash - *buflen,
+							 *next_buflen, 0);
+		} else {
+			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
+					    1);
+		}
+
+		desc = edesc->hw_desc;
+
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						     sec4_sg_bytes,
+						     DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+			dev_err(jrdev, "unable to map S/G table\n");
+			ret = -ENOMEM;
+			goto unmap_ctx;
+		}
+
+		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
+				       to_hash, LDST_SGF);
+
+		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
+
+#ifdef DEBUG
+		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			       desc_bytes(desc), 1);
+#endif
+
+		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
+		if (ret)
+			goto unmap_ctx;
+
+		ret = -EINPROGRESS;
+	} else if (*next_buflen) {
+		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+					 req->nbytes, 0);
+		*buflen = *next_buflen;
+		*next_buflen = last_buflen;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+		       *next_buflen, 1);
+#endif
+
+	return ret;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+	kfree(edesc);
+	return ret;
+}
+
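+/*
+ * Finalize: feed the saved running context plus any buffered bytes to the
+ * FINALIZE shared descriptor and write the resulting digest to req->result.
+ */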
+static int ahash_final_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int buflen = *current_buflen(state);
+	u32 *desc;
+	int sec4_sg_bytes, sec4_sg_src_index;
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct ahash_edesc *edesc;
+	int ret;
+
+	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
+				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
+				  flags);
+	if (!edesc)
+		return -ENOMEM;
+
+	desc = edesc->hw_desc;
+
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+				 edesc->sec4_sg, DMA_TO_DEVICE);
+	if (ret)
+		goto unmap_ctx;
+
+	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+	if (ret)
+		goto unmap_ctx;
+
+	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
+
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		ret = -ENOMEM;
+		goto unmap_ctx;
+	}
+
+	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
+			  LDST_SGF);
+
+	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		ret = -ENOMEM;
+		goto unmap_ctx;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
+	if (ret)
+		goto unmap_ctx;
+
+	return -EINPROGRESS;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	kfree(edesc);
+	return ret;
+}
+
+static int ahash_finup_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int buflen = *current_buflen(state);
+	u32 *desc;
+	int sec4_sg_src_index;
+	int src_nents, mapped_nents;
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct ahash_edesc *edesc;
+	int ret;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (src_nents < 0) {
+		dev_err(jrdev, "Invalid number of src SG.\n");
+		return src_nents;
+	}
+
+	if (src_nents) {
+		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+					  DMA_TO_DEVICE);
+		if (!mapped_nents) {
+			dev_err(jrdev, "unable to DMA map source\n");
+			return -ENOMEM;
+		}
+	} else {
+		mapped_nents = 0;
+	}
+
+	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
+				  flags);
+	if (!edesc) {
+		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	desc = edesc->hw_desc;
+
+	edesc->src_nents = src_nents;
+
+	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+				 edesc->sec4_sg, DMA_TO_DEVICE);
+	if (ret)
+		goto unmap_ctx;
+
+	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+	if (ret)
+		goto unmap_ctx;
+
+	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
+				  sec4_sg_src_index, ctx->ctx_len + buflen,
+				  req->nbytes);
+	if (ret)
+		goto unmap_ctx;
+
+	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		ret = -ENOMEM;
+		goto unmap_ctx;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
+	if (ret)
+		goto unmap_ctx;
+
+	return -EINPROGRESS;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	kfree(edesc);
+	return ret;
+}
+
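+/* One-shot digest of req->src; no intermediate context is kept */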
+static int ahash_digest(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	u32 *desc;
+	int digestsize = crypto_ahash_digestsize(ahash);
+	int src_nents, mapped_nents;
+	struct ahash_edesc *edesc;
+	int ret;
+
+	state->buf_dma = 0;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (src_nents < 0) {
+		dev_err(jrdev, "Invalid number of src SG.\n");
+		return src_nents;
+	}
+
+	if (src_nents) {
+		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+					  DMA_TO_DEVICE);
+		if (!mapped_nents) {
+			dev_err(jrdev, "unable to map source for DMA\n");
+			return -ENOMEM;
+		}
+	} else {
+		mapped_nents = 0;
+	}
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
+				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
+				  flags);
+	if (!edesc) {
+		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	edesc->src_nents = src_nents;
+
+	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
+				  req->nbytes);
+	if (ret) {
+		ahash_unmap(jrdev, edesc, req, digestsize);
+		kfree(edesc);
+		return ret;
+	}
+
+	desc = edesc->hw_desc;
+
+	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		ahash_unmap(jrdev, edesc, req, digestsize);
+		kfree(edesc);
+		return -ENOMEM;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ahash_unmap(jrdev, edesc, req, digestsize);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+/* submit ahash final if it is the first job descriptor */
+static int ahash_final_no_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	u8 *buf = current_buf(state);
+	int buflen = *current_buflen(state);
+	u32 *desc;
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct ahash_edesc *edesc;
+	int ret;
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
+				  ctx->sh_desc_digest_dma, flags);
+	if (!edesc)
+		return -ENOMEM;
+
+	desc = edesc->hw_desc;
+
+	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, state->buf_dma)) {
+		dev_err(jrdev, "unable to map src\n");
+		goto unmap;
+	}
+
+	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+
+	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		goto unmap;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ahash_unmap(jrdev, edesc, req, digestsize);
+		kfree(edesc);
+	}
+
+	return ret;
+ unmap:
+	ahash_unmap(jrdev, edesc, req, digestsize);
+	kfree(edesc);
+	return -ENOMEM;
+}
+
+/* submit ahash update if it is the first job descriptor after update */
+static int ahash_update_no_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	u8 *buf = current_buf(state);
+	int *buflen = current_buflen(state);
+	u8 *next_buf = alt_buf(state);
+	int *next_buflen = alt_buflen(state);
+	int in_len = *buflen + req->nbytes, to_hash;
+	int sec4_sg_bytes, src_nents, mapped_nents;
+	struct ahash_edesc *edesc;
+	u32 *desc;
+	int ret = 0;
+
+	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+	to_hash = in_len - *next_buflen;
+
+	if (to_hash) {
+		src_nents = sg_nents_for_len(req->src,
+					     req->nbytes - *next_buflen);
+		if (src_nents < 0) {
+			dev_err(jrdev, "Invalid number of src SG.\n");
+			return src_nents;
+		}
+
+		if (src_nents) {
+			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+						  DMA_TO_DEVICE);
+			if (!mapped_nents) {
+				dev_err(jrdev, "unable to DMA map source\n");
+				return -ENOMEM;
+			}
+		} else {
+			mapped_nents = 0;
+		}
+
+		sec4_sg_bytes = (1 + mapped_nents) *
+				sizeof(struct sec4_sg_entry);
+
+		/*
+		 * allocate space for base edesc and hw desc commands,
+		 * link tables
+		 */
+		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
+					  ctx->sh_desc_update_first,
+					  ctx->sh_desc_update_first_dma,
+					  flags);
+		if (!edesc) {
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+			return -ENOMEM;
+		}
+
+		edesc->src_nents = src_nents;
+		edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
+		if (ret)
+			goto unmap_ctx;
+
+		sg_to_sec4_sg_last(req->src, mapped_nents,
+				   edesc->sec4_sg + 1, 0);
+
+		if (*next_buflen) {
+			scatterwalk_map_and_copy(next_buf, req->src,
+						 to_hash - *buflen,
+						 *next_buflen, 0);
+		}
+
+		desc = edesc->hw_desc;
+
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						    sec4_sg_bytes,
+						    DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+			dev_err(jrdev, "unable to map S/G table\n");
+			ret = -ENOMEM;
+			goto unmap_ctx;
+		}
+
+		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
+
+		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+		if (ret)
+			goto unmap_ctx;
+
+#ifdef DEBUG
+		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			       desc_bytes(desc), 1);
+#endif
+
+		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
+		if (ret)
+			goto unmap_ctx;
+
+		ret = -EINPROGRESS;
+		state->update = ahash_update_ctx;
+		state->finup = ahash_finup_ctx;
+		state->final = ahash_final_ctx;
+	} else if (*next_buflen) {
+		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+					 req->nbytes, 0);
+		*buflen = *next_buflen;
+		*next_buflen = 0;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+		       *next_buflen, 1);
+#endif
+
+	return ret;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	kfree(edesc);
+	return ret;
+}
+
+/* submit ahash finup if it is the first job descriptor after update */
+static int ahash_finup_no_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int buflen = *current_buflen(state);
+	u32 *desc;
+	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct ahash_edesc *edesc;
+	int ret;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (src_nents < 0) {
+		dev_err(jrdev, "Invalid number of src SG.\n");
+		return src_nents;
+	}
+
+	if (src_nents) {
+		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+					  DMA_TO_DEVICE);
+		if (!mapped_nents) {
+			dev_err(jrdev, "unable to DMA map source\n");
+			return -ENOMEM;
+		}
+	} else {
+		mapped_nents = 0;
+	}
+
+	sec4_sg_src_index = 2;
+	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
+			 sizeof(struct sec4_sg_entry);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
+				  flags);
+	if (!edesc) {
+		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	desc = edesc->hw_desc;
+
+	edesc->src_nents = src_nents;
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
+	if (ret)
+		goto unmap;
+
+	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
+				  req->nbytes);
+	if (ret) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		goto unmap;
+	}
+
+	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		goto unmap;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ahash_unmap(jrdev, edesc, req, digestsize);
+		kfree(edesc);
+	}
+
+	return ret;
+ unmap:
+	ahash_unmap(jrdev, edesc, req, digestsize);
+	kfree(edesc);
+	return -ENOMEM;
+}
+
+/* submit first update job descriptor after init */
+static int ahash_update_first(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	u8 *next_buf = alt_buf(state);
+	int *next_buflen = alt_buflen(state);
+	int to_hash;
+	u32 *desc;
+	int src_nents, mapped_nents;
+	struct ahash_edesc *edesc;
+	int ret = 0;
+
+	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
+				      1);
+	to_hash = req->nbytes - *next_buflen;
+
+	if (to_hash) {
+		src_nents = sg_nents_for_len(req->src,
+					     req->nbytes - *next_buflen);
+		if (src_nents < 0) {
+			dev_err(jrdev, "Invalid number of src SG.\n");
+			return src_nents;
+		}
+
+		if (src_nents) {
+			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+						  DMA_TO_DEVICE);
+			if (!mapped_nents) {
+				dev_err(jrdev, "unable to map source for DMA\n");
+				return -ENOMEM;
+			}
+		} else {
+			mapped_nents = 0;
+		}
+
+		/*
+		 * allocate space for base edesc and hw desc commands,
+		 * link tables
+		 */
+		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
+					  mapped_nents : 0,
+					  ctx->sh_desc_update_first,
+					  ctx->sh_desc_update_first_dma,
+					  flags);
+		if (!edesc) {
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+			return -ENOMEM;
+		}
+
+		edesc->src_nents = src_nents;
+
+		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
+					  to_hash);
+		if (ret)
+			goto unmap_ctx;
+
+		if (*next_buflen)
+			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+						 *next_buflen, 0);
+
+		desc = edesc->hw_desc;
+
+		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+		if (ret)
+			goto unmap_ctx;
+
+#ifdef DEBUG
+		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			       desc_bytes(desc), 1);
+#endif
+
+		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
+		if (ret)
+			goto unmap_ctx;
+
+		ret = -EINPROGRESS;
+		state->update = ahash_update_ctx;
+		state->finup = ahash_finup_ctx;
+		state->final = ahash_final_ctx;
+	} else if (*next_buflen) {
+		state->update = ahash_update_no_ctx;
+		state->finup = ahash_finup_no_ctx;
+		state->final = ahash_final_no_ctx;
+		scatterwalk_map_and_copy(next_buf, req->src, 0,
+					 req->nbytes, 0);
+		switch_buf(state);
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+		       *next_buflen, 1);
+#endif
+
+	return ret;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	kfree(edesc);
+	return ret;
+}
+
+static int ahash_finup_first(struct ahash_request *req)
+{
+	return ahash_digest(req);
+}
+
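+/*
+ * The entry points below dispatch through function pointers kept in
+ * caam_hash_state, so the first submission uses the "no context"/"first"
+ * paths and later submissions switch to the context-carrying variants.
+ */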
+static int ahash_init(struct ahash_request *req)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	state->update = ahash_update_first;
+	state->finup = ahash_finup_first;
+	state->final = ahash_final_no_ctx;
+
+	state->ctx_dma = 0;
+	state->current_buf = 0;
+	state->buf_dma = 0;
+	state->buflen_0 = 0;
+	state->buflen_1 = 0;
+
+	return 0;
+}
+
+static int ahash_update(struct ahash_request *req)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	return state->update(req);
+}
+
+static int ahash_finup(struct ahash_request *req)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	return state->finup(req);
+}
+
+static int ahash_final(struct ahash_request *req)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	return state->final(req);
+}
+
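+/*
+ * Export the buffered bytes, running context and state callbacks so that a
+ * partially hashed request can later be resumed through ahash_import().
+ */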
+static int ahash_export(struct ahash_request *req, void *out)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct caam_export_state *export = out;
+	int len;
+	u8 *buf;
+
+	if (state->current_buf) {
+		buf = state->buf_1;
+		len = state->buflen_1;
+	} else {
+		buf = state->buf_0;
+		len = state->buflen_0;
+	}
+
+	memcpy(export->buf, buf, len);
+	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
+	export->buflen = len;
+	export->update = state->update;
+	export->final = state->final;
+	export->finup = state->finup;
+
+	return 0;
+}
+
+static int ahash_import(struct ahash_request *req, const void *in)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	const struct caam_export_state *export = in;
+
+	memset(state, 0, sizeof(*state));
+	memcpy(state->buf_0, export->buf, export->buflen);
+	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
+	state->buflen_0 = export->buflen;
+	state->update = export->update;
+	state->final = export->final;
+	state->finup = export->finup;
+
+	return 0;
+}
+
+struct caam_hash_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char driver_name[CRYPTO_MAX_ALG_NAME];
+	char hmac_name[CRYPTO_MAX_ALG_NAME];
+	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	struct ahash_alg template_ahash;
+	u32 alg_type;
+};
+
+/* ahash algorithm templates */
+static struct caam_hash_template driver_hash[] = {
+	{
+		.name = "sha1",
+		.driver_name = "sha1-caam",
+		.hmac_name = "hmac(sha1)",
+		.hmac_driver_name = "hmac-sha1-caam",
+		.blocksize = SHA1_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA1_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA1,
+	}, {
+		.name = "sha224",
+		.driver_name = "sha224-caam",
+		.hmac_name = "hmac(sha224)",
+		.hmac_driver_name = "hmac-sha224-caam",
+		.blocksize = SHA224_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA224_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA224,
+	}, {
+		.name = "sha256",
+		.driver_name = "sha256-caam",
+		.hmac_name = "hmac(sha256)",
+		.hmac_driver_name = "hmac-sha256-caam",
+		.blocksize = SHA256_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA256_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA256,
+	}, {
+		.name = "sha384",
+		.driver_name = "sha384-caam",
+		.hmac_name = "hmac(sha384)",
+		.hmac_driver_name = "hmac-sha384-caam",
+		.blocksize = SHA384_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA384_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA384,
+	}, {
+		.name = "sha512",
+		.driver_name = "sha512-caam",
+		.hmac_name = "hmac(sha512)",
+		.hmac_driver_name = "hmac-sha512-caam",
+		.blocksize = SHA512_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA512_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA512,
+	}, {
+		.name = "md5",
+		.driver_name = "md5-caam",
+		.hmac_name = "hmac(md5)",
+		.hmac_driver_name = "hmac-md5-caam",
+		.blocksize = MD5_BLOCK_WORDS * 4,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = MD5_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_MD5,
+	},
+};
+
+struct caam_hash_alg {
+	struct list_head entry;
+	int alg_type;
+	struct ahash_alg ahash_alg;
+};
+
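+/*
+ * Per-tfm init: allocate a job ring, DMA-map the block of shared descriptors
+ * embedded in the context and build the (still unkeyed) shared descriptors.
+ */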
+static int caam_hash_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct crypto_alg *base = tfm->__crt_alg;
+	struct hash_alg_common *halg =
+		 container_of(base, struct hash_alg_common, base);
+	struct ahash_alg *alg =
+		 container_of(halg, struct ahash_alg, halg);
+	struct caam_hash_alg *caam_hash =
+		 container_of(alg, struct caam_hash_alg, ahash_alg);
+	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
+	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
+					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
+					 HASH_MSG_LEN + 32,
+					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
+					 HASH_MSG_LEN + 64,
+					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+	dma_addr_t dma_addr;
+	struct caam_drv_private *priv;
+
+	/*
+	 * Get a Job ring from Job Ring driver to ensure in-order
+	 * crypto request processing per tfm
+	 */
+	ctx->jrdev = caam_jr_alloc();
+	if (IS_ERR(ctx->jrdev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(ctx->jrdev);
+	}
+
+	priv = dev_get_drvdata(ctx->jrdev->parent);
+	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
+					offsetof(struct caam_hash_ctx,
+						 sh_desc_update_dma),
+					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
+		caam_jr_free(ctx->jrdev);
+		return -ENOMEM;
+	}
+
+	ctx->sh_desc_update_dma = dma_addr;
+	ctx->sh_desc_update_first_dma = dma_addr +
+					offsetof(struct caam_hash_ctx,
+						 sh_desc_update_first);
+	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
+						   sh_desc_fin);
+	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
+						      sh_desc_digest);
+
+	/* copy descriptor header template value */
+	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+
+	ctx->ctx_len = runninglen[(ctx->adata.algtype &
+				   OP_ALG_ALGSEL_SUBMASK) >>
+				  OP_ALG_ALGSEL_SHIFT];
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct caam_hash_state));
+	return ahash_set_sh_desc(ahash);
+}
+
+static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
+			       offsetof(struct caam_hash_ctx,
+					sh_desc_update_dma),
+			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+	caam_jr_free(ctx->jrdev);
+}
+
+static void __exit caam_algapi_hash_exit(void)
+{
+	struct caam_hash_alg *t_alg, *n;
+
+	if (!hash_list.next)
+		return;
+
+	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
+		crypto_unregister_ahash(&t_alg->ahash_alg);
+		list_del(&t_alg->entry);
+		kfree(t_alg);
+	}
+}
+
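+/* Build an ahash_alg from a template, either keyed (hmac) or unkeyed */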
+static struct caam_hash_alg *
+caam_hash_alloc(struct caam_hash_template *template,
+		bool keyed)
+{
+	struct caam_hash_alg *t_alg;
+	struct ahash_alg *halg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg) {
+		pr_err("failed to allocate t_alg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	t_alg->ahash_alg = template->template_ahash;
+	halg = &t_alg->ahash_alg;
+	alg = &halg->halg.base;
+
+	if (keyed) {
+		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->hmac_name);
+		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->hmac_driver_name);
+	} else {
+		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->name);
+		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->driver_name);
+		t_alg->ahash_alg.setkey = NULL;
+	}
+	alg->cra_module = THIS_MODULE;
+	alg->cra_init = caam_hash_cra_init;
+	alg->cra_exit = caam_hash_cra_exit;
+	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+	alg->cra_priority = CAAM_CRA_PRIORITY;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_flags = CRYPTO_ALG_ASYNC;
+
+	t_alg->alg_type = template->alg_type;
+
+	return t_alg;
+}
+
+static int __init caam_algapi_hash_init(void)
+{
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	int i = 0, err = 0;
+	struct caam_drv_private *priv;
+	unsigned int md_limit = SHA512_DIGEST_SIZE;
+	u32 cha_inst, cha_vid;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
+	/*
+	 * Register crypto algorithms the device supports.  First, identify
+	 * presence and attributes of MD block.
+	 */
+	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+
+	/*
+	 * Skip registration of any hashing algorithms if MD block
+	 * is not present.
+	 */
+	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
+		return -ENODEV;
+
+	/* Limit digest size based on LP256 */
+	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
+		md_limit = SHA256_DIGEST_SIZE;
+
+	INIT_LIST_HEAD(&hash_list);
+
+	/* register crypto algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
+		struct caam_hash_alg *t_alg;
+		struct caam_hash_template *alg = driver_hash + i;
+
+		/* If MD size is not supported by device, skip registration */
+		if (alg->template_ahash.halg.digestsize > md_limit)
+			continue;
+
+		/* register hmac version */
+		t_alg = caam_hash_alloc(alg, true);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			pr_warn("%s alg allocation failed\n", alg->driver_name);
+			continue;
+		}
+
+		err = crypto_register_ahash(&t_alg->ahash_alg);
+		if (err) {
+			pr_warn("%s alg registration failed: %d\n",
+				t_alg->ahash_alg.halg.base.cra_driver_name,
+				err);
+			kfree(t_alg);
+		} else
+			list_add_tail(&t_alg->entry, &hash_list);
+
+		/* register unkeyed version */
+		t_alg = caam_hash_alloc(alg, false);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			pr_warn("%s alg allocation failed\n", alg->driver_name);
+			continue;
+		}
+
+		err = crypto_register_ahash(&t_alg->ahash_alg);
+		if (err) {
+			pr_warn("%s alg registration failed: %d\n",
+				t_alg->ahash_alg.halg.base.cra_driver_name,
+				err);
+			kfree(t_alg);
+		} else
+			list_add_tail(&t_alg->entry, &hash_list);
+	}
+
+	return err;
+}
+
+module_init(caam_algapi_hash_init);
+module_exit(caam_algapi_hash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG");
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
new file mode 100644
index 0000000..f26d62e
--- /dev/null
+++ b/drivers/crypto/caam/caampkc.c
@@ -0,0 +1,1074 @@
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+ */
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "jr.h"
+#include "error.h"
+#include "desc_constr.h"
+#include "sg_sw_sec4.h"
+#include "caampkc.h"
+
+#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
+#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
+				 sizeof(struct rsa_priv_f1_pdb))
+#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
+				 sizeof(struct rsa_priv_f2_pdb))
+#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
+				 sizeof(struct rsa_priv_f3_pdb))
+
+static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
+			 struct akcipher_request *req)
+{
+	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
+	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+
+	if (edesc->sec4_sg_bytes)
+		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
+				 DMA_TO_DEVICE);
+}
+
+static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
+			  struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
+
+	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
+}
+
+static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
+			      struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
+
+	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+}
+
+static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
+			      struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+	size_t p_sz = key->p_sz;
+	size_t q_sz = key->q_sz;
+
+	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
+}
+
+static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
+			      struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
+	size_t p_sz = key->p_sz;
+	size_t q_sz = key->q_sz;
+
+	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
+}
+
+/* RSA Job Completion handler */
+static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
+{
+	struct akcipher_request *req = context;
+	struct rsa_edesc *edesc;
+
+	if (err)
+		caam_jr_strstatus(dev, err);
+
+	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+	rsa_pub_unmap(dev, edesc, req);
+	rsa_io_unmap(dev, edesc, req);
+	kfree(edesc);
+
+	akcipher_request_complete(req, err);
+}
+
+static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
+			     void *context)
+{
+	struct akcipher_request *req = context;
+	struct rsa_edesc *edesc;
+
+	if (err)
+		caam_jr_strstatus(dev, err);
+
+	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+	rsa_priv_f1_unmap(dev, edesc, req);
+	rsa_io_unmap(dev, edesc, req);
+	kfree(edesc);
+
+	akcipher_request_complete(req, err);
+}
+
+static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
+			     void *context)
+{
+	struct akcipher_request *req = context;
+	struct rsa_edesc *edesc;
+
+	if (err)
+		caam_jr_strstatus(dev, err);
+
+	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+	rsa_priv_f2_unmap(dev, edesc, req);
+	rsa_io_unmap(dev, edesc, req);
+	kfree(edesc);
+
+	akcipher_request_complete(req, err);
+}
+
+static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
+			     void *context)
+{
+	struct akcipher_request *req = context;
+	struct rsa_edesc *edesc;
+
+	if (err)
+		caam_jr_strstatus(dev, err);
+
+	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+	rsa_priv_f3_unmap(dev, edesc, req);
+	rsa_io_unmap(dev, edesc, req);
+	kfree(edesc);
+
+	akcipher_request_complete(req, err);
+}
+
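+/*
+ * Count the leading zero bytes of the data described by @sgl so that the
+ * caller can strip them and hand the CAAM a minimal positive integer.
+ * Returns the number of leading zero bytes, or a negative error code if
+ * the scatterlist holds fewer than @nbytes bytes.
+ */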
+static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
+					unsigned int nbytes,
+					unsigned int flags)
+{
+	struct sg_mapping_iter miter;
+	int lzeros, ents;
+	unsigned int len;
+	unsigned int tbytes = nbytes;
+	const u8 *buff;
+
+	ents = sg_nents_for_len(sgl, nbytes);
+	if (ents < 0)
+		return ents;
+
+	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);
+
+	lzeros = 0;
+	len = 0;
+	while (nbytes > 0) {
+		while (len && !*buff) {
+			lzeros++;
+			len--;
+			buff++;
+		}
+
+		if (len && *buff)
+			break;
+
+		sg_miter_next(&miter);
+		buff = miter.addr;
+		len = miter.length;
+
+		nbytes -= lzeros;
+		lzeros = 0;
+	}
+
+	miter.consumed = lzeros;
+	sg_miter_stop(&miter);
+	nbytes -= lzeros;
+
+	return tbytes - nbytes;
+}
+
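+/*
+ * Allocate an extended descriptor with room for the hardware job descriptor
+ * (@desclen bytes) and, when source or destination span more than one
+ * segment, a sec4 S/G link table. Leading zeros are stripped from the
+ * source beforehand, and source, destination and link table are DMA-mapped
+ * here; the PDB entries are mapped later by the set_rsa_*_pdb() helpers.
+ */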
+static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
+					 size_t desclen)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *dev = ctx->dev;
+	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+	struct rsa_edesc *edesc;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
+	int sgc;
+	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+	int src_nents, dst_nents;
+	int lzeros;
+
+	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
+	if (lzeros < 0)
+		return ERR_PTR(lzeros);
+
+	req->src_len -= lzeros;
+	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
+
+	src_nents = sg_nents_for_len(req->src, req->src_len);
+	dst_nents = sg_nents_for_len(req->dst, req->dst_len);
+
+	if (src_nents > 1)
+		sec4_sg_len = src_nents;
+	if (dst_nents > 1)
+		sec4_sg_len += dst_nents;
+
+	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+	/* allocate space for base edesc, hw desc commands and link tables */
+	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
+			GFP_DMA | flags);
+	if (!edesc)
+		return ERR_PTR(-ENOMEM);
+
+	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+	if (unlikely(!sgc)) {
+		dev_err(dev, "unable to map source\n");
+		goto src_fail;
+	}
+
+	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+	if (unlikely(!sgc)) {
+		dev_err(dev, "unable to map destination\n");
+		goto dst_fail;
+	}
+
+	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
+
+	sec4_sg_index = 0;
+	if (src_nents > 1) {
+		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+		sec4_sg_index += src_nents;
+	}
+	if (dst_nents > 1)
+		sg_to_sec4_sg_last(req->dst, dst_nents,
+				   edesc->sec4_sg + sec4_sg_index, 0);
+
+	/* Save nents for later use in Job Descriptor */
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+
+	if (!sec4_sg_bytes)
+		return edesc;
+
+	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
+		dev_err(dev, "unable to map S/G table\n");
+		goto sec4_sg_fail;
+	}
+
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+	return edesc;
+
+sec4_sg_fail:
+	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+dst_fail:
+	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+src_fail:
+	kfree(edesc);
+	return ERR_PTR(-ENOMEM);
+}
+
+static int set_rsa_pub_pdb(struct akcipher_request *req,
+			   struct rsa_edesc *edesc)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct device *dev = ctx->dev;
+	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
+	int sec4_sg_index = 0;
+
+	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->n_dma)) {
+		dev_err(dev, "Unable to map RSA modulus memory\n");
+		return -ENOMEM;
+	}
+
+	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->e_dma)) {
+		dev_err(dev, "Unable to map RSA public exponent memory\n");
+		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	if (edesc->src_nents > 1) {
+		pdb->sgf |= RSA_PDB_SGF_F;
+		pdb->f_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+	} else {
+		pdb->f_dma = sg_dma_address(req->src);
+	}
+
+	if (edesc->dst_nents > 1) {
+		pdb->sgf |= RSA_PDB_SGF_G;
+		pdb->g_dma = edesc->sec4_sg_dma +
+			     sec4_sg_index * sizeof(struct sec4_sg_entry);
+	} else {
+		pdb->g_dma = sg_dma_address(req->dst);
+	}
+
+	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
+	pdb->f_len = req->src_len;
+
+	return 0;
+}
+
+static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
+			       struct rsa_edesc *edesc)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct device *dev = ctx->dev;
+	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
+	int sec4_sg_index = 0;
+
+	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->n_dma)) {
+		dev_err(dev, "Unable to map modulus memory\n");
+		return -ENOMEM;
+	}
+
+	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->d_dma)) {
+		dev_err(dev, "Unable to map RSA private exponent memory\n");
+		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	if (edesc->src_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+		pdb->g_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+	} else {
+		pdb->g_dma = sg_dma_address(req->src);
+	}
+
+	if (edesc->dst_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+		pdb->f_dma = edesc->sec4_sg_dma +
+			     sec4_sg_index * sizeof(struct sec4_sg_entry);
+	} else {
+		pdb->f_dma = sg_dma_address(req->dst);
+	}
+
+	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
+
+	return 0;
+}
+
+static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
+			       struct rsa_edesc *edesc)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct device *dev = ctx->dev;
+	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+	int sec4_sg_index = 0;
+	size_t p_sz = key->p_sz;
+	size_t q_sz = key->q_sz;
+
+	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->d_dma)) {
+		dev_err(dev, "Unable to map RSA private exponent memory\n");
+		return -ENOMEM;
+	}
+
+	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->p_dma)) {
+		dev_err(dev, "Unable to map RSA prime factor p memory\n");
+		goto unmap_d;
+	}
+
+	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->q_dma)) {
+		dev_err(dev, "Unable to map RSA prime factor q memory\n");
+		goto unmap_p;
+	}
+
+	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
+		dev_err(dev, "Unable to map RSA tmp1 memory\n");
+		goto unmap_q;
+	}
+
+	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
+		dev_err(dev, "Unable to map RSA tmp2 memory\n");
+		goto unmap_tmp1;
+	}
+
+	if (edesc->src_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+		pdb->g_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+	} else {
+		pdb->g_dma = sg_dma_address(req->src);
+	}
+
+	if (edesc->dst_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+		pdb->f_dma = edesc->sec4_sg_dma +
+			     sec4_sg_index * sizeof(struct sec4_sg_entry);
+	} else {
+		pdb->f_dma = sg_dma_address(req->dst);
+	}
+
+	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
+	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
+
+	return 0;
+
+unmap_tmp1:
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+unmap_q:
+	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+unmap_p:
+	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+unmap_d:
+	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+
+	return -ENOMEM;
+}
+
+static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
+			       struct rsa_edesc *edesc)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct device *dev = ctx->dev;
+	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
+	int sec4_sg_index = 0;
+	size_t p_sz = key->p_sz;
+	size_t q_sz = key->q_sz;
+
+	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->p_dma)) {
+		dev_err(dev, "Unable to map RSA prime factor p memory\n");
+		return -ENOMEM;
+	}
+
+	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->q_dma)) {
+		dev_err(dev, "Unable to map RSA prime factor q memory\n");
+		goto unmap_p;
+	}
+
+	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->dp_dma)) {
+		dev_err(dev, "Unable to map RSA exponent dp memory\n");
+		goto unmap_q;
+	}
+
+	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->dq_dma)) {
+		dev_err(dev, "Unable to map RSA exponent dq memory\n");
+		goto unmap_dp;
+	}
+
+	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->c_dma)) {
+		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
+		goto unmap_dq;
+	}
+
+	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
+		dev_err(dev, "Unable to map RSA tmp1 memory\n");
+		goto unmap_qinv;
+	}
+
+	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
+		dev_err(dev, "Unable to map RSA tmp2 memory\n");
+		goto unmap_tmp1;
+	}
+
+	if (edesc->src_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+		pdb->g_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+	} else {
+		pdb->g_dma = sg_dma_address(req->src);
+	}
+
+	if (edesc->dst_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+		pdb->f_dma = edesc->sec4_sg_dma +
+			     sec4_sg_index * sizeof(struct sec4_sg_entry);
+	} else {
+		pdb->f_dma = sg_dma_address(req->dst);
+	}
+
+	pdb->sgf |= key->n_sz;
+	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
+
+	return 0;
+
+unmap_tmp1:
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+unmap_qinv:
+	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
+unmap_dq:
+	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
+unmap_dp:
+	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
+unmap_q:
+	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+unmap_p:
+	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+
+	return -ENOMEM;
+}
+
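+/*
+ * Following the usual akcipher convention, if the destination buffer is
+ * smaller than the modulus, dst_len is updated to the required size and
+ * -EOVERFLOW is returned so that the caller can retry with a large enough
+ * buffer.
+ */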
+static int caam_rsa_enc(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct device *jrdev = ctx->dev;
+	struct rsa_edesc *edesc;
+	int ret;
+
+	if (unlikely(!key->n || !key->e))
+		return -EINVAL;
+
+	if (req->dst_len < key->n_sz) {
+		req->dst_len = key->n_sz;
+		dev_err(jrdev, "Output buffer length less than parameter n\n");
+		return -EOVERFLOW;
+	}
+
+	/* Allocate extended descriptor */
+	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Set RSA Encrypt Protocol Data Block */
+	ret = set_rsa_pub_pdb(req, edesc);
+	if (ret)
+		goto init_fail;
+
+	/* Initialize Job Descriptor */
+	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
+
+	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
+	if (!ret)
+		return -EINPROGRESS;
+
+	rsa_pub_unmap(jrdev, edesc, req);
+
+init_fail:
+	rsa_io_unmap(jrdev, edesc, req);
+	kfree(edesc);
+	return ret;
+}
+
+static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *jrdev = ctx->dev;
+	struct rsa_edesc *edesc;
+	int ret;
+
+	/* Allocate extended descriptor */
+	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
+	ret = set_rsa_priv_f1_pdb(req, edesc);
+	if (ret)
+		goto init_fail;
+
+	/* Initialize Job Descriptor */
+	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
+
+	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
+	if (!ret)
+		return -EINPROGRESS;
+
+	rsa_priv_f1_unmap(jrdev, edesc, req);
+
+init_fail:
+	rsa_io_unmap(jrdev, edesc, req);
+	kfree(edesc);
+	return ret;
+}
+
+static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *jrdev = ctx->dev;
+	struct rsa_edesc *edesc;
+	int ret;
+
+	/* Allocate extended descriptor */
+	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
+	ret = set_rsa_priv_f2_pdb(req, edesc);
+	if (ret)
+		goto init_fail;
+
+	/* Initialize Job Descriptor */
+	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
+
+	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
+	if (!ret)
+		return -EINPROGRESS;
+
+	rsa_priv_f2_unmap(jrdev, edesc, req);
+
+init_fail:
+	rsa_io_unmap(jrdev, edesc, req);
+	kfree(edesc);
+	return ret;
+}
+
+static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *jrdev = ctx->dev;
+	struct rsa_edesc *edesc;
+	int ret;
+
+	/* Allocate extended descriptor */
+	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
+	ret = set_rsa_priv_f3_pdb(req, edesc);
+	if (ret)
+		goto init_fail;
+
+	/* Initialize Job Descriptor */
+	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
+
+	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
+	if (!ret)
+		return -EINPROGRESS;
+
+	rsa_priv_f3_unmap(jrdev, edesc, req);
+
+init_fail:
+	rsa_io_unmap(jrdev, edesc, req);
+	kfree(edesc);
+	return ret;
+}
+
+static int caam_rsa_dec(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	int ret;
+
+	if (unlikely(!key->n || !key->d))
+		return -EINVAL;
+
+	if (req->dst_len < key->n_sz) {
+		req->dst_len = key->n_sz;
+		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
+		return -EOVERFLOW;
+	}
+
+	if (key->priv_form == FORM3)
+		ret = caam_rsa_dec_priv_f3(req);
+	else if (key->priv_form == FORM2)
+		ret = caam_rsa_dec_priv_f2(req);
+	else
+		ret = caam_rsa_dec_priv_f1(req);
+
+	return ret;
+}
+
+static void caam_rsa_free_key(struct caam_rsa_key *key)
+{
+	kzfree(key->d);
+	kzfree(key->p);
+	kzfree(key->q);
+	kzfree(key->dp);
+	kzfree(key->dq);
+	kzfree(key->qinv);
+	kzfree(key->tmp1);
+	kzfree(key->tmp2);
+	kfree(key->e);
+	kfree(key->n);
+	memset(key, 0, sizeof(*key));
+}
+
+static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
+{
+	while (*nbytes && !**ptr) {
+		(*ptr)++;
+		(*nbytes)--;
+	}
+}
+
+/**
+ * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
+ * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q
+ * length, since BER encoding requires that the minimum number of bytes be
+ * used to encode an integer. The decoded dP, dQ and qInv values therefore
+ * have to be zero-padded to the appropriate length.
+ *
+ * @ptr   : pointer to {dP, dQ, qInv} CRT member
+ * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
+ * @dstlen: length in bytes of corresponding p or q prime factor
+ */
+static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
+{
+	u8 *dst;
+
+	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
+	if (!nbytes)
+		return NULL;
+
+	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
+	if (!dst)
+		return NULL;
+
+	memcpy(dst + (dstlen - nbytes), ptr, nbytes);
+
+	return dst;
+}
+
+/**
+ * caam_read_raw_data - Read a raw byte stream as a positive integer.
+ * The function skips the buffer's leading zeros, copies the remaining data
+ * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
+ * the address of the new buffer.
+ *
+ * @buf   : The data to read
+ * @nbytes: The amount of data to read
+ */
+static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
+{
+
+	caam_rsa_drop_leading_zeros(&buf, nbytes);
+	if (!*nbytes)
+		return NULL;
+
+	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
+}
+
+static int caam_rsa_check_key_length(unsigned int len)
+{
+	if (len > 4096)
+		return -EINVAL;
+	return 0;
+}
+
+static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+				unsigned int keylen)
+{
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct rsa_key raw_key = {NULL};
+	struct caam_rsa_key *rsa_key = &ctx->key;
+	int ret;
+
+	/* Free the old RSA key if any */
+	caam_rsa_free_key(rsa_key);
+
+	ret = rsa_parse_pub_key(&raw_key, key, keylen);
+	if (ret)
+		return ret;
+
+	/* Copy key in DMA zone */
+	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+	if (!rsa_key->e)
+		goto err;
+
+	/*
+	 * Skip leading zeros and copy the positive integer to a buffer
+	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+	 * expects a positive integer for the RSA modulus and uses its length as
+	 * decryption output length.
+	 */
+	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
+	if (!rsa_key->n)
+		goto err;
+
+	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
+		caam_rsa_free_key(rsa_key);
+		return -EINVAL;
+	}
+
+	rsa_key->e_sz = raw_key.e_sz;
+	rsa_key->n_sz = raw_key.n_sz;
+
+	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
+
+	return 0;
+err:
+	caam_rsa_free_key(rsa_key);
+	return -ENOMEM;
+}
+
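+/*
+ * Try to build the FORM3 (CRT) representation of the private key. If the
+ * CRT members cannot all be read, the key falls back to FORM2 (p, q, d);
+ * if p or q themselves cannot be read, it stays in FORM1 (n, d), which is
+ * always available from the parsed key.
+ */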
+static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
+				       struct rsa_key *raw_key)
+{
+	struct caam_rsa_key *rsa_key = &ctx->key;
+	size_t p_sz = raw_key->p_sz;
+	size_t q_sz = raw_key->q_sz;
+
+	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
+	if (!rsa_key->p)
+		return;
+	rsa_key->p_sz = p_sz;
+
+	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
+	if (!rsa_key->q)
+		goto free_p;
+	rsa_key->q_sz = q_sz;
+
+	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
+	if (!rsa_key->tmp1)
+		goto free_q;
+
+	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
+	if (!rsa_key->tmp2)
+		goto free_tmp1;
+
+	rsa_key->priv_form = FORM2;
+
+	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
+	if (!rsa_key->dp)
+		return;
+
+	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
+	if (!rsa_key->dq)
+		goto free_dp;
+
+	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
+					  q_sz);
+	if (!rsa_key->qinv)
+		goto free_dq;
+
+	rsa_key->priv_form = FORM3;
+
+	return;
+
+free_dq:
+	kzfree(rsa_key->dq);
+free_dp:
+	kzfree(rsa_key->dp);
+	return;
+
+free_tmp1:
+	kzfree(rsa_key->tmp1);
+free_q:
+	kzfree(rsa_key->q);
+free_p:
+	kzfree(rsa_key->p);
+}
+
+static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+				 unsigned int keylen)
+{
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct rsa_key raw_key = {NULL};
+	struct caam_rsa_key *rsa_key = &ctx->key;
+	int ret;
+
+	/* Free the old RSA key if any */
+	caam_rsa_free_key(rsa_key);
+
+	ret = rsa_parse_priv_key(&raw_key, key, keylen);
+	if (ret)
+		return ret;
+
+	/* Copy key in DMA zone */
+	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
+	if (!rsa_key->d)
+		goto err;
+
+	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+	if (!rsa_key->e)
+		goto err;
+
+	/*
+	 * Skip leading zeros and copy the positive integer to a buffer
+	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+	 * expects a positive integer for the RSA modulus and uses its length as
+	 * decryption output length.
+	 */
+	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
+	if (!rsa_key->n)
+		goto err;
+
+	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
+		caam_rsa_free_key(rsa_key);
+		return -EINVAL;
+	}
+
+	rsa_key->d_sz = raw_key.d_sz;
+	rsa_key->e_sz = raw_key.e_sz;
+	rsa_key->n_sz = raw_key.n_sz;
+
+	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
+	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
+
+	caam_rsa_set_priv_key_form(ctx, &raw_key);
+
+	return 0;
+
+err:
+	caam_rsa_free_key(rsa_key);
+	return -ENOMEM;
+}
+
+static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
+{
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	return ctx->key.n_sz;
+}
+
+/* Per session pkc's driver context creation function */
+static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	ctx->dev = caam_jr_alloc();
+
+	if (IS_ERR(ctx->dev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(ctx->dev);
+	}
+
+	return 0;
+}
+
+/* Per session pkc's driver context cleanup function */
+static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+
+	caam_rsa_free_key(key);
+	caam_jr_free(ctx->dev);
+}
+
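+/*
+ * This registers the raw ("textbook") RSA primitive: .sign/.verify reuse
+ * the raw private/public operations, and any padding scheme is expected to
+ * be layered on top by a generic template such as pkcs1pad(rsa-caam, ...).
+ */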
+static struct akcipher_alg caam_rsa = {
+	.encrypt = caam_rsa_enc,
+	.decrypt = caam_rsa_dec,
+	.sign = caam_rsa_dec,
+	.verify = caam_rsa_enc,
+	.set_pub_key = caam_rsa_set_pub_key,
+	.set_priv_key = caam_rsa_set_priv_key,
+	.max_size = caam_rsa_max_size,
+	.init = caam_rsa_init_tfm,
+	.exit = caam_rsa_exit_tfm,
+	.reqsize = sizeof(struct caam_rsa_req_ctx),
+	.base = {
+		.cra_name = "rsa",
+		.cra_driver_name = "rsa-caam",
+		.cra_priority = 3000,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
+	},
+};
+
+/* Public Key Cryptography module initialization handler */
+static int __init caam_pkc_init(void)
+{
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	struct caam_drv_private *priv;
+	u32 cha_inst, pk_inst;
+	int err;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
+	/* Determine public key hardware accelerator presence. */
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+
+	/* Do not register algorithms if PKHA is not present. */
+	if (!pk_inst)
+		return -ENODEV;
+
+	err = crypto_register_akcipher(&caam_rsa);
+	if (err)
+		dev_warn(ctrldev, "%s alg registration failed\n",
+			 caam_rsa.base.cra_driver_name);
+	else
+		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
+
+	return err;
+}
+
+static void __exit caam_pkc_exit(void)
+{
+	crypto_unregister_akcipher(&caam_rsa);
+}
+
+module_init(caam_pkc_init);
+module_exit(caam_pkc_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
+MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
new file mode 100644
index 0000000..82645bc
--- /dev/null
+++ b/drivers/crypto/caam/caampkc.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
+ * all the required key parameters and the input and output pointers.
+ */
+
+#ifndef _PKC_DESC_H_
+#define _PKC_DESC_H_
+#include "compat.h"
+#include "pdb.h"
+
+/**
+ * caam_priv_key_form - CAAM RSA private key representation
+ * A CAAM RSA private key may take one of three forms.
+ *
+ * 1. The first representation consists of the pair (n, d), where the
+ *    components have the following meanings:
+ *        n      the RSA modulus
+ *        d      the RSA private exponent
+ *
+ * 2. The second representation consists of the triplet (p, q, d), where the
+ *    components have the following meanings:
+ *        p      the first prime factor of the RSA modulus n
+ *        q      the second prime factor of the RSA modulus n
+ *        d      the RSA private exponent
+ *
+ * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
+ *    where the components have the following meanings:
+ *        p      the first prime factor of the RSA modulus n
+ *        q      the second prime factor of the RSA modulus n
+ *        dP     the first factor's CRT exponent
+ *        dQ     the second factor's CRT exponent
+ *        qInv   the (first) CRT coefficient
+ *
+ * The benefit of using the third or the second key form is lower computational
+ * cost for the decryption and signature operations.
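+ *
+ * As a rough sketch of why the third form is cheaper: the private operation
+ * m = c^d mod n can be computed via the CRT as
+ *        m1 = c^dP mod p
+ *        m2 = c^dQ mod q
+ *        h  = qInv * (m1 - m2) mod p
+ *        m  = m2 + h * q
+ * i.e. two exponentiations with roughly half-size exponents and moduli
+ * instead of one full-size exponentiation.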
+ */
+enum caam_priv_key_form {
+	FORM1,
+	FORM2,
+	FORM3
+};
+
+/**
+ * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
+ * @n           : RSA modulus raw byte stream
+ * @e           : RSA public exponent raw byte stream
+ * @d           : RSA private exponent raw byte stream
+ * @p           : RSA prime factor p of RSA modulus n
+ * @q           : RSA prime factor q of RSA modulus n
+ * @dp          : RSA CRT exponent of p
+ * @dq          : RSA CRT exponent of q
+ * @qinv        : RSA CRT coefficient
+ * @tmp1        : CAAM uses this temporary buffer as internal state buffer.
+ *                It is assumed to be as long as p.
+ * @tmp2        : CAAM uses this temporary buffer as internal state buffer.
+ *                It is assumed to be as long as q.
+ * @n_sz        : length in bytes of RSA modulus n
+ * @e_sz        : length in bytes of RSA public exponent
+ * @d_sz        : length in bytes of RSA private exponent
+ * @p_sz        : length in bytes of RSA prime factor p of RSA modulus n
+ * @q_sz        : length in bytes of RSA prime factor q of RSA modulus n
+ * @priv_form   : CAAM RSA private key representation
+ */
+struct caam_rsa_key {
+	u8 *n;
+	u8 *e;
+	u8 *d;
+	u8 *p;
+	u8 *q;
+	u8 *dp;
+	u8 *dq;
+	u8 *qinv;
+	u8 *tmp1;
+	u8 *tmp2;
+	size_t n_sz;
+	size_t e_sz;
+	size_t d_sz;
+	size_t p_sz;
+	size_t q_sz;
+	enum caam_priv_key_form priv_form;
+};
+
+/**
+ * caam_rsa_ctx - per session context.
+ * @key         : RSA key in DMA zone
+ * @dev         : device structure
+ */
+struct caam_rsa_ctx {
+	struct caam_rsa_key key;
+	struct device *dev;
+};
+
+/**
+ * caam_rsa_req_ctx - per request context.
+ * @src: input scatterlist (stripped of leading zeros)
+ */
+struct caam_rsa_req_ctx {
+	struct scatterlist src[2];
+};
+
+/**
+ * rsa_edesc - s/w-extended rsa descriptor
+ * @src_nents     : number of segments in input scatterlist
+ * @dst_nents     : number of segments in output scatterlist
+ * @sec4_sg_bytes : length of h/w link table
+ * @sec4_sg_dma   : dma address of h/w link table
+ * @sec4_sg       : pointer to h/w link table
+ * @pdb           : specific RSA Protocol Data Block (PDB)
+ * @hw_desc       : descriptor followed by link tables if any
+ */
+struct rsa_edesc {
+	int src_nents;
+	int dst_nents;
+	int sec4_sg_bytes;
+	dma_addr_t sec4_sg_dma;
+	struct sec4_sg_entry *sec4_sg;
+	union {
+		struct rsa_pub_pdb pub;
+		struct rsa_priv_f1_pdb priv_f1;
+		struct rsa_priv_f2_pdb priv_f2;
+		struct rsa_priv_f3_pdb priv_f3;
+	} pdb;
+	u32 hw_desc[];
+};
+
+/* Descriptor construction primitives. */
+void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
+void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
+void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
+void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
+
+#endif
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
new file mode 100644
index 0000000..fde07d4
--- /dev/null
+++ b/drivers/crypto/caam/caamrng.c
@@ -0,0 +1,370 @@
+/*
+ * caam - Freescale FSL CAAM support for hw_random
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+ * Relationship between job descriptors and shared descriptors:
+ *
+ * ---------------                     --------------
+ * | JobDesc #0  |-------------------->| ShareDesc  |
+ * | *(buffer 0) |      |------------->| (generate) |
+ * ---------------      |              | (move)     |
+ *                      |              | (store)    |
+ * ---------------      |              --------------
+ * | JobDesc #1  |------|
+ * | *(buffer 1) |
+ * ---------------
+ *
+ * A job desc looks like this:
+ *
+ * ---------------------
+ * | Header            |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR       |
+ * | (output buffer)   |
+ * ---------------------
+ *
+ * The SharedDesc never changes, and each job descriptor points to one of two
+ * buffers for each device, from which the data will be copied into the
+ * requested destination.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+
+/*
+ * Maximum buffer size: maximum number of random, cache-aligned bytes that
+ * will be generated and moved to seq out ptr (extlen not allowed)
+ */
+#define RN_BUF_SIZE			(0xffff / L1_CACHE_BYTES * \
+					 L1_CACHE_BYTES)
+
+/* length of descriptors */
+#define DESC_JOB_O_LEN			(CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
+#define DESC_RNG_LEN			(3 * CAAM_CMD_SZ)
+
+/* Buffer, its dma address and lock */
+struct buf_data {
+	u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
+	dma_addr_t addr;
+	struct completion filled;
+	u32 hw_desc[DESC_JOB_O_LEN];
+#define BUF_NOT_EMPTY 0
+#define BUF_EMPTY 1
+#define BUF_PENDING 2  /* Empty, but with job pending --don't submit another */
+	atomic_t empty;
+};
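+
+/*
+ * Buffer life cycle: a buffer starts out BUF_EMPTY, becomes BUF_PENDING when
+ * submit_job() enqueues a refill job for it, and moves to BUF_NOT_EMPTY once
+ * rng_done() completes ->filled. caam_read() sets it back to BUF_EMPTY after
+ * the buffer has been fully consumed.
+ */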
+
+/* rng per-device context */
+struct caam_rng_ctx {
+	struct device *jrdev;
+	dma_addr_t sh_desc_dma;
+	u32 sh_desc[DESC_RNG_LEN];
+	unsigned int cur_buf_idx;
+	int current_buf;
+	struct buf_data bufs[2];
+};
+
+static struct caam_rng_ctx *rng_ctx;
+
+static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
+{
+	if (bd->addr)
+		dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
+				 DMA_FROM_DEVICE);
+}
+
+static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
+{
+	struct device *jrdev = ctx->jrdev;
+
+	if (ctx->sh_desc_dma)
+		dma_unmap_single(jrdev, ctx->sh_desc_dma,
+				 desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
+	rng_unmap_buf(jrdev, &ctx->bufs[0]);
+	rng_unmap_buf(jrdev, &ctx->bufs[1]);
+}
+
+static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
+{
+	struct buf_data *bd;
+
+	bd = container_of(desc, struct buf_data, hw_desc[0]);
+
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	atomic_set(&bd->empty, BUF_NOT_EMPTY);
+	complete(&bd->filled);
+
+	/* Buffer refilled, invalidate cache */
+	dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
+#endif
+}
+
+static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
+{
+	struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc = bd->hw_desc;
+	int err;
+
+	dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
+	init_completion(&bd->filled);
+	err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
+	if (err)
+		complete(&bd->filled); /* don't wait on failed job*/
+	else
+		atomic_inc(&bd->empty); /* note if pending */
+
+	return err;
+}
+
+static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct caam_rng_ctx *ctx = rng_ctx;
+	struct buf_data *bd = &ctx->bufs[ctx->current_buf];
+	int next_buf_idx, copied_idx;
+	int err;
+
+	if (atomic_read(&bd->empty)) {
+		/* try to submit job if there wasn't one */
+		if (atomic_read(&bd->empty) == BUF_EMPTY) {
+			err = submit_job(ctx, 1);
+			/* if can't submit job, can't even wait */
+			if (err)
+				return 0;
+		}
+		/* no immediate data, so exit if not waiting */
+		if (!wait)
+			return 0;
+
+		/* waiting for pending job */
+		if (atomic_read(&bd->empty))
+			wait_for_completion(&bd->filled);
+	}
+
+	next_buf_idx = ctx->cur_buf_idx + max;
+	dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
+		 __func__, ctx->current_buf, ctx->cur_buf_idx);
+
+	/* if enough data in current buffer */
+	if (next_buf_idx < RN_BUF_SIZE) {
+		memcpy(data, bd->buf + ctx->cur_buf_idx, max);
+		ctx->cur_buf_idx = next_buf_idx;
+		return max;
+	}
+
+	/* else, copy what's left... */
+	copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
+	memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
+	ctx->cur_buf_idx = 0;
+	atomic_set(&bd->empty, BUF_EMPTY);
+
+	/* ...refill... */
+	submit_job(ctx, 1);
+
+	/* and use next buffer */
+	ctx->current_buf = !ctx->current_buf;
+	dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);
+
+	/* since there already is some data read, don't wait */
+	return copied_idx + caam_read(rng, data + copied_idx,
+				      max - copied_idx, false);
+}
+
+static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
+{
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc = ctx->sh_desc;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Generate random bytes */
+	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
+
+	/* Store bytes */
+	append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);
+
+	ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+		       desc, desc_bytes(desc), 1);
+#endif
+	return 0;
+}
+
+static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+{
+	struct device *jrdev = ctx->jrdev;
+	struct buf_data *bd = &ctx->bufs[buf_id];
+	u32 *desc = bd->hw_desc;
+	int sh_len = desc_len(ctx->sh_desc);
+
+	init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
+			     HDR_REVERSE);
+
+	bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, bd->addr)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
+
+	append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+		       desc, desc_bytes(desc), 1);
+#endif
+	return 0;
+}
+
+static void caam_cleanup(struct hwrng *rng)
+{
+	int i;
+	struct buf_data *bd;
+
+	for (i = 0; i < 2; i++) {
+		bd = &rng_ctx->bufs[i];
+		if (atomic_read(&bd->empty) == BUF_PENDING)
+			wait_for_completion(&bd->filled);
+	}
+
+	rng_unmap_ctx(rng_ctx);
+}
+
+static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+{
+	struct buf_data *bd = &ctx->bufs[buf_id];
+	int err;
+
+	err = rng_create_job_desc(ctx, buf_id);
+	if (err)
+		return err;
+
+	atomic_set(&bd->empty, BUF_EMPTY);
+	submit_job(ctx, buf_id == ctx->current_buf);
+	wait_for_completion(&bd->filled);
+
+	return 0;
+}
+
+static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
+{
+	int err;
+
+	ctx->jrdev = jrdev;
+
+	err = rng_create_sh_desc(ctx);
+	if (err)
+		return err;
+
+	ctx->current_buf = 0;
+	ctx->cur_buf_idx = 0;
+
+	err = caam_init_buf(ctx, 0);
+	if (err)
+		return err;
+
+	return caam_init_buf(ctx, 1);
+}
+
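+/*
+ * Once registered with the hw_random core, this source is exposed to
+ * userspace through /dev/hwrng and can be selected via the framework's
+ * rng_current sysfs attribute.
+ */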
+static struct hwrng caam_rng = {
+	.name		= "rng-caam",
+	.cleanup	= caam_cleanup,
+	.read		= caam_read,
+};
+
+static void __exit caam_rng_exit(void)
+{
+	caam_jr_free(rng_ctx->jrdev);
+	hwrng_unregister(&caam_rng);
+	kfree(rng_ctx);
+}
+
+static int __init caam_rng_init(void)
+{
+	struct device *dev;
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	struct caam_drv_private *priv;
+	int err;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
+	/* Check for an instantiated RNG before registration */
+	if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
+		return -ENODEV;
+
+	dev = caam_jr_alloc();
+	if (IS_ERR(dev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(dev);
+	}
+	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
+	if (!rng_ctx) {
+		err = -ENOMEM;
+		goto free_caam_alloc;
+	}
+	err = caam_init_rng(rng_ctx, dev);
+	if (err)
+		goto free_rng_ctx;
+
+	dev_info(dev, "registering rng-caam\n");
+	return hwrng_register(&caam_rng);
+
+free_rng_ctx:
+	kfree(rng_ctx);
+free_caam_alloc:
+	caam_jr_free(dev);
+	return err;
+}
+
+module_init(caam_rng_init);
+module_exit(caam_rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG");
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
new file mode 100644
index 0000000..1c71e0c
--- /dev/null
+++ b/drivers/crypto/caam/compat.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CAAM_COMPAT_H
+#define CAAM_COMPAT_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <linux/hash.h>
+#include <linux/hw_random.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/circ_buf.h>
+#include <linux/clk.h>
+#include <net/xfrm.h>
+
+#include <crypto/algapi.h>
+#include <crypto/null.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/gcm.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/internal/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/akcipher.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+
+#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
new file mode 100644
index 0000000..538c01f
--- /dev/null
+++ b/drivers/crypto/caam/ctrl.c
@@ -0,0 +1,907 @@
+/*
+ * CAAM control-plane driver backend
+ * Controller-level driver, kernel property detection, initialization
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sys_soc.h>
+
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "jr.h"
+#include "desc_constr.h"
+#include "ctrl.h"
+
+bool caam_little_end;
+EXPORT_SYMBOL(caam_little_end);
+bool caam_dpaa2;
+EXPORT_SYMBOL(caam_dpaa2);
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
+
+#ifdef CONFIG_CAAM_QI
+#include "qi.h"
+#endif
+
+/*
+ * i.MX targets tend to have clock control subsystems that can
+ * enable/disable clocking to our device.
+ */
+static inline struct clk *caam_drv_identify_clk(struct device *dev,
+						char *clk_name)
+{
+	return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
+}
+
+/*
+ * Descriptor to instantiate RNG State Handle 0 in normal mode and
+ * load the JDKEK, TDKEK and TDSK registers
+ */
+static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
+{
+	u32 *jump_cmd, op_flags;
+
+	init_job_desc(desc, 0);
+
+	op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+			(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
+
+	/* INIT RNG in non-test mode */
+	append_operation(desc, op_flags);
+
+	if (!handle && do_sk) {
+		/*
+		 * For SH0, Secure Keys must be generated as well
+		 */
+
+		/* wait for done */
+		jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
+		set_jump_tgt_here(desc, jump_cmd);
+
+		/*
+		 * load 1 to clear written reg:
+		 * resets the done interrupt and returns the RNG to idle.
+		 */
+		append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+
+		/* Initialize State Handle  */
+		append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+				 OP_ALG_AAI_RNG4_SK);
+	}
+
+	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
+}
+
+/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
+static void build_deinstantiation_desc(u32 *desc, int handle)
+{
+	init_job_desc(desc, 0);
+
+	/* Uninstantiate State Handle 0 */
+	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+			 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
+
+	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
+}
+
+/*
+ * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
+ *			  the software (no JR/QI used).
+ * @ctrldev - pointer to device
+ * @status - descriptor status, after being run
+ *
+ * Return: - 0 if no error occurred
+ *	   - -ENODEV if the DECO couldn't be acquired
+ *	   - -EAGAIN if an error occurred while executing the descriptor
+ */
+static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
+					u32 *status)
+{
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+	struct caam_deco __iomem *deco = ctrlpriv->deco;
+	unsigned int timeout = 100000;
+	u32 deco_dbg_reg, flags;
+	int i;
+
+	if (ctrlpriv->virt_en == 1) {
+		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
+
+		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
+		       --timeout)
+			cpu_relax();
+
+		timeout = 100000;
+	}
+
+	clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);
+
+	while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
+	       --timeout)
+		cpu_relax();
+
+	if (!timeout) {
+		dev_err(ctrldev, "failed to acquire DECO 0\n");
+		clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
+		return -ENODEV;
+	}
+
+	for (i = 0; i < desc_len(desc); i++)
+		wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));
+
+	flags = DECO_JQCR_WHL;
+	/*
+	 * If the descriptor is 4 or more words long, the FOUR bit in the
+	 * JRCTRL register must be set.
+	 */
+	if (desc_len(desc) >= 4)
+		flags |= DECO_JQCR_FOUR;
+
+	/* Instruct the DECO to execute it */
+	clrsetbits_32(&deco->jr_ctl_hi, 0, flags);
+
+	timeout = 10000000;
+	do {
+		deco_dbg_reg = rd_reg32(&deco->desc_dbg);
+		/*
+		 * If an error occurred in the descriptor, then
+		 * the DECO status field will be set to 0x0D
+		 */
+		if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
+		    DESC_DBG_DECO_STAT_HOST_ERR)
+			break;
+		cpu_relax();
+	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
+
+	*status = rd_reg32(&deco->op_status_hi) &
+		  DECO_OP_STATUS_HI_ERR_MASK;
+
+	if (ctrlpriv->virt_en == 1)
+		clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);
+
+	/* Mark the DECO as free */
+	clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
+
+	if (!timeout)
+		return -EAGAIN;
+
+	return 0;
+}
+
+/*
+ * instantiate_rng - builds and executes a descriptor on DECO0,
+ *		     which initializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ *			for the RNG4 state handles which exist in
+ *			the RNG4 block: 1 if it's been instantiated
+ *			by an external entity, 0 otherwise.
+ * @gen_sk  - generate data to be loaded into the JDKEK, TDKEK and TDSK;
+ *	      Caution: this can be done only once; if the keys need to be
+ *	      regenerated, a POR is required
+ *
+ * Return: - 0 if no error occurred
+ *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
+ *	   - -ENODEV if DECO0 couldn't be acquired
+ *	   - -EAGAIN if an error occurred when executing the descriptor
+ *	      e.g. an RNG hardware error due to not "good enough"
+ *	      entropy being acquired.
+ */
+static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+			   int gen_sk)
+{
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+	struct caam_ctrl __iomem *ctrl;
+	u32 *desc, status = 0, rdsta_val;
+	int ret = 0, sh_idx;
+
+	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+		/*
+		 * If the corresponding bit is set, this state handle
+		 * was initialized by somebody else, so it's left alone.
+		 */
+		if ((1 << sh_idx) & state_handle_mask)
+			continue;
+
+		/* Create the descriptor for instantiating RNG State Handle */
+		build_instantiation_desc(desc, sh_idx, gen_sk);
+
+		/* Try to run it through DECO0 */
+		ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+		/*
+		 * If ret is not 0, or descriptor status is not 0, then
+		 * something went wrong. No need to try the next state
+		 * handle (if available), bail out here.
+		 * Also, if for some reason the State Handle didn't get
+		 * instantiated even though the descriptor finished
+		 * without any error (HW optimizations for later
+		 * CAAM eras), then try again.
+		 */
+		if (ret)
+			break;
+
+		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
+		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
+		    !(rdsta_val & (1 << sh_idx))) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
+		/* Clear the contents before recreating the descriptor */
+		memset(desc, 0x00, CAAM_CMD_SZ * 7);
+	}
+
+	kfree(desc);
+
+	return ret;
+}
+
+/*
+ * deinstantiate_rng - builds and executes a descriptor on DECO0,
+ *		       which deinitializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ *			for the RNG4 state handles which exist in
+ *			the RNG4 block: 1 if it's been instantiated
+ *
+ * Return: - 0 if no error occurred
+ *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
+ *	   - -ENODEV if DECO0 couldn't be acquired
+ *	   - -EAGAIN if an error occurred when executing the descriptor
+ */
+static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+{
+	u32 *desc, status;
+	int sh_idx, ret = 0;
+
+	desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+		/*
+		 * If the corresponding bit is set, then it means the state
+		 * handle was initialized by us, and thus it needs to be
+		 * deinitialized as well
+		 */
+		if ((1 << sh_idx) & state_handle_mask) {
+			/*
+			 * Create the descriptor for deinstantiating this state
+			 * handle
+			 */
+			build_deinstantiation_desc(desc, sh_idx);
+
+			/* Try to run it through DECO0 */
+			ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+			if (ret ||
+			    (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
+				dev_err(ctrldev,
+					"Failed to deinstantiate RNG4 SH%d\n",
+					sh_idx);
+				break;
+			}
+			dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
+		}
+	}
+
+	kfree(desc);
+
+	return ret;
+}
+
+static int caam_remove(struct platform_device *pdev)
+{
+	struct device *ctrldev;
+	struct caam_drv_private *ctrlpriv;
+	struct caam_ctrl __iomem *ctrl;
+
+	ctrldev = &pdev->dev;
+	ctrlpriv = dev_get_drvdata(ctrldev);
+	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+
+	/* Remove platform devices under the crypto node */
+	of_platform_depopulate(ctrldev);
+
+#ifdef CONFIG_CAAM_QI
+	if (ctrlpriv->qidev)
+		caam_qi_shutdown(ctrlpriv->qidev);
+#endif
+
+	/*
+	 * De-initialize RNG state handles initialized by this driver.
+	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+	 */
+	if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
+		deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+
+	/* Shut down debug views */
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove_recursive(ctrlpriv->dfs_root);
+#endif
+
+	/* Unmap controller region */
+	iounmap(ctrl);
+
+	/* shut clocks off before finalizing shutdown */
+	clk_disable_unprepare(ctrlpriv->caam_ipg);
+	if (ctrlpriv->caam_mem)
+		clk_disable_unprepare(ctrlpriv->caam_mem);
+	clk_disable_unprepare(ctrlpriv->caam_aclk);
+	if (ctrlpriv->caam_emi_slow)
+		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+	return 0;
+}
+
+/*
+ * kick_trng - sets the various parameters for enabling the initialization
+ *	       of the RNG4 block in CAAM
+ * @pdev - pointer to the platform device
+ * @ent_delay - Defines the length (in system clocks) of each entropy sample.
+ */
+static void kick_trng(struct platform_device *pdev, int ent_delay)
+{
+	struct device *ctrldev = &pdev->dev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+	struct caam_ctrl __iomem *ctrl;
+	struct rng4tst __iomem *r4tst;
+	u32 val;
+
+	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+	r4tst = &ctrl->r4tst[0];
+
+	/* put RNG4 into program mode */
+	clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);
+
+	/*
+	 * Performance-wise, it does not make sense to set the delay to a
+	 * value lower than the last one that worked (i.e. the one with
+	 * which the state handles were instantiated properly). Thus,
+	 * instead of wasting time trying to set the values controlling
+	 * the sample frequency, the function simply returns.
+	 */
+	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
+	      >> RTSDCTL_ENT_DLY_SHIFT;
+	if (ent_delay <= val)
+		goto start_rng;
+
+	val = rd_reg32(&r4tst->rtsdctl);
+	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
+	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
+	wr_reg32(&r4tst->rtsdctl, val);
+	/* min. freq. count, equal to 1/4 of the entropy sample length */
+	wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
+	/* disable maximum frequency count */
+	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
+	/* read the control register */
+	val = rd_reg32(&r4tst->rtmctl);
+start_rng:
+	/*
+	 * select raw sampling in both entropy shifter
+	 * and statistical checker; put RNG4 into run mode
+	 */
+	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
+}
+
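+/*
+ * Table-based era lookup for parts that predate CAAM era 6, where the CCB
+ * version register does not report the era directly: match on the
+ * (IP ID, major revision) pair read from the CAAM ID register.
+ */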
+static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
+{
+	static const struct {
+		u16 ip_id;
+		u8 maj_rev;
+		u8 era;
+	} id[] = {
+		{0x0A10, 1, 1},
+		{0x0A10, 2, 2},
+		{0x0A12, 1, 3},
+		{0x0A14, 1, 3},
+		{0x0A14, 2, 4},
+		{0x0A16, 1, 4},
+		{0x0A10, 3, 4},
+		{0x0A11, 1, 4},
+		{0x0A18, 1, 4},
+		{0x0A11, 2, 5},
+		{0x0A12, 2, 5},
+		{0x0A13, 1, 5},
+		{0x0A1C, 1, 5}
+	};
+	u32 ccbvid, id_ms;
+	u8 maj_rev, era;
+	u16 ip_id;
+	int i;
+
+	ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
+	era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
+	if (era)	/* This is '0' prior to CAAM ERA-6 */
+		return era;
+
+	id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
+	ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
+	maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;
+
+	for (i = 0; i < ARRAY_SIZE(id); i++)
+		if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
+			return id[i].era;
+
+	return -ENOTSUPP;
+}
+
+/**
+ * caam_get_era() - Return the ERA of the SEC on the SoC, based on the
+ * optional "fsl,sec-era" device tree property, which is updated by u-boot.
+ * If this property is not present, an attempt is made to retrieve the
+ * CAAM era via register reads instead.
+ **/
+static int caam_get_era(struct caam_ctrl __iomem *ctrl)
+{
+	struct device_node *caam_node;
+	int ret;
+	u32 prop;
+
+	caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
+	of_node_put(caam_node);
+
+	if (!ret)
+		return prop;
+	else
+		return caam_get_era_from_hw(ctrl);
+}
+
+static const struct of_device_id caam_match[] = {
+	{
+		.compatible = "fsl,sec-v4.0",
+	},
+	{
+		.compatible = "fsl,sec4.0",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, caam_match);
+
+/* Probe routine for CAAM top (controller) level */
+static int caam_probe(struct platform_device *pdev)
+{
+	int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+	u64 caam_id;
+	static const struct soc_device_attribute imx_soc[] = {
+		{.family = "Freescale i.MX"},
+		{},
+	};
+	struct device *dev;
+	struct device_node *nprop, *np;
+	struct caam_ctrl __iomem *ctrl;
+	struct caam_drv_private *ctrlpriv;
+	struct clk *clk;
+#ifdef CONFIG_DEBUG_FS
+	struct caam_perfmon *perfmon;
+#endif
+	u32 scfgr, comp_params;
+	u32 cha_vid_ls;
+	int pg_size;
+	int BLOCK_OFFSET = 0;
+
+	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
+	if (!ctrlpriv)
+		return -ENOMEM;
+
+	dev = &pdev->dev;
+	dev_set_drvdata(dev, ctrlpriv);
+	nprop = pdev->dev.of_node;
+
+	caam_imx = (bool)soc_device_match(imx_soc);
+
+	/* Enable clocking */
+	clk = caam_drv_identify_clk(&pdev->dev, "ipg");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev,
+			"can't identify CAAM ipg clk: %d\n", ret);
+		return ret;
+	}
+	ctrlpriv->caam_ipg = clk;
+
+	if (!of_machine_is_compatible("fsl,imx7d") &&
+	    !of_machine_is_compatible("fsl,imx7s")) {
+		clk = caam_drv_identify_clk(&pdev->dev, "mem");
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			dev_err(&pdev->dev,
+				"can't identify CAAM mem clk: %d\n", ret);
+			return ret;
+		}
+		ctrlpriv->caam_mem = clk;
+	}
+
+	clk = caam_drv_identify_clk(&pdev->dev, "aclk");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev,
+			"can't identify CAAM aclk clk: %d\n", ret);
+		return ret;
+	}
+	ctrlpriv->caam_aclk = clk;
+
+	if (!of_machine_is_compatible("fsl,imx6ul") &&
+	    !of_machine_is_compatible("fsl,imx7d") &&
+	    !of_machine_is_compatible("fsl,imx7s")) {
+		clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			dev_err(&pdev->dev,
+				"can't identify CAAM emi_slow clk: %d\n", ret);
+			return ret;
+		}
+		ctrlpriv->caam_emi_slow = clk;
+	}
+
+	ret = clk_prepare_enable(ctrlpriv->caam_ipg);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
+		return ret;
+	}
+
+	if (ctrlpriv->caam_mem) {
+		ret = clk_prepare_enable(ctrlpriv->caam_mem);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
+				ret);
+			goto disable_caam_ipg;
+		}
+	}
+
+	ret = clk_prepare_enable(ctrlpriv->caam_aclk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
+		goto disable_caam_mem;
+	}
+
+	if (ctrlpriv->caam_emi_slow) {
+		ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+				ret);
+			goto disable_caam_aclk;
+		}
+	}
+
+	/* Get configuration properties from device tree */
+	/* First, get register page */
+	ctrl = of_iomap(nprop, 0);
+	if (ctrl == NULL) {
+		dev_err(dev, "caam: of_iomap() failed\n");
+		ret = -ENOMEM;
+		goto disable_caam_emi_slow;
+	}
+
+	caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
+				  (CSTA_PLEND | CSTA_ALT_PLEND));
+
+	/* Find the page size using the CTPR_MS register */
+	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
+	pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
+
+	/* Set BLOCK_OFFSET based on the page size supported by
+	 * the platform
+	 */
+	if (pg_size == 0)
+		BLOCK_OFFSET = PG_SIZE_4K;
+	else
+		BLOCK_OFFSET = PG_SIZE_64K;
+
+	ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
+	ctrlpriv->assure = (struct caam_assurance __iomem __force *)
+			   ((__force uint8_t *)ctrl +
+			    BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
+			   );
+	ctrlpriv->deco = (struct caam_deco __iomem __force *)
+			 ((__force uint8_t *)ctrl +
+			 BLOCK_OFFSET * DECO_BLOCK_NUMBER
+			 );
+
+	/* Get the IRQ of the controller (for security violations only) */
+	ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
+
+	/*
+	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
+	 * long pointers in master configuration register.
+	 * In case of SoCs with Management Complex, MC f/w performs
+	 * the configuration.
+	 */
+	caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
+	np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
+	ctrlpriv->mc_en = !!np;
+	of_node_put(np);
+
+	if (!ctrlpriv->mc_en)
+		clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+			      MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
+			      MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+			      (sizeof(dma_addr_t) == sizeof(u64) ?
+			       MCFGR_LONG_PTR : 0));
+
+	/*
+	 * Read the Compile Time parameters and SCFGR to determine
+	 * if Virtualization is enabled for this platform
+	 */
+	scfgr = rd_reg32(&ctrl->scfgr);
+
+	ctrlpriv->virt_en = 0;
+	if (comp_params & CTPR_MS_VIRT_EN_INCL) {
+		/* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
+		 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
+		 */
+		if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
+		    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
+		       (scfgr & SCFGR_VIRT_EN)))
+				ctrlpriv->virt_en = 1;
+	} else {
+		/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
+		if (comp_params & CTPR_MS_VIRT_EN_POR)
+				ctrlpriv->virt_en = 1;
+	}
+
+	if (ctrlpriv->virt_en == 1)
+		clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
+			      JRSTART_JR1_START | JRSTART_JR2_START |
+			      JRSTART_JR3_START);
+
+	if (sizeof(dma_addr_t) == sizeof(u64)) {
+		if (caam_dpaa2)
+			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
+		else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
+			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+		else
+			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
+	} else {
+		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	}
+	if (ret) {
+		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+		goto iounmap_ctrl;
+	}
+
+	ctrlpriv->era = caam_get_era(ctrl);
+
+	ret = of_platform_populate(nprop, caam_match, NULL, dev);
+	if (ret) {
+		dev_err(dev, "JR platform devices creation error\n");
+		goto iounmap_ctrl;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	/*
+	 * FIXME: needs better naming distinction, as some amalgamation of
+	 * "caam" and nprop->full_name. The OF name isn't distinctive,
+	 * but does separate instances
+	 */
+	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
+
+	ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+#endif
+
+	ring = 0;
+	for_each_available_child_of_node(nprop, np)
+		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+					     ((__force uint8_t *)ctrl +
+					     (ring + JR_BLOCK_NUMBER) *
+					      BLOCK_OFFSET
+					     );
+			ctrlpriv->total_jobrs++;
+			ring++;
+		}
+
+	/* Check to see if (DPAA 1.x) QI present. If so, enable */
+	ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
+	if (ctrlpriv->qi_present && !caam_dpaa2) {
+		ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
+			       ((__force uint8_t *)ctrl +
+				 BLOCK_OFFSET * QI_BLOCK_NUMBER
+			       );
+		/* This is all that's required to physically enable QI */
+		wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
+
+		/* If QMAN driver is present, init CAAM-QI backend */
+#ifdef CONFIG_CAAM_QI
+		ret = caam_qi_init(pdev);
+		if (ret)
+			dev_err(dev, "caam qi i/f init failed: %d\n", ret);
+#endif
+	}
+
+	/* If no QI and no rings specified, quit and go home */
+	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
+		dev_err(dev, "no queues configured, terminating\n");
+		ret = -ENOMEM;
+		goto caam_remove;
+	}
+
+	cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
+
+	/*
+	 * If SEC has RNG version >= 4 and the RNG state handle has not
+	 * already been instantiated, do RNG instantiation.
+	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+	 */
+	if (!ctrlpriv->mc_en &&
+	    (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+		ctrlpriv->rng4_sh_init =
+			rd_reg32(&ctrl->r4tst[0].rdsta);
+		/*
+		 * If the secure keys (TDKEK, JDKEK, TDSK) were already
+		 * generated, signal this to the function that is instantiating
+		 * the state handles. An error would occur if RNG4 attempts
+		 * to regenerate these keys before the next POR.
+		 */
+		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
+		ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
+		do {
+			int inst_handles =
+				rd_reg32(&ctrl->r4tst[0].rdsta) &
+								RDSTA_IFMASK;
+			/*
+			 * If either SH was instantiated by somebody else
+			 * (e.g. u-boot) then it is assumed that the entropy
+			 * parameters are properly set and thus the function
+			 * setting these (kick_trng(...)) is skipped.
+			 * Also, if a handle was instantiated, do not change
+			 * the TRNG parameters.
+			 */
+			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+				dev_info(dev,
+					 "Entropy delay = %u\n",
+					 ent_delay);
+				kick_trng(pdev, ent_delay);
+				ent_delay += 400;
+			}
+			/*
+			 * if instantiate_rng(...) fails, the loop will rerun
+			 * and the kick_trng(...) function will modify the
+			 * upper and lower limits of the entropy sampling
+			 * interval, leading to a successful initialization of
+			 * the RNG.
+			 */
+			ret = instantiate_rng(dev, inst_handles,
+					      gen_sk);
+			if (ret == -EAGAIN)
+				/*
+				 * if here, the loop will rerun,
+				 * so don't hog the CPU
+				 */
+				cpu_relax();
+		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
+		if (ret) {
+			dev_err(dev, "failed to instantiate RNG");
+			goto caam_remove;
+		}
+		/*
+		 * Set handles init'ed by this module as the complement of the
+		 * already initialized ones
+		 */
+		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
+
+		/* Enable RDB bit so that RNG works faster */
+		clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
+	}
+
+	/* NOTE: RTIC detection ought to go here, around Si time */
+
+	caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
+		  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
+
+	/* Report "alive" for developer to see */
+	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
+		 ctrlpriv->era);
+	dev_info(dev, "job rings = %d, qi = %d\n",
+		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
+
+#ifdef CONFIG_DEBUG_FS
+	debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->req_dequeued,
+			    &caam_fops_u64_ro);
+	debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->ob_enc_req,
+			    &caam_fops_u64_ro);
+	debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->ib_dec_req,
+			    &caam_fops_u64_ro);
+	debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->ob_enc_bytes,
+			    &caam_fops_u64_ro);
+	debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->ob_prot_bytes,
+			    &caam_fops_u64_ro);
+	debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->ib_dec_bytes,
+			    &caam_fops_u64_ro);
+	debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->ib_valid_bytes,
+			    &caam_fops_u64_ro);
+
+	/* Controller level - global status values */
+	debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->faultaddr,
+			    &caam_fops_u32_ro);
+	debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->faultdetail,
+			    &caam_fops_u32_ro);
+	debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->status,
+			    &caam_fops_u32_ro);
+
+	/* Internal covering keys (useful in non-secure mode only) */
+	ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
+	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
+						S_IRUSR |
+						S_IRGRP | S_IROTH,
+						ctrlpriv->ctl,
+						&ctrlpriv->ctl_kek_wrap);
+
+	ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
+	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
+						 S_IRUSR |
+						 S_IRGRP | S_IROTH,
+						 ctrlpriv->ctl,
+						 &ctrlpriv->ctl_tkek_wrap);
+
+	ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
+	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
+						 S_IRUSR |
+						 S_IRGRP | S_IROTH,
+						 ctrlpriv->ctl,
+						 &ctrlpriv->ctl_tdsk_wrap);
+#endif
+	return 0;
+
+caam_remove:
+	caam_remove(pdev);
+	return ret;
+
+iounmap_ctrl:
+	iounmap(ctrl);
+disable_caam_emi_slow:
+	if (ctrlpriv->caam_emi_slow)
+		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+disable_caam_aclk:
+	clk_disable_unprepare(ctrlpriv->caam_aclk);
+disable_caam_mem:
+	if (ctrlpriv->caam_mem)
+		clk_disable_unprepare(ctrlpriv->caam_mem);
+disable_caam_ipg:
+	clk_disable_unprepare(ctrlpriv->caam_ipg);
+	return ret;
+}
+
+static struct platform_driver caam_driver = {
+	.driver = {
+		.name = "caam",
+		.of_match_table = caam_match,
+	},
+	.probe       = caam_probe,
+	.remove      = caam_remove,
+};
+
+module_platform_driver(caam_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM request backend");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
new file mode 100644
index 0000000..f3ecd67
--- /dev/null
+++ b/drivers/crypto/caam/ctrl.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM control-plane driver backend public-level include definitions
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CTRL_H
+#define CTRL_H
+
+/* Prototypes for backend-level services exposed to APIs */
+extern bool caam_dpaa2;
+
+#endif /* CTRL_H */
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
new file mode 100644
index 0000000..f76ff16
--- /dev/null
+++ b/drivers/crypto/caam/desc.h
@@ -0,0 +1,1656 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM descriptor composition header
+ * Definitions to support CAAM descriptor instruction generation
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef DESC_H
+#define DESC_H
+
+/*
+ * 16-byte hardware scatter/gather table
+ * An 8-byte table exists in the hardware spec, but has not been
+ * implemented to date. The 8/16 option is selected at RTL compile time,
+ * and this selection is visible in the Compile Time Parameters Register.
+ */
+
+#define SEC4_SG_LEN_EXT		0x80000000	/* Entry points to table */
+#define SEC4_SG_LEN_FIN		0x40000000	/* Last ent in table */
+#define SEC4_SG_BPID_MASK	0x000000ff
+#define SEC4_SG_BPID_SHIFT	16
+#define SEC4_SG_LEN_MASK	0x3fffffff	/* Excludes EXT and FINAL */
+#define SEC4_SG_OFFSET_MASK	0x00001fff
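+
+/*
+ * Illustrative note on how the above bits compose (assumed usage pattern,
+ * not taken from the hardware reference): the last entry of a table
+ * describing 64 bytes would carry a length word of (64 | SEC4_SG_LEN_FIN),
+ * while an entry that chains to a further table sets SEC4_SG_LEN_EXT
+ * instead of describing data directly.
+ */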
+
+/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
+#define MAX_CAAM_DESCSIZE	64
+
+/* Block size of any entity covered/uncovered with a KEK/TKEK */
+#define KEK_BLOCKSIZE		16
+
+/*
+ * Supported descriptor command types as they show up
+ * inside a descriptor command word.
+ */
+#define CMD_SHIFT		27
+#define CMD_MASK		0xf8000000
+
+#define CMD_KEY			(0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY		(0x01 << CMD_SHIFT)
+#define CMD_LOAD		(0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD		(0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD		(0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD	(0x05 << CMD_SHIFT)
+#define CMD_STORE		(0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE		(0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE		(0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE	(0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN		(0x0e << CMD_SHIFT)
+#define CMD_MOVE		(0x0f << CMD_SHIFT)
+#define CMD_OPERATION		(0x10 << CMD_SHIFT)
+#define CMD_SIGNATURE		(0x12 << CMD_SHIFT)
+#define CMD_JUMP		(0x14 << CMD_SHIFT)
+#define CMD_MATH		(0x15 << CMD_SHIFT)
+#define CMD_DESC_HDR		(0x16 << CMD_SHIFT)
+#define CMD_SHARED_DESC_HDR	(0x17 << CMD_SHIFT)
+#define CMD_SEQ_IN_PTR		(0x1e << CMD_SHIFT)
+#define CMD_SEQ_OUT_PTR		(0x1f << CMD_SHIFT)
+
+/* General-purpose class selector for all commands */
+#define CLASS_SHIFT		25
+#define CLASS_MASK		(0x03 << CLASS_SHIFT)
+
+#define CLASS_NONE		(0x00 << CLASS_SHIFT)
+#define CLASS_1			(0x01 << CLASS_SHIFT)
+#define CLASS_2			(0x02 << CLASS_SHIFT)
+#define CLASS_BOTH		(0x03 << CLASS_SHIFT)
+
+/*
+ * Descriptor header command constructs
+ * Covers shared, job, and trusted descriptor headers
+ */
+
+/*
+ * Do Not Run - marks a descriptor inexecutable if there was
+ * a preceding error somewhere
+ */
+#define HDR_DNR			0x01000000
+
+/*
+ * ONE - should always be set. Combination of ONE (always
+ * set) and ZRO (always clear) forms an endianness sanity check
+ */
+#define HDR_ONE			0x00800000
+#define HDR_ZRO			0x00008000
+
+/* Start Index or SharedDesc Length */
+#define HDR_START_IDX_SHIFT	16
+#define HDR_START_IDX_MASK	(0x3f << HDR_START_IDX_SHIFT)
+
+/* If shared descriptor header, 6-bit length */
+#define HDR_DESCLEN_SHR_MASK	0x3f
+
+/* If non-shared header, 7-bit length */
+#define HDR_DESCLEN_MASK	0x7f
+
+/* This is a TrustedDesc (if not SharedDesc) */
+#define HDR_TRUSTED		0x00004000
+
+/* Make into TrustedDesc (if not SharedDesc) */
+#define HDR_MAKE_TRUSTED	0x00002000
+
+/* Save context if self-shared (if SharedDesc) */
+#define HDR_SAVECTX		0x00001000
+
+/* Next item points to SharedDesc */
+#define HDR_SHARED		0x00001000
+
+/*
+ * Reverse Execution Order - execute JobDesc first, then
+ * execute SharedDesc (normally SharedDesc goes first).
+ */
+#define HDR_REVERSE		0x00000800
+
+/* Propagate DNR property to SharedDesc */
+#define HDR_PROP_DNR		0x00000800
+
+/* JobDesc/SharedDesc share property */
+#define HDR_SD_SHARE_SHIFT	8
+#define HDR_SD_SHARE_MASK	(0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_JD_SHARE_SHIFT	8
+#define HDR_JD_SHARE_MASK	(0x07 << HDR_JD_SHARE_SHIFT)
+
+#define HDR_SHARE_NEVER		(0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT		(0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL	(0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS	(0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER		(0x04 << HDR_SD_SHARE_SHIFT)
+
+/* JobDesc/SharedDesc descriptor length */
+#define HDR_JD_LENGTH_MASK	0x7f
+#define HDR_SD_LENGTH_MASK	0x3f
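+
+/*
+ * Illustrative example of header composition (assumed usage pattern, not
+ * taken from the hardware reference): a shared descriptor header word for
+ * a serially-shared descriptor of desclen words would typically look like
+ *
+ *	CMD_SHARED_DESC_HDR | HDR_ONE | HDR_SHARE_SERIAL | desclen
+ *
+ * with desclen limited by HDR_SD_LENGTH_MASK; a plain job descriptor
+ * header uses CMD_DESC_HDR | HDR_ONE with its length limited by
+ * HDR_JD_LENGTH_MASK.
+ */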
+
+/*
+ * KEY/SEQ_KEY Command Constructs
+ */
+
+/* Key Destination Class: 01 = Class 1, 02 = Class 2 */
+#define KEY_DEST_CLASS_SHIFT	25	/* use CLASS_1 or CLASS_2 */
+#define KEY_DEST_CLASS_MASK	(0x03 << KEY_DEST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define KEY_SGF			0x01000000
+#define KEY_VLF			0x01000000
+
+/* Immediate - Key follows command in the descriptor */
+#define KEY_IMM			0x00800000
+
+/*
+ * Encrypted - Key is encrypted either with the KEK, or
+ * with the TDKEK if TK is set
+ */
+#define KEY_ENC			0x00400000
+
+/*
+ * No Write Back - Do not allow key to be FIFO STOREd
+ */
+#define KEY_NWB			0x00200000
+
+/*
+ * Enhanced Encryption of Key
+ */
+#define KEY_EKT			0x00100000
+
+/*
+ * Encrypted with Trusted Key
+ */
+#define KEY_TK			0x00008000
+
+/*
+ * KDEST - Key Destination: 0 - class key register,
+ * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key
+ */
+#define KEY_DEST_SHIFT		16
+#define KEY_DEST_MASK		(0x03 << KEY_DEST_SHIFT)
+
+#define KEY_DEST_CLASS_REG	(0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E		(0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX	(0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT	(0x03 << KEY_DEST_SHIFT)
+
+/* Length in bytes */
+#define KEY_LENGTH_MASK		0x000003ff
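+
+/*
+ * Illustrative example of KEY command composition (assumed usage pattern,
+ * not taken from the hardware reference): loading an immediate 16-byte key
+ * into the class 1 key register would typically be encoded as
+ *
+ *	CMD_KEY | CLASS_1 | KEY_DEST_CLASS_REG | KEY_IMM | 16
+ *
+ * with the length limited by KEY_LENGTH_MASK and the key bytes following
+ * the command word inline in the descriptor.
+ */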
+
+/*
+ * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
+ */
+
+/*
+ * Load/Store Destination: 0 = class independent CCB,
+ * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
+ */
+#define LDST_CLASS_SHIFT	25
+#define LDST_CLASS_MASK		(0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB	(0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB	(0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB	(0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO		(0x03 << LDST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define LDST_SGF		0x01000000
+#define LDST_VLF		LDST_SGF
+
+/* Immediate - Key follows this command in descriptor */
+#define LDST_IMM_MASK		1
+#define LDST_IMM_SHIFT		23
+#define LDST_IMM		(LDST_IMM_MASK << LDST_IMM_SHIFT)
+
+/* SRC/DST - Destination for LOAD, Source for STORE */
+#define LDST_SRCDST_SHIFT	16
+#define LDST_SRCDST_MASK	(0x7f << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_BYTE_CONTEXT	(0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_KEY		(0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_INFIFO		(0x7c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_OUTFIFO	(0x7e << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_WORD_MODE_REG	(0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_KEYSZ_REG	(0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DATASZ_REG	(0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ICVSZ_REG	(0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CHACTRL	(0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL	(0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IRQCTRL	(0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD	(0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLRW		(0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0	(0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STAT		(0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1	(0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2	(0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ	(0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3	(0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ	(0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1	(0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ	(0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ	(0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ	(0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ	(0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS_CTX	(0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF	(0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB	(0x41 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED	(0x42 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB_WE	(0x45 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO	(0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination */
+#define LDST_OFFSET_SHIFT	8
+#define LDST_OFFSET_MASK	(0xff << LDST_OFFSET_SHIFT)
+
+/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
+/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
+#define LDOFF_CHG_SHARE_SHIFT		0
+#define LDOFF_CHG_SHARE_MASK		(0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER		(0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP		(0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP	(0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO		(1 << 2)
+#define LDOFF_DISABLE_AUTO_NFIFO	(1 << 3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT	4
+#define LDOFF_CHG_NONSEQLIODN_MASK	(0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ	(0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ	(0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED	(0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT	6
+#define LDOFF_CHG_SEQLIODN_MASK		(0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ		(0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ	(0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED	(0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes	*/
+#define LDST_LEN_SHIFT		0
+#define LDST_LEN_MASK		(0xff << LDST_LEN_SHIFT)
+
+/* Special Length definitions when dst=deco-ctrl */
+#define LDLEN_ENABLE_OSL_COUNT		(1 << 7)
+#define LDLEN_RST_CHA_OFIFO_PTR		(1 << 6)
+#define LDLEN_RST_OFIFO			(1 << 5)
+#define LDLEN_SET_OFIFO_OFF_VALID	(1 << 4)
+#define LDLEN_SET_OFIFO_OFF_RSVD	(1 << 3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT	0
+#define LDLEN_SET_OFIFO_OFFSET_MASK	(3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
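+
+/*
+ * Illustrative example of LOAD command composition (assumed usage pattern,
+ * not taken from the hardware reference): restoring len bytes of class 1
+ * context would typically be encoded as
+ *
+ *	CMD_LOAD | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | len
+ *
+ * with len limited by LDST_LEN_MASK and an optional starting offset
+ * carried in LDST_OFFSET_MASK.
+ */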
+
+/*
+ * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
+ * Command Constructs
+ */
+
+/*
+ * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
+ * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
+ * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
+ */
+#define FIFOLD_CLASS_SHIFT	25
+#define FIFOLD_CLASS_MASK	(0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP	(0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1	(0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2	(0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH	(0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT	25
+#define FIFOST_CLASS_MASK	(0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL	(0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY	(0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY	(0x02 << FIFOST_CLASS_SHIFT)
+
+/*
+ * Scatter-Gather Table/Variable Length Field
+ * If set for FIFO_LOAD, the pointer refers to an SG table. Within
+ * SEQ_FIFO_LOAD, it denotes a variable-length input sequence
+ */
+#define FIFOLDST_SGF_SHIFT	24
+#define FIFOLDST_SGF_MASK	(1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK	(1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF		(1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF		(1 << FIFOLDST_SGF_SHIFT)
+
+/* Immediate - Data follows command in descriptor */
+#define FIFOLD_IMM_SHIFT	23
+#define FIFOLD_IMM_MASK		(1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM		(1 << FIFOLD_IMM_SHIFT)
+
+/* Continue - Not the last FIFO store to come */
+#define FIFOST_CONT_SHIFT	23
+#define FIFOST_CONT_MASK	(1 << FIFOST_CONT_SHIFT)
+
+/*
+ * Extended Length - use 32-bit extended length that
+ * follows the pointer field. Illegal with IMM set
+ */
+#define FIFOLDST_EXT_SHIFT	22
+#define FIFOLDST_EXT_MASK	(1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT		(1 << FIFOLDST_EXT_SHIFT)
+
+/* Input data type */
+#define FIFOLD_TYPE_SHIFT	16
+#define FIFOLD_CONT_TYPE_SHIFT	19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK	(0x3f << FIFOLD_TYPE_SHIFT)
+
+/* PK types */
+#define FIFOLD_TYPE_PK		(0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK	(0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0	(0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1	(0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2	(0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3	(0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0	(0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1	(0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2	(0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3	(0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N	(0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A	(0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B	(0x0d << FIFOLD_TYPE_SHIFT)
+
+/* Other types. Need to OR in last/flush bits as desired */
+#define FIFOLD_TYPE_MSG_MASK	(0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG		(0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2	(0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV		(0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA	(0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD		(0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV		(0x38 << FIFOLD_TYPE_SHIFT)
+
+/* Last/Flush bits for use with "other" types above */
+#define FIFOLD_TYPE_ACT_MASK	(0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION	(0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1	(0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1	(0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH	(0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2	(0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH	(0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL	(0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOINFOFIFO	(0x0F << FIFOLD_TYPE_SHIFT)
+
+#define FIFOLDST_LEN_MASK	0xffff
+#define FIFOLDST_EXT_LEN_MASK	0xffffffff
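+
+/*
+ * Illustrative example of FIFO LOAD composition (assumed usage pattern,
+ * not taken from the hardware reference): feeding len message bytes to
+ * class 1 and flagging them as the last class 1 data would typically be
+ * encoded as
+ *
+ *	CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
+ *	FIFOLD_TYPE_LAST1 | len
+ *
+ * with len limited by FIFOLDST_LEN_MASK, or carried in a following 32-bit
+ * word when FIFOLDST_EXT is set.
+ */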
+
+/* Output data types */
+#define FIFOST_TYPE_SHIFT	16
+#define FIFOST_TYPE_MASK	(0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0	 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1	 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2	 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3	 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0	 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1	 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2	 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3	 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N	 (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A	 (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B	 (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK	 (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK	 (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK	 (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK	 (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK	 (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK	 (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK	 (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE	 (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO	 (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP	 (0x3f << FIFOST_TYPE_SHIFT)
+
+/*
+ * OPERATION Command Constructs
+ */
+
+/* Operation type selectors - OP TYPE */
+#define OP_TYPE_SHIFT		24
+#define OP_TYPE_MASK		(0x07 << OP_TYPE_SHIFT)
+
+#define OP_TYPE_UNI_PROTOCOL	(0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK		(0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG	(0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG	(0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL	(0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL	(0x07 << OP_TYPE_SHIFT)
+
+/* ProtocolID selectors - PROTID */
+#define OP_PCLID_SHIFT		16
+#define OP_PCLID_MASK		(0xff << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
+#define OP_PCLID_IKEV1_PRF	(0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF	(0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF	(0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF	(0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF	(0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10_PRF	(0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PRF		(0x06 << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB		(0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_SECRETKEY	(0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR	(0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN	(0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY	(0x16 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSAENC_PUBKEY  (0x18 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSADEC_PRVKEY  (0x19 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_MD5	(0x20 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA1	(0x21 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA224	(0x22 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA256	(0x23 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA384	(0x24 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA512	(0x25 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_MD5	(0x60 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA1	(0x61 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA224	(0x62 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA256	(0x63 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA384	(0x64 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA512	(0x65 << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
+#define OP_PCLID_IPSEC		(0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP		(0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC		(0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI		(0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX		(0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30		(0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10		(0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11		(0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12		(0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS		(0x0c << OP_PCLID_SHIFT)
+
+/*
+ * ProtocolInfo selectors
+ */
+#define OP_PCLINFO_MASK				 0xffff
+
+/* for OP_PCLID_IPSEC */
+#define OP_PCL_IPSEC_CIPHER_MASK		 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK			 0x00ff
+
+#define OP_PCL_IPSEC_DES_IV64			 0x0100
+#define OP_PCL_IPSEC_DES			 0x0200
+#define OP_PCL_IPSEC_3DES			 0x0300
+#define OP_PCL_IPSEC_AES_CBC			 0x0c00
+#define OP_PCL_IPSEC_AES_CTR			 0x0d00
+#define OP_PCL_IPSEC_AES_XTS			 0x1600
+#define OP_PCL_IPSEC_AES_CCM8			 0x0e00
+#define OP_PCL_IPSEC_AES_CCM12			 0x0f00
+#define OP_PCL_IPSEC_AES_CCM16			 0x1000
+#define OP_PCL_IPSEC_AES_GCM8			 0x1200
+#define OP_PCL_IPSEC_AES_GCM12			 0x1300
+#define OP_PCL_IPSEC_AES_GCM16			 0x1400
+
+#define OP_PCL_IPSEC_HMAC_NULL			 0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96		 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96		 0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96		 0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128		 0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160		 0x0007
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128		 0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192		 0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256		 0x000e
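+
+/*
+ * Illustrative example of a protocol OPERATION word (assumed usage pattern,
+ * not taken from the hardware reference): ESP encapsulation with AES-CBC
+ * and HMAC-SHA1-96 would typically be encoded as
+ *
+ *	CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_IPSEC |
+ *	OP_PCL_IPSEC_AES_CBC | OP_PCL_IPSEC_HMAC_SHA1_96
+ *
+ * with the cipher/auth protinfo portion covered by OP_PCLINFO_MASK.
+ */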
+
+/* For SRTP - OP_PCLID_SRTP */
+#define OP_PCL_SRTP_CIPHER_MASK			 0xff00
+#define OP_PCL_SRTP_AUTH_MASK			 0x00ff
+
+#define OP_PCL_SRTP_AES_CTR			 0x0d00
+
+#define OP_PCL_SRTP_HMAC_SHA1_160		 0x0007
+
+/* For SSL 3.0 - OP_PCLID_SSL30 */
+#define OP_PCL_SSL30_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_SSL30_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_SSL30_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_SSL30_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_SSL30_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_SSL30_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_SSL30_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_SSL30_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_SSL30_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_SSL30_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_SSL30_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_SSL30_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_SSL30_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_SSL30_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_SSL30_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_SSL30_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_SSL30_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_SSL30_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_SSL30_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_SSL30_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_SSL30_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_SSL30_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_SSL30_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_SSL30_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_SSL30_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_SSL30_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_SSL30_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_SSL30_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_SSL30_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_SSL30_AES_256_CBC_SHA_17		 0xc022
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_MD5		 0x0023
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18	 0xc01c
+
+#define OP_PCL_SSL30_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_SSL30_DES_CBC_MD5		 0x0022
+
+#define OP_PCL_SSL30_DES40_CBC_SHA		 0x0008
+#define OP_PCL_SSL30_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_SSL30_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_SSL30_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_SSL30_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_SSL30_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_SSL30_DES40_CBC_SHA_7		 0x0026
+
+#define OP_PCL_SSL30_DES_CBC_SHA		 0x001e
+#define OP_PCL_SSL30_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_SSL30_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_SSL30_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_SSL30_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_SSL30_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_SSL30_DES_CBC_SHA_7		 0x001a
+
+#define OP_PCL_SSL30_RC4_128_MD5		 0x0024
+#define OP_PCL_SSL30_RC4_128_MD5_2		 0x0004
+#define OP_PCL_SSL30_RC4_128_MD5_3		 0x0018
+
+#define OP_PCL_SSL30_RC4_40_MD5			 0x002b
+#define OP_PCL_SSL30_RC4_40_MD5_2		 0x0003
+#define OP_PCL_SSL30_RC4_40_MD5_3		 0x0017
+
+#define OP_PCL_SSL30_RC4_128_SHA		 0x0020
+#define OP_PCL_SSL30_RC4_128_SHA_2		 0x008a
+#define OP_PCL_SSL30_RC4_128_SHA_3		 0x008e
+#define OP_PCL_SSL30_RC4_128_SHA_4		 0x0092
+#define OP_PCL_SSL30_RC4_128_SHA_5		 0x0005
+#define OP_PCL_SSL30_RC4_128_SHA_6		 0xc002
+#define OP_PCL_SSL30_RC4_128_SHA_7		 0xc007
+#define OP_PCL_SSL30_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_SSL30_RC4_128_SHA_9		 0xc011
+#define OP_PCL_SSL30_RC4_128_SHA_10		 0xc016
+
+#define OP_PCL_SSL30_RC4_40_SHA			 0x0028
+
+
+/* For TLS 1.0 - OP_PCLID_TLS10 */
+#define OP_PCL_TLS10_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_TLS10_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_TLS10_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_TLS10_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_TLS10_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_TLS10_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_TLS10_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_TLS10_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_TLS10_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_TLS10_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_TLS10_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_TLS10_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_TLS10_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_TLS10_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_TLS10_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_TLS10_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_TLS10_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_TLS10_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_TLS10_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_TLS10_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_TLS10_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_TLS10_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_TLS10_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_TLS10_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_TLS10_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_TLS10_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_TLS10_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_TLS10_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_TLS10_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_TLS10_AES_256_CBC_SHA_17		 0xc022
+
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5	0x0023 */
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18	 0xc01c
+
+#define OP_PCL_TLS10_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_TLS10_DES_CBC_MD5		 0x0022
+
+#define OP_PCL_TLS10_DES40_CBC_SHA		 0x0008
+#define OP_PCL_TLS10_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_TLS10_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_TLS10_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_TLS10_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_TLS10_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_TLS10_DES40_CBC_SHA_7		 0x0026
+
+
+#define OP_PCL_TLS10_DES_CBC_SHA		 0x001e
+#define OP_PCL_TLS10_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_TLS10_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_TLS10_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_TLS10_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_TLS10_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_TLS10_DES_CBC_SHA_7		 0x001a
+
+#define OP_PCL_TLS10_RC4_128_MD5		 0x0024
+#define OP_PCL_TLS10_RC4_128_MD5_2		 0x0004
+#define OP_PCL_TLS10_RC4_128_MD5_3		 0x0018
+
+#define OP_PCL_TLS10_RC4_40_MD5			 0x002b
+#define OP_PCL_TLS10_RC4_40_MD5_2		 0x0003
+#define OP_PCL_TLS10_RC4_40_MD5_3		 0x0017
+
+#define OP_PCL_TLS10_RC4_128_SHA		 0x0020
+#define OP_PCL_TLS10_RC4_128_SHA_2		 0x008a
+#define OP_PCL_TLS10_RC4_128_SHA_3		 0x008e
+#define OP_PCL_TLS10_RC4_128_SHA_4		 0x0092
+#define OP_PCL_TLS10_RC4_128_SHA_5		 0x0005
+#define OP_PCL_TLS10_RC4_128_SHA_6		 0xc002
+#define OP_PCL_TLS10_RC4_128_SHA_7		 0xc007
+#define OP_PCL_TLS10_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_TLS10_RC4_128_SHA_9		 0xc011
+#define OP_PCL_TLS10_RC4_128_SHA_10		 0xc016
+
+#define OP_PCL_TLS10_RC4_40_SHA			 0x0028
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160	 0xff30
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224	 0xff34
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256	 0xff36
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384	 0xff33
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512	 0xff35
+#define OP_PCL_TLS10_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_TLS10_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_TLS10_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_TLS10_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_TLS10_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_TLS10_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_TLS10_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_TLS10_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_TLS10_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_TLS10_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_TLS10_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_TLS10_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_TLS10_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_TLS10_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_TLS10_AES_256_CBC_SHA512		 0xff65
+
+
+
+/* For TLS 1.1 - OP_PCLID_TLS11 */
+#define OP_PCL_TLS11_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_TLS11_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_TLS11_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_TLS11_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_TLS11_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_TLS11_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_TLS11_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_TLS11_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_TLS11_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_TLS11_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_TLS11_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_TLS11_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_TLS11_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_TLS11_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_TLS11_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_TLS11_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_TLS11_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_TLS11_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_TLS11_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_TLS11_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_TLS11_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_TLS11_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_TLS11_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_TLS11_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_TLS11_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_TLS11_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_TLS11_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_TLS11_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_TLS11_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_TLS11_AES_256_CBC_SHA_17		 0xc022
+
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5	0x0023 */
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18	 0xc01c
+
+#define OP_PCL_TLS11_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_TLS11_DES_CBC_MD5		 0x0022
+
+#define OP_PCL_TLS11_DES40_CBC_SHA		 0x0008
+#define OP_PCL_TLS11_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_TLS11_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_TLS11_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_TLS11_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_TLS11_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_TLS11_DES40_CBC_SHA_7		 0x0026
+
+#define OP_PCL_TLS11_DES_CBC_SHA		 0x001e
+#define OP_PCL_TLS11_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_TLS11_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_TLS11_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_TLS11_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_TLS11_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_TLS11_DES_CBC_SHA_7		 0x001a
+
+#define OP_PCL_TLS11_RC4_128_MD5		 0x0024
+#define OP_PCL_TLS11_RC4_128_MD5_2		 0x0004
+#define OP_PCL_TLS11_RC4_128_MD5_3		 0x0018
+
+#define OP_PCL_TLS11_RC4_40_MD5			 0x002b
+#define OP_PCL_TLS11_RC4_40_MD5_2		 0x0003
+#define OP_PCL_TLS11_RC4_40_MD5_3		 0x0017
+
+#define OP_PCL_TLS11_RC4_128_SHA		 0x0020
+#define OP_PCL_TLS11_RC4_128_SHA_2		 0x008a
+#define OP_PCL_TLS11_RC4_128_SHA_3		 0x008e
+#define OP_PCL_TLS11_RC4_128_SHA_4		 0x0092
+#define OP_PCL_TLS11_RC4_128_SHA_5		 0x0005
+#define OP_PCL_TLS11_RC4_128_SHA_6		 0xc002
+#define OP_PCL_TLS11_RC4_128_SHA_7		 0xc007
+#define OP_PCL_TLS11_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_TLS11_RC4_128_SHA_9		 0xc011
+#define OP_PCL_TLS11_RC4_128_SHA_10		 0xc016
+
+#define OP_PCL_TLS11_RC4_40_SHA			 0x0028
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160	 0xff30
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224	 0xff34
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256	 0xff36
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384	 0xff33
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512	 0xff35
+#define OP_PCL_TLS11_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_TLS11_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_TLS11_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_TLS11_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_TLS11_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_TLS11_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_TLS11_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_TLS11_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_TLS11_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_TLS11_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_TLS11_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_TLS11_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_TLS11_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_TLS11_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_TLS11_AES_256_CBC_SHA512		 0xff65
+
+
+/* For TLS 1.2 - OP_PCLID_TLS12 */
+#define OP_PCL_TLS12_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_TLS12_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_TLS12_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_TLS12_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_TLS12_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_TLS12_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_TLS12_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_TLS12_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_TLS12_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_TLS12_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_TLS12_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_TLS12_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_TLS12_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_TLS12_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_TLS12_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_TLS12_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_TLS12_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_TLS12_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_TLS12_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_TLS12_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_TLS12_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_TLS12_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_TLS12_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_TLS12_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_TLS12_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_TLS12_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_TLS12_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_TLS12_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_TLS12_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_TLS12_AES_256_CBC_SHA_17		 0xc022
+
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5	0x0023 */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18	 0xc01c
+
+#define OP_PCL_TLS12_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_TLS12_DES_CBC_MD5		 0x0022
+
+#define OP_PCL_TLS12_DES40_CBC_SHA		 0x0008
+#define OP_PCL_TLS12_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_TLS12_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_TLS12_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_TLS12_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_TLS12_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_TLS12_DES40_CBC_SHA_7		 0x0026
+
+#define OP_PCL_TLS12_DES_CBC_SHA		 0x001e
+#define OP_PCL_TLS12_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_TLS12_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_TLS12_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_TLS12_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_TLS12_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_TLS12_DES_CBC_SHA_7		 0x001a
+
+#define OP_PCL_TLS12_RC4_128_MD5		 0x0024
+#define OP_PCL_TLS12_RC4_128_MD5_2		 0x0004
+#define OP_PCL_TLS12_RC4_128_MD5_3		 0x0018
+
+#define OP_PCL_TLS12_RC4_40_MD5			 0x002b
+#define OP_PCL_TLS12_RC4_40_MD5_2		 0x0003
+#define OP_PCL_TLS12_RC4_40_MD5_3		 0x0017
+
+#define OP_PCL_TLS12_RC4_128_SHA		 0x0020
+#define OP_PCL_TLS12_RC4_128_SHA_2		 0x008a
+#define OP_PCL_TLS12_RC4_128_SHA_3		 0x008e
+#define OP_PCL_TLS12_RC4_128_SHA_4		 0x0092
+#define OP_PCL_TLS12_RC4_128_SHA_5		 0x0005
+#define OP_PCL_TLS12_RC4_128_SHA_6		 0xc002
+#define OP_PCL_TLS12_RC4_128_SHA_7		 0xc007
+#define OP_PCL_TLS12_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_TLS12_RC4_128_SHA_9		 0xc011
+#define OP_PCL_TLS12_RC4_128_SHA_10		 0xc016
+
+#define OP_PCL_TLS12_RC4_40_SHA			 0x0028
+
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256	0x003c */
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_2	 0x003e
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_3	 0x003f
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4	 0x0040
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_5	 0x0067
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_6	 0x006c
+
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256	0x003d */
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_2	 0x0068
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_3	 0x0069
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_4	 0x006a
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_5	 0x006b
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_6	 0x006d
+
+/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160	 0xff30
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224	 0xff34
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256	 0xff36
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384	 0xff33
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512	 0xff35
+#define OP_PCL_TLS12_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_TLS12_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_TLS12_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_TLS12_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_TLS12_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_TLS12_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_TLS12_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_TLS12_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_TLS12_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_TLS12_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_TLS12_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_TLS12_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_TLS12_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_TLS12_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_TLS12_AES_256_CBC_SHA512		 0xff65
+
+/* For DTLS - OP_PCLID_DTLS */
+
+#define OP_PCL_DTLS_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_DTLS_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_DTLS_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_DTLS_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_DTLS_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_DTLS_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_DTLS_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_DTLS_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_DTLS_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_DTLS_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_DTLS_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_DTLS_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_DTLS_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_DTLS_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_DTLS_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_DTLS_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_DTLS_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_DTLS_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_DTLS_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_DTLS_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_DTLS_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_DTLS_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_DTLS_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_DTLS_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_DTLS_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_DTLS_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_DTLS_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_DTLS_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_DTLS_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_DTLS_AES_256_CBC_SHA_17		 0xc022
+
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5		0x0023 */
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10		 0x001b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11		 0xc003
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12		 0xc008
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13		 0xc00d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14		 0xc012
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15		 0xc017
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16		 0xc01a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17		 0xc01b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18		 0xc01c
+
+#define OP_PCL_DTLS_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_DTLS_DES_CBC_MD5			 0x0022
+
+#define OP_PCL_DTLS_DES40_CBC_SHA		 0x0008
+#define OP_PCL_DTLS_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_DTLS_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_DTLS_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_DTLS_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_DTLS_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_DTLS_DES40_CBC_SHA_7		 0x0026
+
+
+#define OP_PCL_DTLS_DES_CBC_SHA			 0x001e
+#define OP_PCL_DTLS_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_DTLS_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_DTLS_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_DTLS_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_DTLS_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_DTLS_DES_CBC_SHA_7		 0x001a
+
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160		 0xff30
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224		 0xff34
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256		 0xff36
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384		 0xff33
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512		 0xff35
+#define OP_PCL_DTLS_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_DTLS_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_DTLS_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_DTLS_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_DTLS_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_DTLS_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_DTLS_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_DTLS_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_DTLS_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_DTLS_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_DTLS_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_DTLS_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_DTLS_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_DTLS_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_DTLS_AES_256_CBC_SHA512		 0xff65
+
+/* 802.16 WiMAX protinfos */
+#define OP_PCL_WIMAX_OFDM			 0x0201
+#define OP_PCL_WIMAX_OFDMA			 0x0231
+
+/* 802.11 WiFi protinfos */
+#define OP_PCL_WIFI				 0xac04
+
+/* MacSec protinfos */
+#define OP_PCL_MACSEC				 0x0001
+
+/* Derived Key Protocol (DKP) Protinfo */
+#define OP_PCL_DKP_SRC_SHIFT	14
+#define OP_PCL_DKP_SRC_MASK	(3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_IMM	(0 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SEQ	(1 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_PTR	(2 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SGF	(3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_DST_SHIFT	12
+#define OP_PCL_DKP_DST_MASK	(3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_IMM	(0 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SEQ	(1 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_PTR	(2 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SGF	(3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_KEY_SHIFT	0
+#define OP_PCL_DKP_KEY_MASK	(0xfff << OP_PCL_DKP_KEY_SHIFT)
+
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_TEST			 0x0008
+#define OP_PCL_PKPROT_DECRYPT			 0x0004
+#define OP_PCL_PKPROT_ECC			 0x0002
+#define OP_PCL_PKPROT_F2M			 0x0001
+
+/* For non-protocol/alg-only op commands */
+#define OP_ALG_TYPE_SHIFT	24
+#define OP_ALG_TYPE_MASK	(0x7 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS1	(2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2	(4 << OP_ALG_TYPE_SHIFT)
+
+#define OP_ALG_ALGSEL_SHIFT	16
+#define OP_ALG_ALGSEL_MASK	(0xff << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SUBMASK	(0x0f << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_AES	(0x10 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_DES	(0x20 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_3DES	(0x21 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ARC4	(0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_MD5	(0x40 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA1	(0x41 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA224	(0x42 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA256	(0x43 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA384	(0x44 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA512	(0x45 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_RNG	(0x50 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW	(0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F8	(0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_KASUMI	(0x70 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CRC	(0x90 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F9	(0xA0 << OP_ALG_ALGSEL_SHIFT)
+
+#define OP_ALG_AAI_SHIFT	4
+#define OP_ALG_AAI_MASK		(0x1ff << OP_ALG_AAI_SHIFT)
+
+/* blockcipher AAI set */
+#define OP_ALG_AAI_CTR_MOD128	(0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD8	(0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD16	(0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD24	(0x03 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD32	(0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD40	(0x05 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD48	(0x06 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD56	(0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD64	(0x08 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD72	(0x09 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD80	(0x0a << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD88	(0x0b << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD96	(0x0c << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD104	(0x0d << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD112	(0x0e << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD120	(0x0f << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC		(0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_ECB		(0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CFB		(0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_OFB		(0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XTS		(0x50 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CMAC		(0x60 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XCBC_MAC	(0x70 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CCM		(0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GCM		(0x90 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_XCBCMAC	(0xa0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_XCBCMAC	(0xb0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CHECKODD	(0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DK		(0x100 << OP_ALG_AAI_SHIFT)
+
+/* randomizer AAI set */
+#define OP_ALG_AAI_RNG		(0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB	(0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP	(0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_0	(0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1	(0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_PS	(0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI	(0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK	(0x100 << OP_ALG_AAI_SHIFT)
+
+/* hmac/smac AAI set */
+#define OP_ALG_AAI_HASH		(0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC		(0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_SMAC		(0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC_PRECOMP	(0x04 << OP_ALG_AAI_SHIFT)
+
+/* CRC AAI set */
+#define OP_ALG_AAI_802		(0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_3385		(0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CUST_POLY	(0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DIS		(0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOS		(0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOC		(0x40 << OP_ALG_AAI_SHIFT)
+
+/* Kasumi/SNOW AAI set */
+#define OP_ALG_AAI_F8		(0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_F9		(0xc8 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GSM		(0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_EDGE		(0x20 << OP_ALG_AAI_SHIFT)
+
+#define OP_ALG_AS_SHIFT		2
+#define OP_ALG_AS_MASK		(0x3 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_UPDATE	(0 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INIT		(1 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_FINALIZE	(2 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INITFINAL	(3 << OP_ALG_AS_SHIFT)
+
+#define OP_ALG_ICV_SHIFT	1
+#define OP_ALG_ICV_MASK		(1 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_OFF		(0 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_ON		(1 << OP_ALG_ICV_SHIFT)
+
+#define OP_ALG_DIR_SHIFT	0
+#define OP_ALG_DIR_MASK		1
+#define OP_ALG_DECRYPT		0
+#define OP_ALG_ENCRYPT		1
+
+/* PKHA algorithm type set */
+#define OP_ALG_PK		0x00800000
+#define OP_ALG_PK_FUN_MASK	0x3f /* clrmem, modmath, or cpymem */
+
+/* PKHA mode clear memory functions */
+#define OP_ALG_PKMODE_A_RAM	0x80000
+#define OP_ALG_PKMODE_B_RAM	0x40000
+#define OP_ALG_PKMODE_E_RAM	0x20000
+#define OP_ALG_PKMODE_N_RAM	0x10000
+#define OP_ALG_PKMODE_CLEARMEM	0x00001
+
+/* PKHA mode modular-arithmetic functions */
+#define OP_ALG_PKMODE_MOD_IN_MONTY	0x80000
+#define OP_ALG_PKMODE_MOD_OUT_MONTY	0x40000
+#define OP_ALG_PKMODE_MOD_F2M		0x20000
+#define OP_ALG_PKMODE_MOD_R2_IN		0x10000
+#define OP_ALG_PKMODE_PRJECTV		0x00800
+#define OP_ALG_PKMODE_TIME_EQ		0x400
+#define OP_ALG_PKMODE_OUT_B		0x000
+#define OP_ALG_PKMODE_OUT_A		0x100
+#define OP_ALG_PKMODE_MOD_ADD		0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB	0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA	0x004
+#define OP_ALG_PKMODE_MOD_MULT		0x005
+#define OP_ALG_PKMODE_MOD_EXPO		0x006
+#define OP_ALG_PKMODE_MOD_REDUCT	0x007
+#define OP_ALG_PKMODE_MOD_INV		0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD	0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL	0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT	0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST	0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST	0x00d
+#define OP_ALG_PKMODE_MOD_GCD		0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY	0x00f
+
+/* PKHA mode copy-memory functions */
+#define OP_ALG_PKMODE_SRC_REG_SHIFT	17
+#define OP_ALG_PKMODE_SRC_REG_MASK	(7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT	10
+#define OP_ALG_PKMODE_DST_REG_MASK	(7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT	8
+#define OP_ALG_PKMODE_SRC_SEG_MASK	(3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT	6
+#define OP_ALG_PKMODE_DST_SEG_MASK	(3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+#define OP_ALG_PKMODE_SRC_REG_A		(0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B		(1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N		(3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A		(0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B		(1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E		(2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N		(3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0		(0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1		(1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2		(2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3		(3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0		(0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1		(1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2		(2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3		(3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_CPYMEM_N_SZ	0x80
+#define OP_ALG_PKMODE_CPYMEM_SRC_SZ	0x81
+
+/*
+ * SEQ_IN_PTR Command Constructs
+ */
+
+/* Release Buffers */
+#define SQIN_RBS	0x04000000
+
+/* Sequence pointer is really a descriptor */
+#define SQIN_INL	0x02000000
+
+/* Sequence pointer is a scatter-gather table */
+#define SQIN_SGF	0x01000000
+
+/* Appends to a previous pointer */
+#define SQIN_PRE	0x00800000
+
+/* Use extended length following pointer */
+#define SQIN_EXT	0x00400000
+
+/* Restore sequence with pointer/length */
+#define SQIN_RTO	0x00200000
+
+/* Replace job descriptor */
+#define SQIN_RJD	0x00100000
+
+#define SQIN_LEN_SHIFT		 0
+#define SQIN_LEN_MASK		(0xffff << SQIN_LEN_SHIFT)
+
+/*
+ * SEQ_OUT_PTR Command Constructs
+ */
+
+/* Sequence pointer is a scatter-gather table */
+#define SQOUT_SGF	0x01000000
+
+/* Appends to a previous pointer */
+#define SQOUT_PRE	SQIN_PRE
+
+/* Restore sequence with pointer/length */
+#define SQOUT_RTO	SQIN_RTO
+
+/* Use extended length following pointer */
+#define SQOUT_EXT	0x00400000
+
+#define SQOUT_LEN_SHIFT		0
+#define SQOUT_LEN_MASK		(0xffff << SQOUT_LEN_SHIFT)
+
+
+/*
+ * SIGNATURE Command Constructs
+ */
+
+/* TYPE field is all that's relevant */
+#define SIGN_TYPE_SHIFT		16
+#define SIGN_TYPE_MASK		(0x0f << SIGN_TYPE_SHIFT)
+
+#define SIGN_TYPE_FINAL		(0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2		(0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3		(0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4		(0x0c << SIGN_TYPE_SHIFT)
+
+/*
+ * MOVE Command Constructs
+ */
+
+#define MOVE_AUX_SHIFT		25
+#define MOVE_AUX_MASK		(3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS		(2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS		(1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT	24
+#define MOVE_WAITCOMP_MASK	(1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP		(1 << MOVE_WAITCOMP_SHIFT)
+
+#define MOVE_SRC_SHIFT		20
+#define MOVE_SRC_MASK		(0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX	(0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX	(0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO	(0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF	(0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0		(0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1		(0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2		(0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3		(0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO		(0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL	(0x09 << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT		16
+#define MOVE_DEST_MASK		(0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX	(0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX	(0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO	(0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF	(0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0		(0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1		(0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2		(0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3		(0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO	(0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO	(0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_INFIFO_NOINFO (0x0a << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A		(0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY	(0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY	(0x0e << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT	8
+#define MOVE_OFFSET_MASK	(0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT		0
+#define MOVE_LEN_MASK		(0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT	0
+#define MOVELEN_MRSEL_MASK	(0x3 << MOVELEN_MRSEL_SHIFT)
+
+/*
+ * MATH Command Constructs
+ */
+
+#define MATH_IFB_SHIFT		26
+#define MATH_IFB_MASK		(1 << MATH_IFB_SHIFT)
+#define MATH_IFB		(1 << MATH_IFB_SHIFT)
+
+#define MATH_NFU_SHIFT		25
+#define MATH_NFU_MASK		(1 << MATH_NFU_SHIFT)
+#define MATH_NFU		(1 << MATH_NFU_SHIFT)
+
+#define MATH_STL_SHIFT		24
+#define MATH_STL_MASK		(1 << MATH_STL_SHIFT)
+#define MATH_STL		(1 << MATH_STL_SHIFT)
+
+/* Function selectors */
+#define MATH_FUN_SHIFT		20
+#define MATH_FUN_MASK		(0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD		(0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC		(0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB		(0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB		(0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR		(0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND		(0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR		(0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT		(0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT		(0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD		(0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT		(0x0a << MATH_FUN_SHIFT)
+
+/* Source 0 selectors */
+#define MATH_SRC0_SHIFT		16
+#define MATH_SRC0_MASK		(0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0		(0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1		(0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2		(0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3		(0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM		(0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_DPOVRD	(0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN	(0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN	(0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN	(0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN	(0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO		(0x0c << MATH_SRC0_SHIFT)
+
+/* Source 1 selectors */
+#define MATH_SRC1_SHIFT		12
+#define MATH_SRC1_MASK		(0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0		(0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1		(0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2		(0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3		(0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM		(0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_DPOVRD	(0x07 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO	(0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO	(0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE		(0x0c << MATH_SRC1_SHIFT)
+
+/* Destination selectors */
+#define MATH_DEST_SHIFT		8
+#define MATH_DEST_MASK		(0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0		(0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1		(0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2		(0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3		(0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_DPOVRD	(0x07 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN	(0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN	(0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN	(0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN	(0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE		(0x0f << MATH_DEST_SHIFT)
+
+/* Length selectors */
+#define MATH_LEN_SHIFT		0
+#define MATH_LEN_MASK		(0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE		0x01
+#define MATH_LEN_2BYTE		0x02
+#define MATH_LEN_4BYTE		0x04
+#define MATH_LEN_8BYTE		0x08
+
+/*
+ * JUMP Command Constructs
+ */
+
+#define JUMP_CLASS_SHIFT	25
+#define JUMP_CLASS_MASK		(3 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_NONE		0
+#define JUMP_CLASS_CLASS1	(1 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_CLASS2	(2 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_BOTH		(3 << JUMP_CLASS_SHIFT)
+
+#define JUMP_JSL_SHIFT		24
+#define JUMP_JSL_MASK		(1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL		(1 << JUMP_JSL_SHIFT)
+
+#define JUMP_TYPE_SHIFT		22
+#define JUMP_TYPE_LOCAL		(0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL	(0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT		(0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER	(0x03 << JUMP_TYPE_SHIFT)
+
+#define JUMP_TEST_SHIFT		16
+#define JUMP_TEST_MASK		(0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL		(0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL	(0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY		(0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY	(0x03 << JUMP_TEST_SHIFT)
+
+/* Condition codes. JSL bit is factored in */
+#define JUMP_COND_SHIFT		8
+#define JUMP_COND_MASK		(0x100ff << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_0		(0x80 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_GCD_1	(0x40 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_PRIME	(0x20 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_N	(0x08 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_Z	(0x04 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_C	(0x02 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_NV	(0x01 << JUMP_COND_SHIFT)
+
+#define JUMP_COND_JRP		((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SHRD		((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SELF		((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_CALM		((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIP		((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIFP		((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NOP		((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NCP		((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
+
+#define JUMP_OFFSET_SHIFT	0
+#define JUMP_OFFSET_MASK	(0xff << JUMP_OFFSET_SHIFT)
+
+/*
+ * NFIFO ENTRY
+ * Data Constructs
+ *
+ */
+#define NFIFOENTRY_DEST_SHIFT	30
+#define NFIFOENTRY_DEST_MASK	(3 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_DECO	(0 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS1	(1 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS2	(2 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_BOTH	(3 << NFIFOENTRY_DEST_SHIFT)
+
+#define NFIFOENTRY_LC2_SHIFT	29
+#define NFIFOENTRY_LC2_MASK	(1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2		(1 << NFIFOENTRY_LC2_SHIFT)
+
+#define NFIFOENTRY_LC1_SHIFT	28
+#define NFIFOENTRY_LC1_MASK	(1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1		(1 << NFIFOENTRY_LC1_SHIFT)
+
+#define NFIFOENTRY_FC2_SHIFT	27
+#define NFIFOENTRY_FC2_MASK	(1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2		(1 << NFIFOENTRY_FC2_SHIFT)
+
+#define NFIFOENTRY_FC1_SHIFT	26
+#define NFIFOENTRY_FC1_MASK	(1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1		(1 << NFIFOENTRY_FC1_SHIFT)
+
+#define NFIFOENTRY_STYPE_SHIFT	24
+#define NFIFOENTRY_STYPE_MASK	(3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_DFIFO	(0 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_OFIFO	(1 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_PAD	(2 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_SNOOP	(3 << NFIFOENTRY_STYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SHIFT	20
+#define NFIFOENTRY_DTYPE_MASK	(0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SBOX	(0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD	(0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV	(0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD	(0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV	(0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP	(0xE << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG	(0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_PK_A0	(0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1	(0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2	(0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3	(0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0	(0x4 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1	(0x5 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2	(0x6 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3	(0x7 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N	(0x8 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E	(0x9 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A	(0xC << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B	(0xD << NFIFOENTRY_DTYPE_SHIFT)
+
+
+#define NFIFOENTRY_BND_SHIFT	19
+#define NFIFOENTRY_BND_MASK	(1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND		(1 << NFIFOENTRY_BND_SHIFT)
+
+#define NFIFOENTRY_PTYPE_SHIFT	16
+#define NFIFOENTRY_PTYPE_MASK	(0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_PTYPE_ZEROS		(0x0 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS	(0x1 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT	(0x2 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND		(0x3 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ	(0x4 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ	(0x5 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N		(0x6 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N	(0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_OC_SHIFT	15
+#define NFIFOENTRY_OC_MASK	(1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC		(1 << NFIFOENTRY_OC_SHIFT)
+
+#define NFIFOENTRY_AST_SHIFT	14
+#define NFIFOENTRY_AST_MASK	(1 << NFIFOENTRY_AST_SHIFT)
+#define NFIFOENTRY_AST		(1 << NFIFOENTRY_AST_SHIFT)
+
+#define NFIFOENTRY_BM_SHIFT	11
+#define NFIFOENTRY_BM_MASK	(1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM		(1 << NFIFOENTRY_BM_SHIFT)
+
+#define NFIFOENTRY_PS_SHIFT	10
+#define NFIFOENTRY_PS_MASK	(1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS		(1 << NFIFOENTRY_PS_SHIFT)
+
+#define NFIFOENTRY_DLEN_SHIFT	0
+#define NFIFOENTRY_DLEN_MASK	(0xFFF << NFIFOENTRY_DLEN_SHIFT)
+
+#define NFIFOENTRY_PLEN_SHIFT	0
+#define NFIFOENTRY_PLEN_MASK	(0xFF << NFIFOENTRY_PLEN_SHIFT)
+
+/* Append Load Immediate Command */
+#define FD_CMD_APPEND_LOAD_IMMEDIATE			0x80000000
+
+/* Set SEQ LIODN equal to the Non-SEQ LIODN for the job */
+#define FD_CMD_SET_SEQ_LIODN_EQUAL_NONSEQ_LIODN		0x40000000
+
+/* Frame Descriptor Command for Replacement Job Descriptor */
+#define FD_CMD_REPLACE_JOB_DESC				0x20000000
+
+#endif /* DESC_H */
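
A minimal illustration (not part of the original patch) of how the algorithm
OPERATION fields above are meant to be combined: one value is picked from each
field group and OR-ed into a single 32-bit word, here an AES-CBC encrypt
operation on the class-1 CHA. The append_operation() helper from desc_constr.h
(next file) supplies the CMD_OPERATION command bits, which are defined earlier
in desc.h; desc is assumed to be an already-initialized descriptor buffer.

	u32 op = OP_ALG_TYPE_CLASS1 |	/* class-1 (cipher) algorithm op */
		 OP_ALG_ALGSEL_AES |	/* AES accelerator */
		 OP_ALG_AAI_CBC |	/* CBC mode of operation */
		 OP_ALG_AS_INITFINAL |	/* single-pass: init + finalize */
		 OP_ALG_ENCRYPT;	/* direction */

	append_operation(desc, op);
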
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
new file mode 100644
index 0000000..d4256fa
--- /dev/null
+++ b/drivers/crypto/caam/desc_constr.h
@@ -0,0 +1,540 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * caam descriptor construction helper functions
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ */
+
+#ifndef DESC_CONSTR_H
+#define DESC_CONSTR_H
+
+#include "desc.h"
+#include "regs.h"
+
+#define IMMEDIATE (1 << 23)
+#define CAAM_CMD_SZ sizeof(u32)
+#define CAAM_PTR_SZ sizeof(dma_addr_t)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+
+#ifdef DEBUG
+#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
+			      &__func__[sizeof("append")]); } while (0)
+#else
+#define PRINT_POS
+#endif
+
+#define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
+			       LDST_SRCDST_WORD_DECOCTRL | \
+			       (LDOFF_CHG_SHARE_OK_NO_PROP << \
+				LDST_OFFSET_SHIFT))
+#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
+				LDST_SRCDST_WORD_DECOCTRL | \
+				(LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+#define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
+			       LDST_SRCDST_WORD_DECOCTRL | \
+			       (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+
+extern bool caam_little_end;
+
+static inline int desc_len(u32 * const desc)
+{
+	return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
+}
+
+static inline int desc_bytes(void * const desc)
+{
+	return desc_len(desc) * CAAM_CMD_SZ;
+}
+
+static inline u32 *desc_end(u32 * const desc)
+{
+	return desc + desc_len(desc);
+}
+
+static inline void *sh_desc_pdb(u32 * const desc)
+{
+	return desc + 1;
+}
+
+static inline void init_desc(u32 * const desc, u32 options)
+{
+	*desc = cpu_to_caam32((options | HDR_ONE) + 1);
+}
+
+static inline void init_sh_desc(u32 * const desc, u32 options)
+{
+	PRINT_POS;
+	init_desc(desc, CMD_SHARED_DESC_HDR | options);
+}
+
+static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
+				    size_t pdb_bytes)
+{
+	u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+
+	init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) |
+		     options);
+}
+
+static inline void init_job_desc(u32 * const desc, u32 options)
+{
+	init_desc(desc, CMD_DESC_HDR | options);
+}
+
+static inline void init_job_desc_pdb(u32 * const desc, u32 options,
+				     size_t pdb_bytes)
+{
+	u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+
+	init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
+}
+
+static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
+{
+	dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
+
+	*offset = cpu_to_caam_dma(ptr);
+
+	(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
+				CAAM_PTR_SZ / CAAM_CMD_SZ);
+}
+
+static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
+					int len, u32 options)
+{
+	PRINT_POS;
+	init_job_desc(desc, HDR_SHARED | options |
+		      (len << HDR_START_IDX_SHIFT));
+	append_ptr(desc, ptr);
+}
+
+static inline void append_data(u32 * const desc, const void *data, int len)
+{
+	u32 *offset = desc_end(desc);
+
+	if (len) /* avoid sparse warning: memcpy with byte count of 0 */
+		memcpy(offset, data, len);
+
+	(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
+				(len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
+}
+
+static inline void append_cmd(u32 * const desc, u32 command)
+{
+	u32 *cmd = desc_end(desc);
+
+	*cmd = cpu_to_caam32(command);
+
+	(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 1);
+}
+
+#define append_u32 append_cmd
+
+static inline void append_u64(u32 * const desc, u64 data)
+{
+	u32 *offset = desc_end(desc);
+
+	/* Only 32-bit alignment is guaranteed in descriptor buffer */
+	if (caam_little_end) {
+		*offset = cpu_to_caam32(lower_32_bits(data));
+		*(++offset) = cpu_to_caam32(upper_32_bits(data));
+	} else {
+		*offset = cpu_to_caam32(upper_32_bits(data));
+		*(++offset) = cpu_to_caam32(lower_32_bits(data));
+	}
+
+	(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 2);
+}
+
+/* Write command without affecting header, and return pointer to next word */
+static inline u32 *write_cmd(u32 * const desc, u32 command)
+{
+	*desc = cpu_to_caam32(command);
+
+	return desc + 1;
+}
+
+static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
+				  u32 command)
+{
+	append_cmd(desc, command | len);
+	append_ptr(desc, ptr);
+}
+
+/* Write length after pointer, rather than inside command */
+static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
+					 unsigned int len, u32 command)
+{
+	append_cmd(desc, command);
+	if (!(command & (SQIN_RTO | SQIN_PRE)))
+		append_ptr(desc, ptr);
+	append_cmd(desc, len);
+}
+
+static inline void append_cmd_data(u32 * const desc, const void *data, int len,
+				   u32 command)
+{
+	append_cmd(desc, command | IMMEDIATE | len);
+	append_data(desc, data, len);
+}
+
+#define APPEND_CMD_RET(cmd, op) \
+static inline u32 *append_##cmd(u32 * const desc, u32 options) \
+{ \
+	u32 *cmd = desc_end(desc); \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | options); \
+	return cmd; \
+}
+APPEND_CMD_RET(jump, JUMP)
+APPEND_CMD_RET(move, MOVE)
+
+static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
+{
+	*jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
+				  (desc_len(desc) - (jump_cmd - desc)));
+}
+
+static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
+{
+	u32 val = caam32_to_cpu(*move_cmd);
+
+	val &= ~MOVE_OFFSET_MASK;
+	val |= (desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK;
+	*move_cmd = cpu_to_caam32(val);
+}
+
+#define APPEND_CMD(cmd, op) \
+static inline void append_##cmd(u32 * const desc, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | options); \
+}
+APPEND_CMD(operation, OPERATION)
+
+#define APPEND_CMD_LEN(cmd, op) \
+static inline void append_##cmd(u32 * const desc, unsigned int len, \
+				u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | len | options); \
+}
+
+APPEND_CMD_LEN(seq_load, SEQ_LOAD)
+APPEND_CMD_LEN(seq_store, SEQ_STORE)
+APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
+APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
+
+#define APPEND_CMD_PTR(cmd, op) \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
+				unsigned int len, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
+}
+APPEND_CMD_PTR(key, KEY)
+APPEND_CMD_PTR(load, LOAD)
+APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
+APPEND_CMD_PTR(fifo_store, FIFO_STORE)
+
+static inline void append_store(u32 * const desc, dma_addr_t ptr,
+				unsigned int len, u32 options)
+{
+	u32 cmd_src;
+
+	cmd_src = options & LDST_SRCDST_MASK;
+
+	append_cmd(desc, CMD_STORE | options | len);
+
+	/* The following options do not require a pointer */
+	if (!(cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED ||
+	      cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB    ||
+	      cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB_WE ||
+	      cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED_WE))
+		append_ptr(desc, ptr);
+}
+
+#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
+static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
+						 dma_addr_t ptr, \
+						 unsigned int len, \
+						 u32 options) \
+{ \
+	PRINT_POS; \
+	if (options & (SQIN_RTO | SQIN_PRE)) \
+		append_cmd(desc, CMD_SEQ_##op##_PTR | len | options); \
+	else \
+		append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
+}
+APPEND_SEQ_PTR_INTLEN(in, IN)
+APPEND_SEQ_PTR_INTLEN(out, OUT)
+
+#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
+					 unsigned int len, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd_data(desc, data, len, CMD_##op | options); \
+}
+APPEND_CMD_PTR_TO_IMM(load, LOAD);
+APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
+
+#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
+static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
+					 unsigned int len, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \
+}
+APPEND_CMD_PTR_EXTLEN(seq_in_ptr, SEQ_IN_PTR)
+APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
+
+/*
+ * Determine whether to store the length internally or externally, depending
+ * on the size of its type
+ */
+#define APPEND_CMD_PTR_LEN(cmd, op, type) \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
+				type len, u32 options) \
+{ \
+	PRINT_POS; \
+	if (sizeof(type) > sizeof(u16)) \
+		append_##cmd##_extlen(desc, ptr, len, options); \
+	else \
+		append_##cmd##_intlen(desc, ptr, len, options); \
+}
+APPEND_CMD_PTR_LEN(seq_in_ptr, SEQ_IN_PTR, u32)
+APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
+
+/*
+ * Second variant, for commands whose specified immediate length differs
+ * from the length of the immediate data provided, e.g. split keys
+ */
+#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
+					 unsigned int data_len, \
+					 unsigned int len, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \
+	append_data(desc, data, data_len); \
+}
+APPEND_CMD_PTR_TO_IMM2(key, KEY);
+
+#define APPEND_CMD_RAW_IMM(cmd, op, type) \
+static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
+					     u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
+	append_cmd(desc, immediate); \
+}
+APPEND_CMD_RAW_IMM(load, LOAD, u32);
+
+/*
+ * ee - endianness
+ * size - size of immediate type in bytes
+ */
+#define APPEND_CMD_RAW_IMM2(cmd, op, ee, size) \
+static inline void append_##cmd##_imm_##ee##size(u32 *desc, \
+						   u##size immediate, \
+						   u32 options) \
+{ \
+	__##ee##size data = cpu_to_##ee##size(immediate); \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(data)); \
+	append_data(desc, &data, sizeof(data)); \
+}
+
+APPEND_CMD_RAW_IMM2(load, LOAD, be, 32);
+
+/*
+ * Append a MATH command. Only the last part of the destination and source
+ * needs to be specified.
+ */
+#define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
+append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
+	MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32)len);
+
+#define append_math_add(desc, dest, src0, src1, len) \
+	APPEND_MATH(ADD, desc, dest, src0, src1, len)
+#define append_math_sub(desc, dest, src0, src1, len) \
+	APPEND_MATH(SUB, desc, dest, src0, src1, len)
+#define append_math_add_c(desc, dest, src0, src1, len) \
+	APPEND_MATH(ADDC, desc, dest, src0, src1, len)
+#define append_math_sub_b(desc, dest, src0, src1, len) \
+	APPEND_MATH(SUBB, desc, dest, src0, src1, len)
+#define append_math_and(desc, dest, src0, src1, len) \
+	APPEND_MATH(AND, desc, dest, src0, src1, len)
+#define append_math_or(desc, dest, src0, src1, len) \
+	APPEND_MATH(OR, desc, dest, src0, src1, len)
+#define append_math_xor(desc, dest, src0, src1, len) \
+	APPEND_MATH(XOR, desc, dest, src0, src1, len)
+#define append_math_lshift(desc, dest, src0, src1, len) \
+	APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
+#define append_math_rshift(desc, dest, src0, src1, len) \
+	APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
+#define append_math_ldshift(desc, dest, src0, src1, len) \
+	APPEND_MATH(SHLD, desc, dest, src0, src1, len)
+
+/* Exactly one source is IMM. Data is passed in as a u32 value */
+#define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
+do { \
+	APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
+	append_cmd(desc, data); \
+} while (0)
+
+#define append_math_add_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
+#define append_math_sub_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data)
+#define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data)
+#define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data)
+#define append_math_and_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data)
+#define append_math_or_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data)
+#define append_math_xor_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data)
+#define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
+#define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
+
+/* Exactly one source is IMM. Data is passed in as a u64 value */
+#define APPEND_MATH_IMM_u64(op, desc, dest, src_0, src_1, data) \
+do { \
+	u32 upper = (data >> 16) >> 16; \
+	APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ * 2 | \
+		    (upper ? 0 : MATH_IFB)); \
+	if (upper) \
+		append_u64(desc, data); \
+	else \
+		append_u32(desc, lower_32_bits(data)); \
+} while (0)
+
+#define append_math_add_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(ADD, desc, dest, src0, src1, data)
+#define append_math_sub_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(SUB, desc, dest, src0, src1, data)
+#define append_math_add_c_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(ADDC, desc, dest, src0, src1, data)
+#define append_math_sub_b_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(SUBB, desc, dest, src0, src1, data)
+#define append_math_and_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(AND, desc, dest, src0, src1, data)
+#define append_math_or_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(OR, desc, dest, src0, src1, data)
+#define append_math_xor_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(XOR, desc, dest, src0, src1, data)
+#define append_math_lshift_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
+#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ *           functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @keylen_pad: padded length of the provided algorithm key, in bytes
+ * @key_dma: dma (bus) address of the algorithm key (used when key_inline is false)
+ * @key_virt: virtual address of the algorithm key (used when key_inline is true)
+ * @key_inline: true - key can be inlined in the descriptor; false - key is
+ *              referenced by the descriptor
+ */
+struct alginfo {
+	u32 algtype;
+	unsigned int keylen;
+	unsigned int keylen_pad;
+	union {
+		dma_addr_t key_dma;
+		const void *key_virt;
+	};
+	bool key_inline;
+};
+
+/**
+ * desc_inline_query() - Provide indications on which data items can be inlined
+ *                       and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ *               excluding the data items to be inlined (or corresponding
+ *               pointer if an item is not inlined). Each cnstr_* function that
+ *               generates descriptors should provide a define specifying the
+ *               corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ *          together with the shared descriptor.
+ * @data_len: Array of lengths of the data items to be inlined
+ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
+ *            otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
+ *         check @inl_mask for details.
+ */
+static inline int desc_inline_query(unsigned int sd_base_len,
+				    unsigned int jd_len, unsigned int *data_len,
+				    u32 *inl_mask, unsigned int count)
+{
+	int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+	unsigned int i;
+
+	*inl_mask = 0;
+	for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+		if (rem_bytes - (int)(data_len[i] +
+			(count - i - 1) * CAAM_PTR_SZ) >= 0) {
+			rem_bytes -= data_len[i];
+			*inl_mask |= (1 << i);
+		} else {
+			rem_bytes -= CAAM_PTR_SZ;
+		}
+	}
+
+	return (rem_bytes >= 0) ? 0 : -1;
+}
+
+/**
+ * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ *         keylen should be the length of the initial key, and keylen_pad
+ *         the length of the derived (split) key.
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
+ *         SHA256, SHA384, SHA512}.
+ */
+static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
+{
+	u32 protid;
+
+	/*
+	 * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
+	 * to OP_PCLID_DKP_{MD5, SHA*}
+	 */
+	protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
+		 (0x20 << OP_ALG_ALGSEL_SHIFT);
+
+	if (adata->key_inline) {
+		int words;
+
+		append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+				 OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
+				 adata->keylen);
+		append_data(desc, adata->key_virt, adata->keylen);
+
+		/* Reserve space in descriptor buffer for the derived key */
+		words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
+			 ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
+		if (words)
+			(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
+	} else {
+		append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+				 OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
+				 adata->keylen);
+		append_ptr(desc, adata->key_dma);
+	}
+}
+
+#endif /* DESC_CONSTR_H */
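
To make the construction flow concrete, here is a hypothetical sketch (not
taken from the driver) of how the helpers above might be used together: query
which keys fit inline with desc_inline_query(), record the outcome in the two
struct alginfo containers, then build a shared descriptor. EXAMPLE_DESC_BASE is
a placeholder for the per-algorithm base-length define that the
desc_inline_query() kernel-doc asks for; HDR_SHARE_SERIAL, CLASS_1 and
KEY_DEST_CLASS_REG are flags defined earlier in desc.h.

	static int example_build_sh_desc(u32 *desc, struct alginfo *adata,
					 struct alginfo *cdata)
	{
		unsigned int data_len[2] = { adata->keylen_pad, cdata->keylen };
		u32 inl_mask;

		/* Which of the two keys can be inlined in the descriptor? */
		if (desc_inline_query(EXAMPLE_DESC_BASE, DESC_JOB_IO_LEN,
				      data_len, &inl_mask,
				      ARRAY_SIZE(data_len)) < 0)
			return -EINVAL;

		adata->key_inline = !!(inl_mask & 1);
		cdata->key_inline = !!(inl_mask & 2);

		init_sh_desc(desc, HDR_SHARE_SERIAL);

		/* Split (HMAC) key via DKP, inlined or referenced as decided */
		append_proto_dkp(desc, adata);

		/* Class-1 cipher key, immediate or by pointer */
		if (cdata->key_inline)
			append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
					  cdata->keylen,
					  CLASS_1 | KEY_DEST_CLASS_REG);
		else
			append_key(desc, cdata->key_dma, cdata->keylen,
				   CLASS_1 | KEY_DEST_CLASS_REG);

		return 0;
	}
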
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
new file mode 100644
index 0000000..8da88be
--- /dev/null
+++ b/drivers/crypto/caam/error.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CAAM Error Reporting
+ *
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "desc.h"
+#include "error.h"
+
+#ifdef DEBUG
+#include <linux/highmem.h>
+
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+		  int rowsize, int groupsize, struct scatterlist *sg,
+		  size_t tlen, bool ascii)
+{
+	struct scatterlist *it;
+	void *it_page;
+	size_t len;
+	void *buf;
+
+	for (it = sg; it && tlen > 0; it = sg_next(it)) {
+		/*
+		 * make sure the scatterlist's page
+		 * has a valid virtual memory mapping
+		 */
+		it_page = kmap_atomic(sg_page(it));
+		if (unlikely(!it_page)) {
+			pr_err("caam_dump_sg: kmap failed\n");
+			return;
+		}
+
+		buf = it_page + it->offset;
+		len = min_t(size_t, tlen, it->length);
+		print_hex_dump(level, prefix_str, prefix_type, rowsize,
+			       groupsize, buf, len, ascii);
+		tlen -= len;
+
+		kunmap_atomic(it_page);
+	}
+}
+#else
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+		  int rowsize, int groupsize, struct scatterlist *sg,
+		  size_t tlen, bool ascii)
+{}
+#endif /* DEBUG */
+EXPORT_SYMBOL(caam_dump_sg);
+
+static const struct {
+	u8 value;
+	const char *error_text;
+} desc_error_list[] = {
+	{ 0x00, "No error." },
+	{ 0x01, "SGT Length Error. The descriptor is trying to read more data than is contained in the SGT table." },
+	{ 0x02, "SGT Null Entry Error." },
+	{ 0x03, "Job Ring Control Error. There is a bad value in the Job Ring Control register." },
+	{ 0x04, "Invalid Descriptor Command. The Descriptor Command field is invalid." },
+	{ 0x05, "Reserved." },
+	{ 0x06, "Invalid KEY Command" },
+	{ 0x07, "Invalid LOAD Command" },
+	{ 0x08, "Invalid STORE Command" },
+	{ 0x09, "Invalid OPERATION Command" },
+	{ 0x0A, "Invalid FIFO LOAD Command" },
+	{ 0x0B, "Invalid FIFO STORE Command" },
+	{ 0x0C, "Invalid MOVE/MOVE_LEN Command" },
+	{ 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is invalid because the target is not a Job Header Command, or the jump is from a Trusted Descriptor to a Job Descriptor, or because the target Descriptor contains a Shared Descriptor." },
+	{ 0x0E, "Invalid MATH Command" },
+	{ 0x0F, "Invalid SIGNATURE Command" },
+	{ 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO LOAD, or SEQ FIFO STORE decremented the input or output sequence length below 0. This error may result if a built-in PROTOCOL Command has encountered a malformed PDU." },
+	{ 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
+	{ 0x12, "Shared Descriptor Header Error" },
+	{ 0x13, "Header Error. Invalid length or parity, or certain other problems." },
+	{ 0x14, "Burster Error. Burster has gotten to an illegal state" },
+	{ 0x15, "Context Register Length Error. The descriptor is trying to read or write past the end of the Context Register. A SEQ LOAD or SEQ STORE with the VLF bit set was executed with too large a length in the variable length register (VSOL for SEQ STORE or VSIL for SEQ LOAD)." },
+	{ 0x16, "DMA Error" },
+	{ 0x17, "Reserved." },
+	{ 0x1A, "Job failed due to JR reset" },
+	{ 0x1B, "Job failed due to Fail Mode" },
+	{ 0x1C, "DECO Watchdog timer timeout error" },
+	{ 0x1D, "DECO tried to copy a key from another DECO but the other DECO's Key Registers were locked" },
+	{ 0x1E, "DECO attempted to copy data from a DECO that had an unmasked Descriptor error" },
+	{ 0x1F, "LIODN error. DECO was trying to share from itself or from another DECO but the two Non-SEQ LIODN values didn't match or the 'shared from' DECO's Descriptor required that the SEQ LIODNs be the same and they aren't." },
+	{ 0x20, "DECO has completed a reset initiated via the DRR register" },
+	{ 0x21, "Nonce error. When using EKT (CCM) key encryption option in the FIFO STORE Command, the Nonce counter reached its maximum value and this encryption mode can no longer be used." },
+	{ 0x22, "Meta data is too large (> 511 bytes) for TLS decap (input frame; block ciphers) and IPsec decap (output frame, when doing the next header byte update) and DCRC (output frame)." },
+	{ 0x23, "Read Input Frame error" },
+	{ 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
+	{ 0x80, "DNR (do not run) error" },
+	{ 0x81, "undefined protocol command" },
+	{ 0x82, "invalid setting in PDB" },
+	{ 0x83, "Anti-replay LATE error" },
+	{ 0x84, "Anti-replay REPLAY error" },
+	{ 0x85, "Sequence number overflow" },
+	{ 0x86, "Sigver invalid signature" },
+	{ 0x87, "DSA Sign Illegal test descriptor" },
+	{ 0x88, "Protocol Format Error - A protocol has seen an error in the format of data received. When running RSA, this means that formatting with random padding was used, and did not follow the form: 0x00, 0x02, 8-to-N bytes of non-zero pad, 0x00, F data." },
+	{ 0x89, "Protocol Size Error - A protocol has seen an error in size. When running RSA, pdb size N < (size of F) when no formatting is used; or pdb size N < (F + 11) when formatting is used." },
+	{ 0xC1, "Blob Command error: Undefined mode" },
+	{ 0xC2, "Blob Command error: Secure Memory Blob mode error" },
+	{ 0xC4, "Blob Command error: Black Blob key or input size error" },
+	{ 0xC5, "Blob Command error: Invalid key destination" },
+	{ 0xC8, "Blob Command error: Trusted/Secure mode error" },
+	{ 0xF0, "IPsec TTL or hop limit field either came in as 0, or was decremented to 0" },
+	{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
+};
+
+static const char * const cha_id_list[] = {
+	"",
+	"AES",
+	"DES",
+	"ARC4",
+	"MDHA",
+	"RNG",
+	"SNOW f8",
+	"Kasumi f8/9",
+	"PKHA",
+	"CRCA",
+	"SNOW f9",
+	"ZUCE",
+	"ZUCA",
+};
+
+static const char * const err_id_list[] = {
+	"No error.",
+	"Mode error.",
+	"Data size error.",
+	"Key size error.",
+	"PKHA A memory size error.",
+	"PKHA B memory size error.",
+	"Data arrived out of sequence error.",
+	"PKHA divide-by-zero error.",
+	"PKHA modulus even error.",
+	"DES key parity error.",
+	"ICV check failed.",
+	"Hardware error.",
+	"Unsupported CCM AAD size.",
+	"Class 1 CHA is not reset",
+	"Invalid CHA combination was selected",
+	"Invalid CHA selected.",
+};
+
+static const char * const rng_err_id_list[] = {
+	"",
+	"",
+	"",
+	"Instantiate",
+	"Not instantiated",
+	"Test instantiate",
+	"Prediction resistance",
+	"Prediction resistance and test request",
+	"Uninstantiate",
+	"Secure key generation",
+};
+
+static void report_ccb_status(struct device *jrdev, const u32 status,
+			      const char *error)
+{
+	u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
+		    JRSTA_CCBERR_CHAID_SHIFT;
+	u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+	u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
+		  JRSTA_DECOERR_INDEX_SHIFT;
+	char *idx_str;
+	const char *cha_str = "unidentified cha_id value 0x";
+	char cha_err_code[3] = { 0 };
+	const char *err_str = "unidentified err_id value 0x";
+	char err_err_code[3] = { 0 };
+
+	if (status & JRSTA_DECOERR_JUMP)
+		idx_str = "jump tgt desc idx";
+	else
+		idx_str = "desc idx";
+
+	if (cha_id < ARRAY_SIZE(cha_id_list))
+		cha_str = cha_id_list[cha_id];
+	else
+		snprintf(cha_err_code, sizeof(cha_err_code), "%02x", cha_id);
+
+	if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG &&
+	    err_id < ARRAY_SIZE(rng_err_id_list) &&
+	    strlen(rng_err_id_list[err_id])) {
+		/* RNG-only error */
+		err_str = rng_err_id_list[err_id];
+	} else {
+		err_str = err_id_list[err_id];
+	}
+
+	/*
+	 * CCB ICV check failures are part of normal operation life;
+	 * we leave the upper layers to do what they want with them.
+	 */
+	if (err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+		dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
+			status, error, idx_str, idx,
+			cha_str, cha_err_code,
+			err_str, err_err_code);
+}
+
+static void report_jump_status(struct device *jrdev, const u32 status,
+			       const char *error)
+{
+	dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+		status, error, __func__);
+}
+
+static void report_deco_status(struct device *jrdev, const u32 status,
+			       const char *error)
+{
+	u8 err_id = status & JRSTA_DECOERR_ERROR_MASK;
+	u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
+		  JRSTA_DECOERR_INDEX_SHIFT;
+	char *idx_str;
+	const char *err_str = "unidentified error value 0x";
+	char err_err_code[3] = { 0 };
+	int i;
+
+	if (status & JRSTA_DECOERR_JUMP)
+		idx_str = "jump tgt desc idx";
+	else
+		idx_str = "desc idx";
+
+	for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
+		if (desc_error_list[i].value == err_id)
+			break;
+
+	if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text)
+		err_str = desc_error_list[i].error_text;
+	else
+		snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+
+	dev_err(jrdev, "%08x: %s: %s %d: %s%s\n",
+		status, error, idx_str, idx, err_str, err_err_code);
+}
+
+static void report_jr_status(struct device *jrdev, const u32 status,
+			     const char *error)
+{
+	dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+		status, error, __func__);
+}
+
+static void report_cond_code_status(struct device *jrdev, const u32 status,
+				    const char *error)
+{
+	dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+		status, error, __func__);
+}
+
+void caam_jr_strstatus(struct device *jrdev, u32 status)
+{
+	static const struct stat_src {
+		void (*report_ssed)(struct device *jrdev, const u32 status,
+				    const char *error);
+		const char *error;
+	} status_src[16] = {
+		{ NULL, "No error" },
+		{ NULL, NULL },
+		{ report_ccb_status, "CCB" },
+		{ report_jump_status, "Jump" },
+		{ report_deco_status, "DECO" },
+		{ NULL, "Queue Manager Interface" },
+		{ report_jr_status, "Job Ring" },
+		{ report_cond_code_status, "Condition Code" },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+	};
+	u32 ssrc = status >> JRSTA_SSRC_SHIFT;
+	const char *error = status_src[ssrc].error;
+
+	/*
+	 * If there is an error handling function, call it to report the error.
+	 * Otherwise print the error source name.
+	 */
+	if (status_src[ssrc].report_ssed)
+		status_src[ssrc].report_ssed(jrdev, status, error);
+	else if (error)
+		dev_err(jrdev, "%d: %s\n", ssrc, error);
+	else
+		dev_err(jrdev, "%d: unknown error source\n", ssrc);
+}
+EXPORT_SYMBOL(caam_jr_strstatus);
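
A hedged sketch of the intended call site for caam_jr_strstatus(): job-ring
users hand a completion callback to caam_jr_enqueue() (declared in jr.h), and
the callback decodes a non-zero status word before propagating an error. The
example_done() name and the result structure are illustrative only and assume
<linux/completion.h> is available.

	struct example_result {
		struct completion done;
		int err;
	};

	static void example_done(struct device *jrdev, u32 *desc, u32 status,
				 void *context)
	{
		struct example_result *res = context;

		if (status)
			caam_jr_strstatus(jrdev, status); /* decode + dev_err */

		res->err = status ? -EIO : 0;
		complete(&res->done);
	}
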
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
new file mode 100644
index 0000000..5aa332b
--- /dev/null
+++ b/drivers/crypto/caam/error.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM Error Reporting code header
+ *
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CAAM_ERROR_H
+#define CAAM_ERROR_H
+#define CAAM_ERROR_STR_MAX 302
+void caam_jr_strstatus(struct device *jrdev, u32 status);
+
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+		  int rowsize, int groupsize, struct scatterlist *sg,
+		  size_t tlen, bool ascii);
+#endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
new file mode 100644
index 0000000..babc78a
--- /dev/null
+++ b/drivers/crypto/caam/intern.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM/SEC 4.x driver backend
+ * Private/internal definitions between modules
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef INTERN_H
+#define INTERN_H
+
+/* Currently comes from the Kconfig param as a power of 2 (driver-required) */
+#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
+
+/* Kconfig params for interrupt coalescing if selected (else zero) */
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC
+#define JOBR_INTC JRCFG_ICEN
+#define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
+#define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
+#else
+#define JOBR_INTC 0
+#define JOBR_INTC_TIME_THLD 0
+#define JOBR_INTC_COUNT_THLD 0
+#endif
+
+/*
+ * Storage for tracking each in-process entry moving across a ring
+ * Each entry on an output ring needs one of these
+ */
+struct caam_jrentry_info {
+	void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg);
+	void *cbkarg;	/* Argument per ring entry */
+	u32 *desc_addr_virt;	/* Stored virt addr for postprocessing */
+	dma_addr_t desc_addr_dma;	/* Stored bus addr for done matching */
+	u32 desc_size;	/* Stored size for postprocessing, header derived */
+};
+
+/* Private sub-storage for a single JobR */
+struct caam_drv_private_jr {
+	struct list_head	list_node;	/* Job Ring device list */
+	struct device		*dev;
+	int ridx;
+	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
+	struct tasklet_struct irqtask;
+	int irq;			/* One per queue */
+
+	/* Number of scatterlist crypt transforms active on the JobR */
+	atomic_t tfm_count ____cacheline_aligned;
+
+	/* Job ring info */
+	int ringsize;	/* Size of rings (assume input = output) */
+	struct caam_jrentry_info *entinfo;	/* Alloc'ed 1 per ring entry */
+	spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
+	int inp_ring_write_index;	/* Input index "tail" */
+	int head;			/* entinfo (s/w ring) head index */
+	dma_addr_t *inpring;	/* Base of input ring, alloc DMA-safe */
+	spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
+	int out_ring_read_index;	/* Output index "tail" */
+	int tail;			/* entinfo (s/w ring) tail index */
+	struct jr_outentry *outring;	/* Base of output ring, DMA-safe */
+};
+
+/*
+ * Driver-private storage for a single CAAM block instance
+ */
+struct caam_drv_private {
+#ifdef CONFIG_CAAM_QI
+	struct device *qidev;
+#endif
+
+	/* Physical-presence section */
+	struct caam_ctrl __iomem *ctrl; /* controller region */
+	struct caam_deco __iomem *deco; /* DECO/CCB views */
+	struct caam_assurance __iomem *assure;
+	struct caam_queue_if __iomem *qi; /* QI control region */
+	struct caam_job_ring __iomem *jr[4];	/* JobR's register space */
+
+	/*
+	 * Detected geometry block. Filled in from device tree if powerpc,
+	 * or from register-based version detection code
+	 */
+	u8 total_jobrs;		/* Total Job Rings in device */
+	u8 qi_present;		/* Nonzero if QI present in device */
+	u8 mc_en;		/* Nonzero if MC f/w is active */
+	int secvio_irq;		/* Security violation interrupt number */
+	int virt_en;		/* Virtualization enabled in CAAM */
+	int era;		/* CAAM Era (internal HW revision) */
+
+#define	RNG4_MAX_HANDLES 2
+	/* RNG4 block */
+	u32 rng4_sh_init;	/* This bitmap shows which of the State
+				   Handles of the RNG4 block are initialized
+				   by this driver */
+
+	struct clk *caam_ipg;
+	struct clk *caam_mem;
+	struct clk *caam_aclk;
+	struct clk *caam_emi_slow;
+
+	/*
+	 * debugfs entries for developer view into driver/device
+	 * variables at runtime.
+	 */
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dfs_root;
+	struct dentry *ctl; /* controller dir */
+	struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
+	struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
+#endif
+};
+
+void caam_jr_algapi_init(struct device *dev);
+void caam_jr_algapi_remove(struct device *dev);
+
+#ifdef CONFIG_DEBUG_FS
+static int caam_debugfs_u64_get(void *data, u64 *val)
+{
+	*val = caam64_to_cpu(*(u64 *)data);
+	return 0;
+}
+
+static int caam_debugfs_u32_get(void *data, u64 *val)
+{
+	*val = caam32_to_cpu(*(u32 *)data);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
+#endif
+
+#endif /* INTERN_H */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
new file mode 100644
index 0000000..acdd720
--- /dev/null
+++ b/drivers/crypto/caam/jr.c
@@ -0,0 +1,582 @@
+/*
+ * CAAM/SEC 4.x transport/backend driver
+ * JobR backend functionality
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include "compat.h"
+#include "ctrl.h"
+#include "regs.h"
+#include "jr.h"
+#include "desc.h"
+#include "intern.h"
+
+struct jr_driver_data {
+	/* List of Physical JobR's with the Driver */
+	struct list_head	jr_list;
+	spinlock_t		jr_alloc_lock;	/* jr_list lock */
+} ____cacheline_aligned;
+
+static struct jr_driver_data driver_data;
+
+static int caam_reset_hw_jr(struct device *dev)
+{
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	unsigned int timeout = 100000;
+
+	/*
+	 * mask interrupts since we are going to poll
+	 * for reset completion status
+	 */
+	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
+
+	/* initiate flush (required prior to reset) */
+	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
+		JRINT_ERR_HALT_INPROGRESS) && --timeout)
+		cpu_relax();
+
+	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
+	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
+		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
+		return -EIO;
+	}
+
+	/* initiate reset */
+	timeout = 100000;
+	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
+		cpu_relax();
+
+	if (timeout == 0) {
+		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
+		return -EIO;
+	}
+
+	/* unmask interrupts */
+	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+
+	return 0;
+}
+
+/*
+ * Shutdown JobR independent of platform property code
+ */
+static int caam_jr_shutdown(struct device *dev)
+{
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	dma_addr_t inpbusaddr, outbusaddr;
+	int ret;
+
+	ret = caam_reset_hw_jr(dev);
+
+	tasklet_kill(&jrp->irqtask);
+
+	/* Release interrupt */
+	free_irq(jrp->irq, dev);
+
+	/* Free rings */
+	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+			  jrp->inpring, inpbusaddr);
+	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+			  jrp->outring, outbusaddr);
+	kfree(jrp->entinfo);
+
+	return ret;
+}
+
+static int caam_jr_remove(struct platform_device *pdev)
+{
+	int ret;
+	struct device *jrdev;
+	struct caam_drv_private_jr *jrpriv;
+
+	jrdev = &pdev->dev;
+	jrpriv = dev_get_drvdata(jrdev);
+
+	/*
+	 * Return -EBUSY if the job ring is still allocated to a user.
+	 */
+	if (atomic_read(&jrpriv->tfm_count)) {
+		dev_err(jrdev, "Device is busy\n");
+		return -EBUSY;
+	}
+
+	/* Remove the node from Physical JobR list maintained by driver */
+	spin_lock(&driver_data.jr_alloc_lock);
+	list_del(&jrpriv->list_node);
+	spin_unlock(&driver_data.jr_alloc_lock);
+
+	/* Release ring */
+	ret = caam_jr_shutdown(jrdev);
+	if (ret)
+		dev_err(jrdev, "Failed to shut down job ring\n");
+	irq_dispose_mapping(jrpriv->irq);
+
+	return ret;
+}
+
+/* Main per-ring interrupt handler */
+static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
+{
+	struct device *dev = st_dev;
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	u32 irqstate;
+
+	/*
+	 * Check the output ring for ready responses, kick
+	 * tasklet if jobs done.
+	 */
+	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
+	if (!irqstate)
+		return IRQ_NONE;
+
+	/*
+	 * If there is a JobR error, more development work is needed here.
+	 * Flag a bug for now, but we really need to shut down and
+	 * restart the queue (and fix the code).
+	 */
+	if (irqstate & JRINT_JR_ERROR) {
+		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
+		BUG();
+	}
+
+	/* mask valid interrupts */
+	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
+
+	/* Have valid interrupt at this point, just ACK and trigger */
+	wr_reg32(&jrp->rregs->jrintstatus, irqstate);
+
+	preempt_disable();
+	tasklet_schedule(&jrp->irqtask);
+	preempt_enable();
+
+	return IRQ_HANDLED;
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
+{
+	int hw_idx, sw_idx, i, head, tail;
+	struct device *dev = (struct device *)devarg;
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
+	u32 *userdesc, userstatus;
+	void *userarg;
+
+	while (rd_reg32(&jrp->rregs->outring_used)) {
+
+		head = READ_ONCE(jrp->head);
+
+		spin_lock(&jrp->outlock);
+
+		sw_idx = tail = jrp->tail;
+		hw_idx = jrp->out_ring_read_index;
+
+		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
+			sw_idx = (tail + i) & (JOBR_DEPTH - 1);
+
+			if (jrp->outring[hw_idx].desc ==
+			    caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
+				break; /* found */
+		}
+		/* we should never fail to find a matching descriptor */
+		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
+
+		/* Unmap just-run descriptor so we can post-process */
+		dma_unmap_single(dev,
+				 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
+				 jrp->entinfo[sw_idx].desc_size,
+				 DMA_TO_DEVICE);
+
+		/* mark completed, avoid matching on a recycled desc addr */
+		jrp->entinfo[sw_idx].desc_addr_dma = 0;
+
+		/* Stash callback params for use outside of lock */
+		usercall = jrp->entinfo[sw_idx].callbk;
+		userarg = jrp->entinfo[sw_idx].cbkarg;
+		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
+		userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus);
+
+		/*
+		 * Make sure all information from the job has been obtained
+		 * before telling CAAM that the job has been removed from the
+		 * output ring.
+		 */
+		mb();
+
+		/* set done */
+		wr_reg32(&jrp->rregs->outring_rmvd, 1);
+
+		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
+					   (JOBR_DEPTH - 1);
+
+		/*
+		 * if this job completed out-of-order, do not increment
+		 * the tail.  Otherwise, increment tail by 1 plus the
+		 * number of subsequent jobs already completed out-of-order
+		 */
+		if (sw_idx == tail) {
+			do {
+				tail = (tail + 1) & (JOBR_DEPTH - 1);
+			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
+				 jrp->entinfo[tail].desc_addr_dma == 0);
+
+			jrp->tail = tail;
+		}
+
+		spin_unlock(&jrp->outlock);
+
+		/* Finally, execute user's callback */
+		usercall(dev, userdesc, userstatus, userarg);
+	}
+
+	/* reenable / unmask IRQs */
+	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+}
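+
+/*
+ * Worked example (illustrative) of the tail-advance logic above: with
+ * JOBR_DEPTH = 8, head = 4 and tail = 1, suppose the jobs at s/w indices 2
+ * and 3 already completed out of order (their desc_addr_dma was zeroed) and
+ * the job at index 1 completes now. Since sw_idx == tail, the loop walks
+ * tail past 1, 2 and 3 in one pass and stops at 4 == head, i.e. the s/w
+ * ring is empty again.
+ */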
+
+/**
+ * caam_jr_alloc() - Alloc a job ring for someone to use as needed.
+ *
+ * Returns: pointer to the newly allocated physical JobR device if
+ *          successful, or ERR_PTR(-ENODEV) if no job ring is available.
+ **/
+struct device *caam_jr_alloc(void)
+{
+	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
+	struct device *dev = ERR_PTR(-ENODEV);
+	int min_tfm_cnt	= INT_MAX;
+	int tfm_cnt;
+
+	spin_lock(&driver_data.jr_alloc_lock);
+
+	if (list_empty(&driver_data.jr_list)) {
+		spin_unlock(&driver_data.jr_alloc_lock);
+		return ERR_PTR(-ENODEV);
+	}
+
+	list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
+		tfm_cnt = atomic_read(&jrpriv->tfm_count);
+		if (tfm_cnt < min_tfm_cnt) {
+			min_tfm_cnt = tfm_cnt;
+			min_jrpriv = jrpriv;
+		}
+		if (!min_tfm_cnt)
+			break;
+	}
+
+	if (min_jrpriv) {
+		atomic_inc(&min_jrpriv->tfm_count);
+		dev = min_jrpriv->dev;
+	}
+	spin_unlock(&driver_data.jr_alloc_lock);
+
+	return dev;
+}
+EXPORT_SYMBOL(caam_jr_alloc);
+
+/**
+ * caam_jr_free() - Free the Job Ring
+ * @rdev: points to the device that identifies the Job Ring to
+ *        be released.
+ **/
+void caam_jr_free(struct device *rdev)
+{
+	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
+
+	atomic_dec(&jrpriv->tfm_count);
+}
+EXPORT_SYMBOL(caam_jr_free);
+
+/**
+ * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
+ * -EBUSY if the queue is full, -EIO if it cannot map the caller's
+ * descriptor.
+ * @dev:  device of the job ring to be used. This device should have
+ *        been assigned previously by caam_jr_alloc().
+ * @desc: points to a job descriptor that executes our request. All
+ *        descriptors (and all referenced data) must be in a DMAable
+ *        region, and all data references must be physical addresses
+ *        accessible to CAAM (i.e. within a PAMU window granted
+ *        to it).
+ * @cbk:  pointer to a callback function to be invoked upon completion
+ *        of this request. This has the form:
+ *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
+ *        where:
+ *        @dev:    contains the job ring device that processed this
+ *                 response.
+ *        @desc:   descriptor that initiated the request, same as
+ *                 "desc" passed to caam_jr_enqueue().
+ *        @status: untranslated status received from CAAM. See the
+ *                 reference manual for a detailed description of
+ *                 error meaning, or see the JRSTA definitions in the
+ *                 register header file
+ *        @areq:   optional pointer to an argument passed with the
+ *                 original request
+ * @areq: optional pointer to a user argument for use at callback
+ *        time.
+ **/
+int caam_jr_enqueue(struct device *dev, u32 *desc,
+		    void (*cbk)(struct device *dev, u32 *desc,
+				u32 status, void *areq),
+		    void *areq)
+{
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	struct caam_jrentry_info *head_entry;
+	int head, tail, desc_size;
+	dma_addr_t desc_dma;
+
+	desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32);
+	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, desc_dma)) {
+		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
+		return -EIO;
+	}
+
+	spin_lock_bh(&jrp->inplock);
+
+	head = jrp->head;
+	tail = READ_ONCE(jrp->tail);
+
+	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
+	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
+		spin_unlock_bh(&jrp->inplock);
+		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
+		return -EBUSY;
+	}
+
+	head_entry = &jrp->entinfo[head];
+	head_entry->desc_addr_virt = desc;
+	head_entry->desc_size = desc_size;
+	head_entry->callbk = (void *)cbk;
+	head_entry->cbkarg = areq;
+	head_entry->desc_addr_dma = desc_dma;
+
+	jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);
+
+	/*
+	 * Guarantee that the descriptor's DMA address has been written to
+	 * the next slot in the ring before the write index is updated, since
+	 * other cores may update this index independently.
+	 */
+	smp_wmb();
+
+	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
+				    (JOBR_DEPTH - 1);
+	jrp->head = (head + 1) & (JOBR_DEPTH - 1);
+
+	/*
+	 * Ensure that all job information has been written before
+	 * notifying CAAM that a new job was added to the input ring.
+	 */
+	wmb();
+
+	wr_reg32(&jrp->rregs->inpring_jobadd, 1);
+
+	spin_unlock_bh(&jrp->inplock);
+
+	return 0;
+}
+EXPORT_SYMBOL(caam_jr_enqueue);
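+
+/*
+ * Illustrative sketch (not part of the driver): the expected usage pattern
+ * for the job ring API exported above, mirroring what gen_split_key() in
+ * key_gen.c does. The example_* names are hypothetical and "desc" is assumed
+ * to already be a complete job descriptor in a DMA-able buffer.
+ */
+struct example_jr_result {
+	struct completion completion;
+	int err;
+};
+
+static void example_jr_done(struct device *dev, u32 *desc, u32 status,
+			    void *context)
+{
+	struct example_jr_result *res = context;
+
+	res->err = status;
+	complete(&res->completion);
+}
+
+static int __maybe_unused example_run_desc(u32 *desc)
+{
+	struct example_jr_result res;
+	struct device *jrdev;
+	int ret;
+
+	jrdev = caam_jr_alloc();	/* least-loaded ring, or ERR_PTR() */
+	if (IS_ERR(jrdev))
+		return PTR_ERR(jrdev);
+
+	res.err = 0;
+	init_completion(&res.completion);
+
+	ret = caam_jr_enqueue(jrdev, desc, example_jr_done, &res);
+	if (!ret) {
+		/* job accepted; wait for example_jr_done() to fire */
+		wait_for_completion(&res.completion);
+		ret = res.err;
+	}
+
+	caam_jr_free(jrdev);		/* drop the ring's tfm_count reference */
+	return ret;
+}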
+
+/*
+ * Init JobR independent of platform property detection
+ */
+static int caam_jr_init(struct device *dev)
+{
+	struct caam_drv_private_jr *jrp;
+	dma_addr_t inpbusaddr, outbusaddr;
+	int i, error;
+
+	jrp = dev_get_drvdata(dev);
+
+	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
+	/* Connect job ring interrupt handler. */
+	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+			    dev_name(dev), dev);
+	if (error) {
+		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
+			jrp->ridx, jrp->irq);
+		goto out_kill_deq;
+	}
+
+	error = caam_reset_hw_jr(dev);
+	if (error)
+		goto out_free_irq;
+
+	error = -ENOMEM;
+	jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
+					  JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
+	if (!jrp->inpring)
+		goto out_free_irq;
+
+	jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
+					  JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
+	if (!jrp->outring)
+		goto out_free_inpring;
+
+	jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
+	if (!jrp->entinfo)
+		goto out_free_outring;
+
+	for (i = 0; i < JOBR_DEPTH; i++)
+		jrp->entinfo[i].desc_addr_dma = !0;
+
+	/* Setup rings */
+	jrp->inp_ring_write_index = 0;
+	jrp->out_ring_read_index = 0;
+	jrp->head = 0;
+	jrp->tail = 0;
+
+	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
+	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
+	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
+	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
+
+	jrp->ringsize = JOBR_DEPTH;
+
+	spin_lock_init(&jrp->inplock);
+	spin_lock_init(&jrp->outlock);
+
+	/* Select interrupt coalescing parameters */
+	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
+		      (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
+		      (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
+
+	return 0;
+
+out_free_outring:
+	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+			  jrp->outring, outbusaddr);
+out_free_inpring:
+	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+			  jrp->inpring, inpbusaddr);
+	dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
+out_free_irq:
+	free_irq(jrp->irq, dev);
+out_kill_deq:
+	tasklet_kill(&jrp->irqtask);
+	return error;
+}
+
+
+/*
+ * Probe routine for each detected JobR subsystem.
+ */
+static int caam_jr_probe(struct platform_device *pdev)
+{
+	struct device *jrdev;
+	struct device_node *nprop;
+	struct caam_job_ring __iomem *ctrl;
+	struct caam_drv_private_jr *jrpriv;
+	static int total_jobrs;
+	int error;
+
+	jrdev = &pdev->dev;
+	jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
+	if (!jrpriv)
+		return -ENOMEM;
+
+	dev_set_drvdata(jrdev, jrpriv);
+
+	/* save ring identity relative to detection */
+	jrpriv->ridx = total_jobrs++;
+
+	nprop = pdev->dev.of_node;
+	/* Get configuration properties from device tree */
+	/* First, get register page */
+	ctrl = of_iomap(nprop, 0);
+	if (!ctrl) {
+		dev_err(jrdev, "of_iomap() failed\n");
+		return -ENOMEM;
+	}
+
+	jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
+
+	if (sizeof(dma_addr_t) == sizeof(u64)) {
+		if (caam_dpaa2)
+			error = dma_set_mask_and_coherent(jrdev,
+							  DMA_BIT_MASK(49));
+		else if (of_device_is_compatible(nprop,
+						 "fsl,sec-v5.0-job-ring"))
+			error = dma_set_mask_and_coherent(jrdev,
+							  DMA_BIT_MASK(40));
+		else
+			error = dma_set_mask_and_coherent(jrdev,
+							  DMA_BIT_MASK(36));
+	} else {
+		error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
+	}
+	if (error) {
+		dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
+			error);
+		iounmap(ctrl);
+		return error;
+	}
+
+	/* Identify the interrupt */
+	jrpriv->irq = irq_of_parse_and_map(nprop, 0);
+
+	/* Now do the platform independent part */
+	error = caam_jr_init(jrdev); /* now turn on hardware */
+	if (error) {
+		irq_dispose_mapping(jrpriv->irq);
+		iounmap(ctrl);
+		return error;
+	}
+
+	jrpriv->dev = jrdev;
+	spin_lock(&driver_data.jr_alloc_lock);
+	list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
+	spin_unlock(&driver_data.jr_alloc_lock);
+
+	atomic_set(&jrpriv->tfm_count, 0);
+
+	return 0;
+}
+
+static const struct of_device_id caam_jr_match[] = {
+	{
+		.compatible = "fsl,sec-v4.0-job-ring",
+	},
+	{
+		.compatible = "fsl,sec4.0-job-ring",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, caam_jr_match);
+
+static struct platform_driver caam_jr_driver = {
+	.driver = {
+		.name = "caam_jr",
+		.of_match_table = caam_jr_match,
+	},
+	.probe       = caam_jr_probe,
+	.remove      = caam_jr_remove,
+};
+
+static int __init jr_driver_init(void)
+{
+	spin_lock_init(&driver_data.jr_alloc_lock);
+	INIT_LIST_HEAD(&driver_data.jr_list);
+	return platform_driver_register(&caam_jr_driver);
+}
+
+static void __exit jr_driver_exit(void)
+{
+	platform_driver_unregister(&caam_jr_driver);
+}
+
+module_init(jr_driver_init);
+module_exit(jr_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM JR request backend");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
new file mode 100644
index 0000000..eab6115
--- /dev/null
+++ b/drivers/crypto/caam/jr.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM public-level include definitions for the JobR backend
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef JR_H
+#define JR_H
+
+/* Prototypes for backend-level services exposed to APIs */
+struct device *caam_jr_alloc(void);
+void caam_jr_free(struct device *rdev);
+int caam_jr_enqueue(struct device *dev, u32 *desc,
+		    void (*cbk)(struct device *dev, u32 *desc, u32 status,
+				void *areq),
+		    void *areq);
+
+#endif /* JR_H */
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
new file mode 100644
index 0000000..312b5f0
--- /dev/null
+++ b/drivers/crypto/caam/key_gen.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CAAM/SEC 4.x functions for handling key-generation jobs
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+#include "compat.h"
+#include "jr.h"
+#include "error.h"
+#include "desc_constr.h"
+#include "key_gen.h"
+
+void split_key_done(struct device *dev, u32 *desc, u32 err,
+			   void *context)
+{
+	struct split_key_result *res = context;
+
+#ifdef DEBUG
+	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	if (err)
+		caam_jr_strstatus(dev, err);
+
+	res->err = err;
+
+	complete(&res->completion);
+}
+EXPORT_SYMBOL(split_key_done);
+
+/*
+ * Get a split ipad/opad key.
+ *
+ * Split key generation -----------------------------------------------
+ *
+ * [00] 0xb0810008    jobdesc: stidx=1 share=never len=8
+ * [01] 0x04000014        key: class2->keyreg len=20
+ *			@0xffe01000
+ * [03] 0x84410014  operation: cls2-op sha1 hmac init dec
+ * [04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
+ * [05] 0xa4000001       jump: class2 local all ->1 [06]
+ * [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
+ *			@0xffe04000
+ */
+int gen_split_key(struct device *jrdev, u8 *key_out,
+		  struct alginfo * const adata, const u8 *key_in, u32 keylen,
+		  int max_keylen)
+{
+	u32 *desc;
+	struct split_key_result result;
+	dma_addr_t dma_addr_in, dma_addr_out;
+	int ret = -ENOMEM;
+
+	adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
+	adata->keylen_pad = split_key_pad_len(adata->algtype &
+					      OP_ALG_ALGSEL_MASK);
+
+#ifdef DEBUG
+	dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+		adata->keylen, adata->keylen_pad);
+	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+#endif
+
+	if (adata->keylen_pad > max_keylen)
+		return -EINVAL;
+
+	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+	if (!desc) {
+		dev_err(jrdev, "unable to allocate key input memory\n");
+		return ret;
+	}
+
+	dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
+				     DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, dma_addr_in)) {
+		dev_err(jrdev, "unable to map key input memory\n");
+		goto out_free;
+	}
+
+	dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
+				      DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, dma_addr_out)) {
+		dev_err(jrdev, "unable to map key output memory\n");
+		goto out_unmap_in;
+	}
+
+	init_job_desc(desc, 0);
+	append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
+
+	/* Sets MDHA up into an HMAC-INIT */
+	append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
+			 OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
+			 OP_ALG_AS_INIT);
+
+	/*
+	 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
+	 * into both pads inside MDHA
+	 */
+	append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
+				FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
+
+	/*
+	 * FIFO_STORE with the explicit split-key content store
+	 * (0x26 output type)
+	 */
+	append_fifo_store(desc, dma_addr_out, adata->keylen,
+			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	result.err = 0;
+	init_completion(&result.completion);
+
+	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+	if (!ret) {
+		/* in progress */
+		wait_for_completion(&result.completion);
+		ret = result.err;
+#ifdef DEBUG
+		print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+			       adata->keylen_pad, 1);
+#endif
+	}
+
+	dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
+			 DMA_FROM_DEVICE);
+out_unmap_in:
+	dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+out_free:
+	kfree(desc);
+	return ret;
+}
+EXPORT_SYMBOL(gen_split_key);
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
new file mode 100644
index 0000000..818f78f
--- /dev/null
+++ b/drivers/crypto/caam/key_gen.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM/SEC 4.x definitions for handling key-generation jobs
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ *        SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+	u32 idx;
+
+	idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+	return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ *        SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+	return ALIGN(split_key_len(hash), 16);
+}
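+
+/*
+ * Worked example (illustrative): for OP_ALG_ALGSEL_SHA1 the MDHA pad size is
+ * 20 bytes, so split_key_len() returns 2 * 20 = 40 and split_key_pad_len()
+ * rounds that up to ALIGN(40, 16) = 48. For OP_ALG_ALGSEL_SHA256 the pad
+ * size is 32, giving 64 from both helpers, since 64 is already 16-byte
+ * aligned.
+ */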
+
+struct split_key_result {
+	struct completion completion;
+	int err;
+};
+
+void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
+
+int gen_split_key(struct device *jrdev, u8 *key_out,
+		  struct alginfo * const adata, const u8 *key_in, u32 keylen,
+		  int max_keylen);
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
new file mode 100644
index 0000000..810f0be
--- /dev/null
+++ b/drivers/crypto/caam/pdb.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM Protocol Data Block (PDB) definition header file
+ *
+ * Copyright 2008-2016 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef CAAM_PDB_H
+#define CAAM_PDB_H
+#include "compat.h"
+
+/*
+ * PDB - IPSec ESP Header Modification Options
+ */
+#define PDBHMO_ESP_DECAP_SHIFT	28
+#define PDBHMO_ESP_ENCAP_SHIFT	28
+/*
+ * Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the
+ * Options Byte IP version (IPvsn) field:
+ * if IPv4, decrement the inner IP header TTL field (byte 8);
+ * if IPv6, decrement the inner IP header Hop Limit field (byte 7).
+ */
+#define PDBHMO_ESP_DECAP_DEC_TTL	(0x02 << PDBHMO_ESP_DECAP_SHIFT)
+#define PDBHMO_ESP_ENCAP_DEC_TTL	(0x02 << PDBHMO_ESP_ENCAP_SHIFT)
+/*
+ * Decap - DiffServ Copy - Copy the IPv4 TOS or IPv6 Traffic Class byte
+ * from the outer IP header to the inner IP header.
+ */
+#define PDBHMO_ESP_DIFFSERV		(0x01 << PDBHMO_ESP_DECAP_SHIFT)
+/*
+ * Encap - Copy DF bit - if an IPv4 tunnel mode outer IP header is coming from
+ * the PDB, copy the DF bit from the inner IP header to the outer IP header.
+ */
+#define PDBHMO_ESP_DFBIT		(0x04 << PDBHMO_ESP_ENCAP_SHIFT)
+
+#define PDBNH_ESP_ENCAP_SHIFT		16
+#define PDBNH_ESP_ENCAP_MASK		(0xff << PDBNH_ESP_ENCAP_SHIFT)
+
+#define PDBHDRLEN_ESP_DECAP_SHIFT	16
+#define PDBHDRLEN_MASK			(0x0fff << PDBHDRLEN_ESP_DECAP_SHIFT)
+
+#define PDB_NH_OFFSET_SHIFT		8
+#define PDB_NH_OFFSET_MASK		(0xff << PDB_NH_OFFSET_SHIFT)
+
+/*
+ * PDB - IPSec ESP Encap/Decap Options
+ */
+#define PDBOPTS_ESP_ARSNONE	0x00 /* no antireplay window */
+#define PDBOPTS_ESP_ARS32	0x40 /* 32-entry antireplay window */
+#define PDBOPTS_ESP_ARS128	0x80 /* 128-entry antireplay window */
+#define PDBOPTS_ESP_ARS64	0xc0 /* 64-entry antireplay window */
+#define PDBOPTS_ESP_ARS_MASK	0xc0 /* antireplay window mask */
+#define PDBOPTS_ESP_IVSRC	0x20 /* IV comes from internal random gen */
+#define PDBOPTS_ESP_ESN		0x10 /* extended sequence included */
+#define PDBOPTS_ESP_OUTFMT	0x08 /* output only decapsulation (decap) */
+#define PDBOPTS_ESP_IPHDRSRC	0x08 /* IP header comes from PDB (encap) */
+#define PDBOPTS_ESP_INCIPHDR	0x04 /* Prepend IP header to output frame */
+#define PDBOPTS_ESP_IPVSN	0x02 /* process IPv6 header */
+#define PDBOPTS_ESP_AOFL	0x04 /* adjust out frame len (decap, SEC>=5.3)*/
+#define PDBOPTS_ESP_TUNNEL	0x01 /* tunnel mode next-header byte */
+#define PDBOPTS_ESP_IPV6	0x02 /* ip header version is V6 */
+#define PDBOPTS_ESP_DIFFSERV	0x40 /* copy TOS/TC from inner iphdr */
+#define PDBOPTS_ESP_UPDATE_CSUM 0x80 /* encap-update ip header checksum */
+#define PDBOPTS_ESP_VERIFY_CSUM 0x20 /* decap-validate ip header checksum */
+
+/*
+ * General IPSec encap/decap PDB definitions
+ */
+
+/**
+ * ipsec_encap_cbc - PDB part for IPsec CBC encapsulation
+ * @iv: 16-byte array initialization vector
+ */
+struct ipsec_encap_cbc {
+	u8 iv[16];
+};
+
+/**
+ * ipsec_encap_ctr - PDB part for IPsec CTR encapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ctr {
+	u8 ctr_nonce[4];
+	u32 ctr_initial;
+	u64 iv;
+};
+
+/**
+ * ipsec_encap_ccm - PDB part for IPsec CCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ *  b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ *    0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ *  ctr_flags (8b) - counter flags; constant equal to 0x3
+ *  ctr_initial (16b) - initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ccm {
+	u8 salt[4];
+	u32 ccm_opt;
+	u64 iv;
+};
+
+/**
+ * ipsec_encap_gcm - PDB part for IPsec GCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @rsvd1: reserved, do not use
+ * @iv: initialization vector
+ */
+struct ipsec_encap_gcm {
+	u8 salt[4];
+	u32 rsvd1;
+	u64 iv;
+};
+
+/**
+ * ipsec_encap_pdb - PDB for IPsec encapsulation
+ * @options: MSB-LSB description
+ *  hmo (header manipulation options) - 4b
+ *  reserved - 4b
+ *  next header - 8b
+ *  next header offset - 8b
+ *  option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @spi: IPsec SPI (Security Parameters Index)
+ * @ip_hdr_len: optional IP Header length (in bytes)
+ *  reserved - 16b
+ *  Opt. IP Hdr Len - 16b
+ * @ip_hdr: optional IP Header content
+ */
+struct ipsec_encap_pdb {
+	u32 options;
+	u32 seq_num_ext_hi;
+	u32 seq_num;
+	union {
+		struct ipsec_encap_cbc cbc;
+		struct ipsec_encap_ctr ctr;
+		struct ipsec_encap_ccm ccm;
+		struct ipsec_encap_gcm gcm;
+	};
+	u32 spi;
+	u32 ip_hdr_len;
+	u32 ip_hdr[0];
+};
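+
+/*
+ * Illustrative sketch (not part of the driver): how an encapsulation
+ * @options word might be packed from the bit layout documented above, for a
+ * tunnel-mode flow whose outer IP header comes from the PDB. The helper name
+ * and this particular flag combination are hypothetical.
+ */
+static inline u32 example_esp_encap_options(u8 next_header, u8 nh_offset)
+{
+	return PDBHMO_ESP_ENCAP_DEC_TTL |		/* hmo, bits 31-28 */
+	       ((u32)next_header << PDBNH_ESP_ENCAP_SHIFT) |
+	       ((u32)nh_offset << PDB_NH_OFFSET_SHIFT) |
+	       PDBOPTS_ESP_IPHDRSRC | PDBOPTS_ESP_INCIPHDR |
+	       PDBOPTS_ESP_UPDATE_CSUM | PDBOPTS_ESP_TUNNEL;
+}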
+
+/**
+ * ipsec_decap_cbc - PDB part for IPsec CBC decapsulation
+ * @rsvd: reserved, do not use
+ */
+struct ipsec_decap_cbc {
+	u32 rsvd[2];
+};
+
+/**
+ * ipsec_decap_ctr - PDB part for IPsec CTR decapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ */
+struct ipsec_decap_ctr {
+	u8 ctr_nonce[4];
+	u32 ctr_initial;
+};
+
+/**
+ * ipsec_decap_ccm - PDB part for IPsec CCM decapsulation
+ * @salt: 3-byte salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ *  b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ *    0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ *  ctr_flags (8b) - counter flags; constant equal to 0x3
+ *  ctr_initial (16b) - initial count constant
+ */
+struct ipsec_decap_ccm {
+	u8 salt[4];
+	u32 ccm_opt;
+};
+
+/**
+ * ipsec_decap_gcm - PDB part for IPsec GCM decapsulation
+ * @salt: 4-byte salt
+ * @resvd: reserved, do not use
+ */
+struct ipsec_decap_gcm {
+	u8 salt[4];
+	u32 resvd;
+};
+
+/**
+ * ipsec_decap_pdb - PDB for IPsec decapsulation
+ * @options: MSB-LSB description
+ *  hmo (header manipulation options) - 4b
+ *  IP header length - 12b
+ *  next header offset - 8b
+ *  option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @anti_replay: Anti-replay window; size depends on ARS (option flags)
+ */
+struct ipsec_decap_pdb {
+	u32 options;
+	union {
+		struct ipsec_decap_cbc cbc;
+		struct ipsec_decap_ctr ctr;
+		struct ipsec_decap_ccm ccm;
+		struct ipsec_decap_gcm gcm;
+	};
+	u32 seq_num_ext_hi;
+	u32 seq_num;
+	__be32 anti_replay[4];
+};
+
+/*
+ * IPSec ESP Datapath Protocol Override Register (DPOVRD)
+ */
+struct ipsec_deco_dpovrd {
+#define IPSEC_ENCAP_DECO_DPOVRD_USE 0x80
+	u8 ovrd_ecn;
+	u8 ip_hdr_len;
+	u8 nh_offset;
+	u8 next_header; /* reserved if decap */
+};
+
+/*
+ * IEEE 802.11i WiFi Protocol Data Block
+ */
+#define WIFI_PDBOPTS_FCS	0x01
+#define WIFI_PDBOPTS_AR		0x40
+
+struct wifi_encap_pdb {
+	u16 mac_hdr_len;
+	u8 rsvd;
+	u8 options;
+	u8 iv_flags;
+	u8 pri;
+	u16 pn1;
+	u32 pn2;
+	u16 frm_ctrl_mask;
+	u16 seq_ctrl_mask;
+	u8 rsvd1[2];
+	u8 cnst;
+	u8 key_id;
+	u8 ctr_flags;
+	u8 rsvd2;
+	u16 ctr_init;
+};
+
+struct wifi_decap_pdb {
+	u16 mac_hdr_len;
+	u8 rsvd;
+	u8 options;
+	u8 iv_flags;
+	u8 pri;
+	u16 pn1;
+	u32 pn2;
+	u16 frm_ctrl_mask;
+	u16 seq_ctrl_mask;
+	u8 rsvd1[4];
+	u8 ctr_flags;
+	u8 rsvd2;
+	u16 ctr_init;
+};
+
+/*
+ * IEEE 802.16 WiMAX Protocol Data Block
+ */
+#define WIMAX_PDBOPTS_FCS	0x01
+#define WIMAX_PDBOPTS_AR	0x40 /* decap only */
+
+struct wimax_encap_pdb {
+	u8 rsvd[3];
+	u8 options;
+	u32 nonce;
+	u8 b0_flags;
+	u8 ctr_flags;
+	u16 ctr_init;
+	/* begin DECO writeback region */
+	u32 pn;
+	/* end DECO writeback region */
+};
+
+struct wimax_decap_pdb {
+	u8 rsvd[3];
+	u8 options;
+	u32 nonce;
+	u8 iv_flags;
+	u8 ctr_flags;
+	u16 ctr_init;
+	/* begin DECO writeback region */
+	u32 pn;
+	u8 rsvd1[2];
+	u16 antireplay_len;
+	u64 antireplay_scorecard;
+	/* end DECO writeback region */
+};
+
+/*
+ * IEEE 802.1AE MACsec Protocol Data Block
+ */
+#define MACSEC_PDBOPTS_FCS	0x01
+#define MACSEC_PDBOPTS_AR	0x40 /* used in decap only */
+
+struct macsec_encap_pdb {
+	u16 aad_len;
+	u8 rsvd;
+	u8 options;
+	u64 sci;
+	u16 ethertype;
+	u8 tci_an;
+	u8 rsvd1;
+	/* begin DECO writeback region */
+	u32 pn;
+	/* end DECO writeback region */
+};
+
+struct macsec_decap_pdb {
+	u16 aad_len;
+	u8 rsvd;
+	u8 options;
+	u64 sci;
+	u8 rsvd1[3];
+	/* begin DECO writeback region */
+	u8 antireplay_len;
+	u32 pn;
+	u64 antireplay_scorecard;
+	/* end DECO writeback region */
+};
+
+/*
+ * SSL/TLS/DTLS Protocol Data Blocks
+ */
+
+#define TLS_PDBOPTS_ARS32	0x40
+#define TLS_PDBOPTS_ARS64	0xc0
+#define TLS_PDBOPTS_OUTFMT	0x08
+#define TLS_PDBOPTS_IV_WRTBK	0x02 /* 1.1/1.2/DTLS only */
+#define TLS_PDBOPTS_EXP_RND_IV	0x01 /* 1.1/1.2/DTLS only */
+
+struct tls_block_encap_pdb {
+	u8 type;
+	u8 version[2];
+	u8 options;
+	u64 seq_num;
+	u32 iv[4];
+};
+
+struct tls_stream_encap_pdb {
+	u8 type;
+	u8 version[2];
+	u8 options;
+	u64 seq_num;
+	u8 i;
+	u8 j;
+	u8 rsvd1[2];
+};
+
+struct dtls_block_encap_pdb {
+	u8 type;
+	u8 version[2];
+	u8 options;
+	u16 epoch;
+	u16 seq_num[3];
+	u32 iv[4];
+};
+
+struct tls_block_decap_pdb {
+	u8 rsvd[3];
+	u8 options;
+	u64 seq_num;
+	u32 iv[4];
+};
+
+struct tls_stream_decap_pdb {
+	u8 rsvd[3];
+	u8 options;
+	u64 seq_num;
+	u8 i;
+	u8 j;
+	u8 rsvd1[2];
+};
+
+struct dtls_block_decap_pdb {
+	u8 rsvd[3];
+	u8 options;
+	u16 epoch;
+	u16 seq_num[3];
+	u32 iv[4];
+	u64 antireplay_scorecard;
+};
+
+/*
+ * SRTP Protocol Data Blocks
+ */
+#define SRTP_PDBOPTS_MKI	0x08
+#define SRTP_PDBOPTS_AR		0x40
+
+struct srtp_encap_pdb {
+	u8 x_len;
+	u8 mki_len;
+	u8 n_tag;
+	u8 options;
+	u32 cnst0;
+	u8 rsvd[2];
+	u16 cnst1;
+	u16 salt[7];
+	u16 cnst2;
+	u32 rsvd1;
+	u32 roc;
+	u32 opt_mki;
+};
+
+struct srtp_decap_pdb {
+	u8 x_len;
+	u8 mki_len;
+	u8 n_tag;
+	u8 options;
+	u32 cnst0;
+	u8 rsvd[2];
+	u16 cnst1;
+	u16 salt[7];
+	u16 cnst2;
+	u16 rsvd1;
+	u16 seq_num;
+	u32 roc;
+	u64 antireplay_scorecard;
+};
+
+/*
+ * DSA/ECDSA Protocol Data Blocks
+ * Two of these exist: DSA-SIGN, and DSA-VERIFY. They are similar
+ * except for the treatment of "w" for verify, "s" for sign,
+ * and the placement of "a,b".
+ */
+#define DSA_PDB_SGF_SHIFT	24
+#define DSA_PDB_SGF_MASK	(0xff << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_Q		(0x80 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_R		(0x40 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_G		(0x20 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_W		(0x10 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_S		(0x10 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_F		(0x08 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_C		(0x04 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_D		(0x02 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_AB_SIGN	(0x02 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_AB_VERIFY	(0x01 << DSA_PDB_SGF_SHIFT)
+
+#define DSA_PDB_L_SHIFT		7
+#define DSA_PDB_L_MASK		(0x3ff << DSA_PDB_L_SHIFT)
+
+#define DSA_PDB_N_MASK		0x7f
+
+struct dsa_sign_pdb {
+	u32 sgf_ln; /* Use DSA_PDB_ definitions per above */
+	u8 *q;
+	u8 *r;
+	u8 *g;	/* or Gx,y */
+	u8 *s;
+	u8 *f;
+	u8 *c;
+	u8 *d;
+	u8 *ab; /* ECC only */
+	u8 *u;
+};
+
+struct dsa_verify_pdb {
+	u32 sgf_ln;
+	u8 *q;
+	u8 *r;
+	u8 *g;	/* or Gx,y */
+	u8 *w; /* or Wx,y */
+	u8 *f;
+	u8 *c;
+	u8 *d;
+	u8 *tmp; /* temporary data block */
+	u8 *ab; /* only used if ECC processing */
+};
+
+/* RSA Protocol Data Block */
+#define RSA_PDB_SGF_SHIFT       28
+#define RSA_PDB_E_SHIFT         12
+#define RSA_PDB_E_MASK          (0xFFF << RSA_PDB_E_SHIFT)
+#define RSA_PDB_D_SHIFT         12
+#define RSA_PDB_D_MASK          (0xFFF << RSA_PDB_D_SHIFT)
+#define RSA_PDB_Q_SHIFT         12
+#define RSA_PDB_Q_MASK          (0xFFF << RSA_PDB_Q_SHIFT)
+
+#define RSA_PDB_SGF_F           (0x8 << RSA_PDB_SGF_SHIFT)
+#define RSA_PDB_SGF_G           (0x4 << RSA_PDB_SGF_SHIFT)
+#define RSA_PRIV_PDB_SGF_F      (0x4 << RSA_PDB_SGF_SHIFT)
+#define RSA_PRIV_PDB_SGF_G      (0x8 << RSA_PDB_SGF_SHIFT)
+
+#define RSA_PRIV_KEY_FRM_1      0
+#define RSA_PRIV_KEY_FRM_2      1
+#define RSA_PRIV_KEY_FRM_3      2
+
+/**
+ * RSA Encrypt Protocol Data Block
+ * @sgf: scatter-gather field
+ * @f_dma: dma address of input data
+ * @g_dma: dma address of encrypted output data
+ * @n_dma: dma address of RSA modulus
+ * @e_dma: dma address of RSA public exponent
+ * @f_len: length in octets of the input data
+ */
+struct rsa_pub_pdb {
+	u32		sgf;
+	dma_addr_t	f_dma;
+	dma_addr_t	g_dma;
+	dma_addr_t	n_dma;
+	dma_addr_t	e_dma;
+	u32		f_len;
+} __packed;
+
+/**
+ * RSA Decrypt PDB - Private Key Form #1
+ * @sgf: scatter-gather field
+ * @g_dma: dma address of encrypted input data
+ * @f_dma: dma address of output data
+ * @n_dma: dma address of RSA modulus
+ * @d_dma: dma address of RSA private exponent
+ */
+struct rsa_priv_f1_pdb {
+	u32		sgf;
+	dma_addr_t	g_dma;
+	dma_addr_t	f_dma;
+	dma_addr_t	n_dma;
+	dma_addr_t	d_dma;
+} __packed;
+
+/**
+ * RSA Decrypt PDB - Private Key Form #2
+ * @sgf     : scatter-gather field
+ * @g_dma   : dma address of encrypted input data
+ * @f_dma   : dma address of output data
+ * @d_dma   : dma address of RSA private exponent
+ * @p_dma   : dma address of RSA prime factor p of RSA modulus n
+ * @q_dma   : dma address of RSA prime factor q of RSA modulus n
+ * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ *            as internal state buffer. It is assumed to be as long as p.
+ * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ *            as internal state buffer. It is assumed to be as long as q.
+ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
+ */
+struct rsa_priv_f2_pdb {
+	u32		sgf;
+	dma_addr_t	g_dma;
+	dma_addr_t	f_dma;
+	dma_addr_t	d_dma;
+	dma_addr_t	p_dma;
+	dma_addr_t	q_dma;
+	dma_addr_t	tmp1_dma;
+	dma_addr_t	tmp2_dma;
+	u32		p_q_len;
+} __packed;
+
+/**
+ * RSA Decrypt PDB - Private Key Form #3
+ * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
+ * the RSA modulus.
+ * @sgf     : scatter-gather field
+ * @g_dma   : dma address of encrypted input data
+ * @f_dma   : dma address of output data
+ * @c_dma   : dma address of RSA CRT coefficient
+ * @p_dma   : dma address of RSA prime factor p of RSA modulus n
+ * @q_dma   : dma address of RSA prime factor q of RSA modulus n
+ * @dp_dma  : dma address of RSA CRT exponent of RSA prime factor p
+ * @dq_dma  : dma address of RSA CRT exponent of RSA prime factor q
+ * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ *            as internal state buffer. It is assumed to be as long as p.
+ * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ *            as internal state buffer. It is assumed to be as long as q.
+ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
+ */
+struct rsa_priv_f3_pdb {
+	u32		sgf;
+	dma_addr_t	g_dma;
+	dma_addr_t	f_dma;
+	dma_addr_t	c_dma;
+	dma_addr_t	p_dma;
+	dma_addr_t	q_dma;
+	dma_addr_t	dp_dma;
+	dma_addr_t	dq_dma;
+	dma_addr_t	tmp1_dma;
+	dma_addr_t	tmp2_dma;
+	u32		p_q_len;
+} __packed;
+
+#endif
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
new file mode 100644
index 0000000..2a8d87e
--- /dev/null
+++ b/drivers/crypto/caam/pkc_desc.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
+ * all the desired key parameters, and the input and output pointers.
+ */
+#include "caampkc.h"
+#include "desc_constr.h"
+
+/* Descriptor for RSA Public operation */
+void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb)
+{
+	init_job_desc_pdb(desc, 0, sizeof(*pdb));
+	append_cmd(desc, pdb->sgf);
+	append_ptr(desc, pdb->f_dma);
+	append_ptr(desc, pdb->g_dma);
+	append_ptr(desc, pdb->n_dma);
+	append_ptr(desc, pdb->e_dma);
+	append_cmd(desc, pdb->f_len);
+	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSAENC_PUBKEY);
+}
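+
+/*
+ * Illustrative word-by-word layout of the job descriptor built above:
+ * [job header + embedded PDB] [sgf] [f_dma] [g_dma] [n_dma] [e_dma]
+ * [f_len] [protocol operation: RSAENC_PUBKEY]. Each *_dma pointer spans
+ * CAAM_PTR_SZ bytes, i.e. two 32-bit descriptor words when dma_addr_t is
+ * 64 bits wide.
+ */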
+
+/* Descriptor for RSA Private operation - Private Key Form #1 */
+void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
+{
+	init_job_desc_pdb(desc, 0, sizeof(*pdb));
+	append_cmd(desc, pdb->sgf);
+	append_ptr(desc, pdb->g_dma);
+	append_ptr(desc, pdb->f_dma);
+	append_ptr(desc, pdb->n_dma);
+	append_ptr(desc, pdb->d_dma);
+	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+			 RSA_PRIV_KEY_FRM_1);
+}
+
+/* Descriptor for RSA Private operation - Private Key Form #2 */
+void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
+{
+	init_job_desc_pdb(desc, 0, sizeof(*pdb));
+	append_cmd(desc, pdb->sgf);
+	append_ptr(desc, pdb->g_dma);
+	append_ptr(desc, pdb->f_dma);
+	append_ptr(desc, pdb->d_dma);
+	append_ptr(desc, pdb->p_dma);
+	append_ptr(desc, pdb->q_dma);
+	append_ptr(desc, pdb->tmp1_dma);
+	append_ptr(desc, pdb->tmp2_dma);
+	append_cmd(desc, pdb->p_q_len);
+	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+			 RSA_PRIV_KEY_FRM_2);
+}
+
+/* Descriptor for RSA Private operation - Private Key Form #3 */
+void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
+{
+	init_job_desc_pdb(desc, 0, sizeof(*pdb));
+	append_cmd(desc, pdb->sgf);
+	append_ptr(desc, pdb->g_dma);
+	append_ptr(desc, pdb->f_dma);
+	append_ptr(desc, pdb->c_dma);
+	append_ptr(desc, pdb->p_dma);
+	append_ptr(desc, pdb->q_dma);
+	append_ptr(desc, pdb->dp_dma);
+	append_ptr(desc, pdb->dq_dma);
+	append_ptr(desc, pdb->tmp1_dma);
+	append_ptr(desc, pdb->tmp2_dma);
+	append_cmd(desc, pdb->p_q_len);
+	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+			 RSA_PRIV_KEY_FRM_3);
+}
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
new file mode 100644
index 0000000..67f7f8c
--- /dev/null
+++ b/drivers/crypto/caam/qi.c
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CAAM/SEC 4.x QI transport/backend driver
+ * Queue Interface backend functionality
+ *
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ */
+
+#include <linux/cpumask.h>
+#include <linux/kthread.h>
+#include <soc/fsl/qman.h>
+
+#include "regs.h"
+#include "qi.h"
+#include "desc.h"
+#include "intern.h"
+#include "desc_constr.h"
+
+#define PREHDR_RSLS_SHIFT	31
+
+/*
+ * Use a reasonable backlog of frames (per CPU) as congestion threshold,
+ * so that resources used by the in-flight buffers do not become a memory hog.
+ */
+#define MAX_RSP_FQ_BACKLOG_PER_CPU	256
+
+#define CAAM_QI_ENQUEUE_RETRIES	10000
+
+#define CAAM_NAPI_WEIGHT	63
+
+/*
+ * caam_napi - struct holding CAAM NAPI-related params
+ * @irqtask: IRQ task for QI backend
+ * @p: QMan portal
+ */
+struct caam_napi {
+	struct napi_struct irqtask;
+	struct qman_portal *p;
+};
+
+/*
+ * caam_qi_pcpu_priv - percpu private data structure to maintain the list of
+ *                     pending responses expected on each cpu.
+ * @caam_napi: CAAM NAPI params
+ * @net_dev: netdev used by NAPI
+ * @rsp_fq: response FQ from CAAM
+ */
+struct caam_qi_pcpu_priv {
+	struct caam_napi caam_napi;
+	struct net_device net_dev;
+	struct qman_fq *rsp_fq;
+} ____cacheline_aligned;
+
+static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
+static DEFINE_PER_CPU(int, last_cpu);
+
+/*
+ * caam_qi_priv - CAAM QI backend private params
+ * @cgr: QMan congestion group
+ * @qi_pdev: platform device for QI backend
+ */
+struct caam_qi_priv {
+	struct qman_cgr cgr;
+	struct platform_device *qi_pdev;
+};
+
+static struct caam_qi_priv qipriv ____cacheline_aligned;
+
+/*
+ * This is written by only one core - the one that initialized the CGR - and
+ * read by multiple cores (all the others).
+ */
+bool caam_congested __read_mostly;
+EXPORT_SYMBOL(caam_congested);
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * This is a counter for the number of times the congestion group (where all
+ * the request and response queues are) reached congestion. Incremented
+ * each time the congestion callback is called with congested == true.
+ */
+static u64 times_congested;
+#endif
+
+/*
+ * CPU from where the module initialised. This is required because QMan driver
+ * requires CGRs to be removed from same CPU from where they were originally
+ * allocated.
+ */
+static int mod_init_cpu;
+
+/*
+ * This is a cache of buffers, from which users of the CAAM QI driver
+ * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
+ * doing malloc on the hotpath.
+ * NOTE: A more elegant solution would be to have some headroom in the frames
+ *       being processed. This could be added by the dpaa-ethernet driver.
+ *       This would pose a problem for userspace application processing which
+ *       cannot know of this limitation. So for now, this will work.
+ * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
+ */
+static struct kmem_cache *qi_cache;
+
+int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
+{
+	struct qm_fd fd;
+	dma_addr_t addr;
+	int ret;
+	int num_retries = 0;
+
+	qm_fd_clear_fd(&fd);
+	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
+
+	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
+			      DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(qidev, addr)) {
+		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
+		return -EIO;
+	}
+	qm_fd_addr_set64(&fd, addr);
+
+	do {
+		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
+		if (likely(!ret))
+			return 0;
+
+		if (ret != -EBUSY)
+			break;
+		num_retries++;
+	} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
+
+	dev_err(qidev, "qman_enqueue failed: %d\n", ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(caam_qi_enqueue);
+
+static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
+			   const union qm_mr_entry *msg)
+{
+	const struct qm_fd *fd;
+	struct caam_drv_req *drv_req;
+	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+
+	fd = &msg->ern.fd;
+
+	if (qm_fd_get_format(fd) != qm_fd_compound) {
+		dev_err(qidev, "Non-compound FD from CAAM\n");
+		return;
+	}
+
+	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+	if (!drv_req) {
+		dev_err(qidev,
+			"Can't find original request for CAAM response\n");
+		return;
+	}
+
+	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
+			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
+
+	drv_req->cbk(drv_req, -EIO);
+}
+
+static struct qman_fq *create_caam_req_fq(struct device *qidev,
+					  struct qman_fq *rsp_fq,
+					  dma_addr_t hwdesc,
+					  int fq_sched_flag)
+{
+	int ret;
+	struct qman_fq *req_fq;
+	struct qm_mcc_initfq opts;
+
+	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
+	if (!req_fq)
+		return ERR_PTR(-ENOMEM);
+
+	req_fq->cb.ern = caam_fq_ern_cb;
+	req_fq->cb.fqs = NULL;
+
+	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
+				QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
+	if (ret) {
+		dev_err(qidev, "Failed to create session req FQ\n");
+		goto create_req_fq_fail;
+	}
+
+	memset(&opts, 0, sizeof(opts));
+	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
+				   QM_INITFQ_WE_CONTEXTB |
+				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
+	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
+	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
+	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
+	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
+	opts.fqd.cgid = qipriv.cgr.cgrid;
+
+	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
+	if (ret) {
+		dev_err(qidev, "Failed to init session req FQ\n");
+		goto init_req_fq_fail;
+	}
+
+	dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
+		smp_processor_id());
+	return req_fq;
+
+init_req_fq_fail:
+	qman_destroy_fq(req_fq);
+create_req_fq_fail:
+	kfree(req_fq);
+	return ERR_PTR(ret);
+}
+
+static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
+{
+	int ret;
+
+	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
+				    QMAN_VOLATILE_FLAG_FINISH,
+				    QM_VDQCR_PRECEDENCE_VDQCR |
+				    QM_VDQCR_NUMFRAMES_TILLEMPTY);
+	if (ret) {
+		dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
+		return ret;
+	}
+
+	do {
+		struct qman_portal *p;
+
+		p = qman_get_affine_portal(smp_processor_id());
+		qman_p_poll_dqrr(p, 16);
+	} while (fq->flags & QMAN_FQ_STATE_NE);
+
+	return 0;
+}
+
+static int kill_fq(struct device *qidev, struct qman_fq *fq)
+{
+	u32 flags;
+	int ret;
+
+	ret = qman_retire_fq(fq, &flags);
+	if (ret < 0) {
+		dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
+		return ret;
+	}
+
+	if (!ret)
+		goto empty_fq;
+
+	/* Async FQ retirement condition */
+	if (ret == 1) {
+		/* Retry till FQ gets in retired state */
+		do {
+			msleep(20);
+		} while (fq->state != qman_fq_state_retired);
+
+		WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
+		WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
+	}
+
+empty_fq:
+	if (fq->flags & QMAN_FQ_STATE_NE) {
+		ret = empty_retired_fq(qidev, fq);
+		if (ret) {
+			dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
+				fq->fqid);
+			return ret;
+		}
+	}
+
+	ret = qman_oos_fq(fq);
+	if (ret)
+		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
+
+	qman_destroy_fq(fq);
+	kfree(fq);
+
+	return ret;
+}
+
+static int empty_caam_fq(struct qman_fq *fq)
+{
+	int ret;
+	struct qm_mcr_queryfq_np np;
+
+	/* Wait till the older CAAM FQ get empty */
+	do {
+		ret = qman_query_fq_np(fq, &np);
+		if (ret)
+			return ret;
+
+		if (!qm_mcr_np_get(&np, frm_cnt))
+			break;
+
+		msleep(20);
+	} while (1);
+
+	/*
+	 * Give extra time for pending jobs from this FQ in holding tanks
+	 * to get processed
+	 */
+	msleep(20);
+	return 0;
+}
+
+int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
+{
+	int ret;
+	u32 num_words;
+	struct qman_fq *new_fq, *old_fq;
+	struct device *qidev = drv_ctx->qidev;
+
+	num_words = desc_len(sh_desc);
+	if (num_words > MAX_SDLEN) {
+		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
+		return -EINVAL;
+	}
+
+	/* Note down older req FQ */
+	old_fq = drv_ctx->req_fq;
+
+	/* Create a new req FQ in parked state */
+	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
+				    drv_ctx->context_a, 0);
+	if (unlikely(IS_ERR_OR_NULL(new_fq))) {
+		dev_err(qidev, "FQ allocation for shdesc update failed\n");
+		return PTR_ERR(new_fq);
+	}
+
+	/* Hook up new FQ to context so that new requests keep queuing */
+	drv_ctx->req_fq = new_fq;
+
+	/* Empty and remove the older FQ */
+	ret = empty_caam_fq(old_fq);
+	if (ret) {
+		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
+
+		/* We can revert to older FQ */
+		drv_ctx->req_fq = old_fq;
+
+		if (kill_fq(qidev, new_fq))
+			dev_warn(qidev, "New CAAM FQ kill failed\n");
+
+		return ret;
+	}
+
+	/*
+	 * Re-initialise pre-header. Set RSLS and SDLEN.
+	 * Update the shared descriptor for driver context.
+	 */
+	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
+					   num_words);
+	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
+	dma_sync_single_for_device(qidev, drv_ctx->context_a,
+				   sizeof(drv_ctx->sh_desc) +
+				   sizeof(drv_ctx->prehdr),
+				   DMA_BIDIRECTIONAL);
+
+	/* Put the new FQ in scheduled state */
+	ret = qman_schedule_fq(new_fq);
+	if (ret) {
+		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
+
+		/*
+		 * We can kill the new FQ and revert to the old FQ. Since the
+		 * descriptor is already modified, this is still a success case.
+		 */
+
+		drv_ctx->req_fq = old_fq;
+
+		if (kill_fq(qidev, new_fq))
+			dev_warn(qidev, "New CAAM FQ kill failed\n");
+	} else if (kill_fq(qidev, old_fq)) {
+		dev_warn(qidev, "Old CAAM FQ kill failed\n");
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(caam_drv_ctx_update);
+
+struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
+				       int *cpu,
+				       u32 *sh_desc)
+{
+	size_t size;
+	u32 num_words;
+	dma_addr_t hwdesc;
+	struct caam_drv_ctx *drv_ctx;
+	const cpumask_t *cpus = qman_affine_cpus();
+
+	num_words = desc_len(sh_desc);
+	if (num_words > MAX_SDLEN) {
+		dev_err(qidev, "Invalid descriptor len: %d words\n",
+			num_words);
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
+	if (!drv_ctx)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
+	 * and dma-map them.
+	 */
+	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
+					   num_words);
+	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
+	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
+	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
+				DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(qidev, hwdesc)) {
+		dev_err(qidev, "DMA map error for preheader + shdesc\n");
+		kfree(drv_ctx);
+		return ERR_PTR(-ENOMEM);
+	}
+	drv_ctx->context_a = hwdesc;
+
+	/* If given CPU does not own the portal, choose another one that does */
+	if (!cpumask_test_cpu(*cpu, cpus)) {
+		int *pcpu = &get_cpu_var(last_cpu);
+
+		*pcpu = cpumask_next(*pcpu, cpus);
+		if (*pcpu >= nr_cpu_ids)
+			*pcpu = cpumask_first(cpus);
+		*cpu = *pcpu;
+
+		put_cpu_var(last_cpu);
+	}
+	drv_ctx->cpu = *cpu;
+
+	/* Find response FQ hooked with this CPU */
+	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
+
+	/* Attach request FQ */
+	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
+					     QMAN_INITFQ_FLAG_SCHED);
+	if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
+		dev_err(qidev, "create_caam_req_fq failed\n");
+		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
+		kfree(drv_ctx);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drv_ctx->qidev = qidev;
+	return drv_ctx;
+}
+EXPORT_SYMBOL(caam_drv_ctx_init);
+
+void *qi_cache_alloc(gfp_t flags)
+{
+	return kmem_cache_alloc(qi_cache, flags);
+}
+EXPORT_SYMBOL(qi_cache_alloc);
+
+void qi_cache_free(void *obj)
+{
+	kmem_cache_free(qi_cache, obj);
+}
+EXPORT_SYMBOL(qi_cache_free);
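+
+/*
+ * Illustrative sketch (not part of the driver): typical fast-path use of the
+ * memcache exported above for a short-lived, per-request buffer of at most
+ * CAAM_QI_MEMCACHE_SIZE bytes. The function name is hypothetical.
+ */
+static void __maybe_unused example_qi_cache_usage(void)
+{
+	void *buf = qi_cache_alloc(GFP_ATOMIC);
+
+	if (!buf)
+		return;
+
+	/* ... build the hardware S/G table or driver request in buf ... */
+
+	qi_cache_free(buf);
+}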
+
+static int caam_qi_poll(struct napi_struct *napi, int budget)
+{
+	struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
+
+	int cleaned = qman_p_poll_dqrr(np->p, budget);
+
+	if (cleaned < budget) {
+		napi_complete(napi);
+		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+	}
+
+	return cleaned;
+}
+
+void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
+{
+	if (IS_ERR_OR_NULL(drv_ctx))
+		return;
+
+	/* Remove request FQ */
+	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
+		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
+
+	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
+			 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
+			 DMA_BIDIRECTIONAL);
+	kfree(drv_ctx);
+}
+EXPORT_SYMBOL(caam_drv_ctx_rel);
+
+int caam_qi_shutdown(struct device *qidev)
+{
+	int i, ret;
+	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
+	const cpumask_t *cpus = qman_affine_cpus();
+	struct cpumask old_cpumask = current->cpus_allowed;
+
+	for_each_cpu(i, cpus) {
+		struct napi_struct *irqtask;
+
+		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
+		napi_disable(irqtask);
+		netif_napi_del(irqtask);
+
+		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
+			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
+	}
+
+	/*
+	 * The QMan driver requires CGRs to be deleted from the same CPU on
+	 * which they were instantiated. Hence make module removal execute on
+	 * the same CPU on which the module was originally inserted.
+	 */
+	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
+
+	ret = qman_delete_cgr(&priv->cgr);
+	if (ret)
+		dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
+	else
+		qman_release_cgrid(priv->cgr.cgrid);
+
+	kmem_cache_destroy(qi_cache);
+
+	/* Now that we're done with the CGRs, restore the cpus allowed mask */
+	set_cpus_allowed_ptr(current, &old_cpumask);
+
+	platform_device_unregister(priv->qi_pdev);
+	return ret;
+}
+
+static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
+{
+	caam_congested = congested;
+
+	if (congested) {
+#ifdef CONFIG_DEBUG_FS
+		times_congested++;
+#endif
+		pr_debug_ratelimited("CAAM entered congestion\n");
+
+	} else {
+		pr_debug_ratelimited("CAAM exited congestion\n");
+	}
+}
+
+static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
+{
+	/*
+	 * In case of a threaded ISR, in_irq() does not return an appropriate
+	 * value on RT kernels, so use in_serving_softirq() to distinguish
+	 * between softirq and irq contexts.
+	 */
+	if (unlikely(in_irq() || !in_serving_softirq())) {
+		/* Disable QMan IRQ source and invoke NAPI */
+		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
+		np->p = p;
+		napi_schedule(&np->irqtask);
+		return 1;
+	}
+	return 0;
+}
+
+static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
+						    struct qman_fq *rsp_fq,
+						    const struct qm_dqrr_entry *dqrr)
+{
+	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
+	struct caam_drv_req *drv_req;
+	const struct qm_fd *fd;
+	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+	u32 status;
+
+	if (caam_qi_napi_schedule(p, caam_napi))
+		return qman_cb_dqrr_stop;
+
+	fd = &dqrr->fd;
+	status = be32_to_cpu(fd->status);
+	if (unlikely(status)) {
+		u32 ssrc = status & JRSTA_SSRC_MASK;
+		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+
+		if (ssrc != JRSTA_SSRC_CCB_ERROR ||
+		    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+			dev_err(qidev, "Error: %#x in CAAM response FD\n",
+				status);
+	}
+
+	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
+		dev_err(qidev, "Non-compound FD from CAAM\n");
+		return qman_cb_dqrr_consume;
+	}
+
+	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+	if (unlikely(!drv_req)) {
+		dev_err(qidev,
+			"Can't find original request for caam response\n");
+		return qman_cb_dqrr_consume;
+	}
+
+	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
+			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
+
+	drv_req->cbk(drv_req, status);
+	return qman_cb_dqrr_consume;
+}
+
+static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
+{
+	struct qm_mcc_initfq opts;
+	struct qman_fq *fq;
+	int ret;
+
+	fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
+	if (!fq)
+		return -ENOMEM;
+
+	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
+
+	ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
+			     QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+	if (ret) {
+		dev_err(qidev, "Rsp FQ create failed\n");
+		kfree(fq);
+		return -ENODEV;
+	}
+
+	memset(&opts, 0, sizeof(opts));
+	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
+				   QM_INITFQ_WE_CONTEXTB |
+				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
+	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
+				       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
+	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
+	opts.fqd.cgid = qipriv.cgr.cgrid;
+	opts.fqd.context_a.stashing.exclusive =	QM_STASHING_EXCL_CTX |
+						QM_STASHING_EXCL_DATA;
+	qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
+
+	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+	if (ret) {
+		dev_err(qidev, "Rsp FQ init failed\n");
+		kfree(fq);
+		return -ENODEV;
+	}
+
+	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
+
+	dev_dbg(qidev, "Allocated response FQ %u for CPU %u\n", fq->fqid, cpu);
+	return 0;
+}
+
+static int init_cgr(struct device *qidev)
+{
+	int ret;
+	struct qm_mcc_initcgr opts;
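+	/*
+	 * Congestion threshold: scale MAX_RSP_FQ_BACKLOG_PER_CPU by the
+	 * number of affine-portal CPUs; once this many frames are outstanding
+	 * in the congestion group, cgr_cb() flags congestion.
+	 */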
+	const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
+			MAX_RSP_FQ_BACKLOG_PER_CPU;
+
+	ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
+	if (ret) {
+		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
+		return ret;
+	}
+
+	qipriv.cgr.cb = cgr_cb;
+	memset(&opts, 0, sizeof(opts));
+	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
+				   QM_CGR_WE_MODE);
+	opts.cgr.cscn_en = QM_CGR_EN;
+	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
+	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
+
+	ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+	if (ret) {
+		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
+			qipriv.cgr.cgrid);
+		return ret;
+	}
+
+	dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
+	return 0;
+}
+
+static int alloc_rsp_fqs(struct device *qidev)
+{
+	int ret, i;
+	const cpumask_t *cpus = qman_affine_cpus();
+
+	/* Now create response FQs */
+	for_each_cpu(i, cpus) {
+		ret = alloc_rsp_fq_cpu(qidev, i);
+		if (ret) {
+			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u\n", i);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void free_rsp_fqs(void)
+{
+	int i;
+	const cpumask_t *cpus = qman_affine_cpus();
+
+	for_each_cpu(i, cpus)
+		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
+}
+
+int caam_qi_init(struct platform_device *caam_pdev)
+{
+	int err, i;
+	struct platform_device *qi_pdev;
+	struct device *ctrldev = &caam_pdev->dev, *qidev;
+	struct caam_drv_private *ctrlpriv;
+	const cpumask_t *cpus = qman_affine_cpus();
+	struct cpumask old_cpumask = current->cpus_allowed;
+	static struct platform_device_info qi_pdev_info = {
+		.name = "caam_qi",
+		.id = PLATFORM_DEVID_NONE
+	};
+
+	/*
+	 * QMan requires CGRs to be removed from the same CPU and portal on
+	 * which they were originally allocated. Hence we note down the
+	 * initialisation CPU and use the same CPU for module exit.
+	 * We select the first CPU from the list of portal-owning CPUs and
+	 * pin module init to it.
+	 */
+	mod_init_cpu = cpumask_first(cpus);
+	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
+
+	qi_pdev_info.parent = ctrldev;
+	qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
+	qi_pdev = platform_device_register_full(&qi_pdev_info);
+	if (IS_ERR(qi_pdev))
+		return PTR_ERR(qi_pdev);
+	set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
+
+	ctrlpriv = dev_get_drvdata(ctrldev);
+	qidev = &qi_pdev->dev;
+
+	qipriv.qi_pdev = qi_pdev;
+	dev_set_drvdata(qidev, &qipriv);
+
+	/* Initialize the congestion detection */
+	err = init_cgr(qidev);
+	if (err) {
+		dev_err(qidev, "CGR initialization failed: %d\n", err);
+		platform_device_unregister(qi_pdev);
+		return err;
+	}
+
+	/* Initialise response FQs */
+	err = alloc_rsp_fqs(qidev);
+	if (err) {
+		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
+		free_rsp_fqs();
+		platform_device_unregister(qi_pdev);
+		return err;
+	}
+
+	/*
+	 * Enable the NAPI contexts on each core which has an affine
+	 * portal.
+	 */
+	for_each_cpu(i, cpus) {
+		struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
+		struct caam_napi *caam_napi = &priv->caam_napi;
+		struct napi_struct *irqtask = &caam_napi->irqtask;
+		struct net_device *net_dev = &priv->net_dev;
+
+		net_dev->dev = *qidev;
+		INIT_LIST_HEAD(&net_dev->napi_list);
+
+		netif_napi_add(net_dev, irqtask, caam_qi_poll,
+			       CAAM_NAPI_WEIGHT);
+
+		napi_enable(irqtask);
+	}
+
+	/* Hook up QI device to parent controlling caam device */
+	ctrlpriv->qidev = qidev;
+
+	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
+				     SLAB_CACHE_DMA, NULL);
+	if (!qi_cache) {
+		dev_err(qidev, "Can't allocate CAAM cache\n");
+		free_rsp_fqs();
+		platform_device_unregister(qi_pdev);
+		return -ENOMEM;
+	}
+
+	/* Done with the CGRs; restore the cpus allowed mask */
+	set_cpus_allowed_ptr(current, &old_cpumask);
+#ifdef CONFIG_DEBUG_FS
+	debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
+			    &times_congested, &caam_fops_u64_ro);
+#endif
+	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
+	return 0;
+}
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
new file mode 100644
index 0000000..357b69f
--- /dev/null
+++ b/drivers/crypto/caam/qi.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Public definitions for the CAAM/QI (Queue Interface) backend.
+ *
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ */
+
+#ifndef __QI_H__
+#define __QI_H__
+
+#include <soc/fsl/qman.h>
+#include "compat.h"
+#include "desc.h"
+#include "desc_constr.h"
+
+/*
+ * CAAM hardware constructs a job descriptor which points to a shared descriptor
+ * (as pointed to by context_a of the to-CAAM FQ).
+ * When the job descriptor is executed by DECO, the whole job descriptor
+ * together with shared descriptor gets loaded in DECO buffer, which is
+ * 64 words (each 32-bit) long.
+ *
+ * The job descriptor constructed by CAAM hardware has the following layout:
+ *
+ *	HEADER		(1 word)
+ *	Shdesc ptr	(1 or 2 words)
+ *	SEQ_OUT_PTR	(1 word)
+ *	Out ptr		(1 or 2 words)
+ *	Out length	(1 word)
+ *	SEQ_IN_PTR	(1 word)
+ *	In ptr		(1 or 2 words)
+ *	In length	(1 word)
+ *
+ * The shdesc ptr is used to fetch the shared descriptor contents into the
+ * DECO buffer.
+ *
+ * Apart from the shdesc contents, the total number of words that get loaded
+ * into the DECO buffer is 8 or 11. The remaining words in the DECO buffer
+ * can be used for storing the shared descriptor.
+ */
+#define MAX_SDLEN	((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
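+
+/*
+ * Worked example: with 2-word (64-bit) pointers, the CAAM-built job
+ * descriptor occupies 11 of the 64 DECO buffer words, leaving 64 - 11 = 53
+ * words for the shared descriptor; MAX_SDLEN works out to the same value,
+ * assuming the usual 4-byte command and 8-byte pointer sizes.
+ */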
+
+/* Length of a single buffer in the QI driver memory cache */
+#define CAAM_QI_MEMCACHE_SIZE	768
+
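+/*
+ * Set by the congestion group state-change callback in qi.c; QI backend
+ * users can consult it (see caam_drv_ctx_busy()) to back off while CAAM
+ * processing is congested.
+ */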
+extern bool caam_congested __read_mostly;
+
+/*
+ * This is the request structure the driver application should fill while
+ * submitting a job to the driver.
+ */
+struct caam_drv_req;
+
+/*
+ * caam_qi_cbk - application's callback function invoked by the driver when a
+ *               request has been processed.
+ * @drv_req: original request that was submitted
+ * @status: completion status of request (0 - success, non-zero - error code)
+ */
+typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
+
+enum optype {
+	ENCRYPT,
+	DECRYPT,
+	GIVENCRYPT,
+	NUM_OP
+};
+
+/**
+ * caam_drv_ctx - CAAM/QI backend driver context
+ *
+ * The jobs are processed by the driver against a driver context.
+ * A driver context is attached to every cryptographic context.
+ * The driver context contains data for private use by the driver.
+ * To the application, this is an opaque structure.
+ *
+ * @prehdr: preheader placed before shrd desc
+ * @sh_desc: shared descriptor
+ * @context_a: shared descriptor dma address
+ * @req_fq: to-CAAM request frame queue
+ * @rsp_fq: from-CAAM response frame queue
+ * @cpu: cpu on which to receive CAAM response
+ * @op_type: operation type
+ * @qidev: device pointer for CAAM/QI backend
+ */
+struct caam_drv_ctx {
+	u32 prehdr[2];
+	u32 sh_desc[MAX_SDLEN];
+	dma_addr_t context_a;
+	struct qman_fq *req_fq;
+	struct qman_fq *rsp_fq;
+	int cpu;
+	enum optype op_type;
+	struct device *qidev;
+} ____cacheline_aligned;
+
+/**
+ * caam_drv_req - The request structure the driver application should fill
+ *                while submitting a job to the driver.
+ * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
+ *          buffers.
+ * @drv_ctx: driver context against which the request is submitted
+ * @cbk: callback function to invoke when the job is completed
+ * @app_ctx: arbitrary context attached with the request by the application
+ */
+struct caam_drv_req {
+	struct qm_sg_entry fd_sgt[2];
+	struct caam_drv_ctx *drv_ctx;
+	caam_qi_cbk cbk;
+	void *app_ctx;
+} ____cacheline_aligned;
+
+/**
+ * caam_drv_ctx_init - Initialise a CAAM/QI driver context
+ *
+ * A CAAM/QI driver context must be attached to each cryptographic context.
+ * This function allocates memory for the CAAM/QI context and returns a handle
+ * to the application. This handle must be submitted along with each enqueue
+ * request to the driver by the application.
+ *
+ * @qidev: device pointer for the CAAM/QI backend
+ * @cpu: CPU on which the application prefers the driver to receive CAAM
+ *       responses. The request completion callback will be issued from this
+ *       CPU.
+ * @sh_desc: shared descriptor pointer to be attached to the CAAM/QI driver
+ *           context.
+ *
+ * Returns a driver context on success or an ERR_PTR() encoding a negative
+ * error code on failure.
+ */
+struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
+				       u32 *sh_desc);
+
+/**
+ * caam_qi_enqueue - Submit a request to QI backend driver.
+ *
+ * The request structure must be properly filled as described above.
+ *
+ * @qidev: device pointer for QI backend
+ * @req: CAAM QI request structure
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
+
+/**
+ * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
+ *		       or too many CAAM responses are pending to be processed.
+ * @drv_ctx: driver context for which job is to be submitted
+ *
+ * Returns the CAAM congestion status (true/false).
+ */
+bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
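+
+/*
+ * Illustrative use only (the exact error handling is up to the backend user):
+ *
+ *	if (caam_drv_ctx_busy(drv_ctx))
+ *		return -EAGAIN;
+ */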
+
+/**
+ * caam_drv_ctx_update - Update QI driver context
+ *
+ * Invoked when the shared descriptor attached to the driver context needs to
+ * be changed.
+ *
+ * @drv_ctx: driver context to be updated
+ * @sh_desc: new shared descriptor pointer to be updated in QI driver context
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
+
+/**
+ * caam_drv_ctx_rel - Release a QI driver context
+ * @drv_ctx: context to be released
+ */
+void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
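+
+/*
+ * Illustrative usage sketch (not kernel-doc; my_done_cbk, my_state and req
+ * are hypothetical names). A backend user such as caamalg-qi typically does:
+ *
+ *	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
+ *	if (IS_ERR(drv_ctx))
+ *		return PTR_ERR(drv_ctx);
+ *
+ *	req->drv_ctx = drv_ctx;
+ *	req->cbk = my_done_cbk;
+ *	req->app_ctx = my_state;
+ *	fill req->fd_sgt[0] / req->fd_sgt[1] with DMA-mapped output/input
+ *	ret = caam_qi_enqueue(qidev, req);
+ *
+ * and releases the context with caam_drv_ctx_rel(drv_ctx) on teardown.
+ */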
+
+int caam_qi_init(struct platform_device *pdev);
+int caam_qi_shutdown(struct device *dev);
+
+/**
+ * qi_cache_alloc - Allocate buffers from CAAM-QI cache
+ *
+ * Invoked when a user of the CAAM-QI (e.g. caamalg-qi) needs data which has
+ * to be allocated on the hotpath. Instead of using kmalloc, one can use the
+ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
+ * have a size of CAAM_QI_MEMCACHE_SIZE bytes, which is sufficient for hosting
+ * 16 SG entries.
+ *
+ * @flags: flags that would be used for the equivalent kmalloc(..) call
+ *
+ * Returns a pointer to a retrieved buffer on success or NULL on failure.
+ */
+void *qi_cache_alloc(gfp_t flags);
+
+/**
+ * qi_cache_free - Frees buffers allocated from CAAM-QI cache
+ *
+ * Invoked when a user of the CAAM-QI (e.g. caamalg-qi) no longer needs
+ * the buffer previously allocated by a qi_cache_alloc call.
+ * No checking is done; the call is a pass-through to
+ * kmem_cache_free(...).
+ *
+ * @obj: object previously allocated using qi_cache_alloc()
+ */
+void qi_cache_free(void *obj);
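+
+/*
+ * Example (illustrative): hotpath users typically carve per-request data,
+ * such as S/G tables, out of this cache and return it from the completion
+ * callback:
+ *
+ *	struct qm_sg_entry *sgt = qi_cache_alloc(GFP_ATOMIC);
+ *	...
+ *	qi_cache_free(sgt);
+ */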
+
+#endif /* __QI_H__ */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
new file mode 100644
index 0000000..ce3f9ad
--- /dev/null
+++ b/drivers/crypto/caam/regs.h
@@ -0,0 +1,895 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM hardware register-level view
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef REGS_H
+#define REGS_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+
+/*
+ * Architecture-specific register access methods
+ *
+ * CAAM's bus-addressable registers are 64 bits internally.
+ * They have been wired to be safely accessible on 32-bit
+ * architectures, however. Registers were organized such
+ * that (a) they can be contained in 32 bits, (b) if not, then they
+ * can be treated as two 32-bit entities, or finally (c) if they
+ * must be treated as a single 64-bit value, then this can safely
+ * be done with two 32-bit cycles.
+ *
+ * For 32-bit operations on 64-bit values, CAAM follows the same
+ * 64-bit register access conventions as its predecessors, in that
+ * writes are "triggered" by a write to the register at the numerically
+ * higher address, thus, a full 64-bit write cycle requires a write
+ * to the lower address, followed by a write to the higher address,
+ * which will latch/execute the write cycle.
+ *
+ * For example, let's assume a SW reset of CAAM through the master
+ * configuration register.
+ * - SWRST is in bit 31 of MCFG.
+ * - MCFG begins at base+0x0000.
+ * - Bits 63-32 are a 32-bit word at base+0x0000 (numerically-lower)
+ * - Bits 31-0 are a 32-bit word at base+0x0004 (numerically-higher)
+ *
+ * (and on Power, the convention is 0-31, 32-63, I know...)
+ *
+ * Assuming a 64-bit write to this MCFG to perform a software reset
+ * would then require a write of 0 to base+0x0000, followed by a
+ * write of 0x80000000 to base+0x0004, which would "execute" the
+ * reset.
+ *
+ * Of course, since MCFG 63-32 is all zero, we could cheat and simply
+ * write 0x80000000 to base+0x0004, and the reset would work fine.
+ * However, since CAAM does contain some write-and-read-intended
+ * 64-bit registers, this code defines 64-bit access methods for
+ * the sake of internal consistency and simplicity, and so that a
+ * clean transition to 64-bit is possible when it becomes necessary.
+ *
+ * There are limitations to this that the developer must recognize.
+ * 32-bit architectures cannot enforce an atomic 64-bit operation.
+ * Therefore:
+ *
+ * - On writes, since the HW is assumed to latch the cycle on the
+ *   write of the higher-numeric-address word, ordered writes
+ *   work OK.
+ *
+ * - For reads, where a register contains a relevant value of more
+ *   than 32 bits, the hardware employs logic to latch the other
+ *   "half" of the data until read, ensuring an accurate value.
+ *   This is of particular relevance when dealing with CAAM's
+ *   performance counters.
+ *
+ */
+
+extern bool caam_little_end;
+extern bool caam_imx;
+
+#define caam_to_cpu(len)						\
+static inline u##len caam##len ## _to_cpu(u##len val)			\
+{									\
+	if (caam_little_end)						\
+		return le##len ## _to_cpu((__force __le##len)val);	\
+	else								\
+		return be##len ## _to_cpu((__force __be##len)val);	\
+}
+
+#define cpu_to_caam(len)					\
+static inline u##len cpu_to_caam##len(u##len val)		\
+{								\
+	if (caam_little_end)					\
+		return (__force u##len)cpu_to_le##len(val);	\
+	else							\
+		return (__force u##len)cpu_to_be##len(val);	\
+}
+
+caam_to_cpu(16)
+caam_to_cpu(32)
+caam_to_cpu(64)
+cpu_to_caam(16)
+cpu_to_caam(32)
+cpu_to_caam(64)
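+
+/*
+ * The instantiations above generate the caam16/32/64_to_cpu() and
+ * cpu_to_caam16/32/64() helpers, which follow the endianness selected by
+ * caam_little_end.
+ */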
+
+static inline void wr_reg32(void __iomem *reg, u32 data)
+{
+	if (caam_little_end)
+		iowrite32(data, reg);
+	else
+		iowrite32be(data, reg);
+}
+
+static inline u32 rd_reg32(void __iomem *reg)
+{
+	if (caam_little_end)
+		return ioread32(reg);
+
+	return ioread32be(reg);
+}
+
+static inline void clrsetbits_32(void __iomem *reg, u32 clear, u32 set)
+{
+	if (caam_little_end)
+		iowrite32((ioread32(reg) & ~clear) | set, reg);
+	else
+		iowrite32be((ioread32be(reg) & ~clear) | set, reg);
+}
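+
+/*
+ * Illustrative sketch only, not used by the driver (the function name is
+ * ours): the MCFG software-reset example from the comment above, expressed
+ * as two ordered 32-bit writes following the big-endian register layout
+ * described there. 'mcfg' is a hypothetical pointer to the 64-bit MCFG
+ * register at base + 0x0000.
+ */
+static inline void caam_mcfg_swreset_example(void __iomem *mcfg)
+{
+	/* bits 63-32 live at the numerically lower address */
+	wr_reg32((u32 __iomem *)mcfg, 0);
+	/* bits 31-0 at the higher address; this write latches the cycle */
+	wr_reg32((u32 __iomem *)mcfg + 1, 0x80000000);
+}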
+
+/*
+ * The only users of these wr/rd_reg64 functions is the Job Ring (JR).
+ * The DMA address registers in the JR are handled differently depending on
+ * platform:
+ *
+ * 1. All BE CAAM platforms and i.MX platforms (LE CAAM):
+ *
+ *    base + 0x0000 : most-significant 32 bits
+ *    base + 0x0004 : least-significant 32 bits
+ *
+ * The 32-bit version of this core therefore has to write to base + 0x0004
+ * to set the 32-bit wide DMA address.
+ *
+ * 2. All other LE CAAM platforms (LS1021A etc.)
+ *    base + 0x0000 : least-significant 32 bits
+ *    base + 0x0004 : most-significant 32 bits
+ */
+#ifdef CONFIG_64BIT
+static inline void wr_reg64(void __iomem *reg, u64 data)
+{
+	if (caam_little_end)
+		iowrite64(data, reg);
+	else
+		iowrite64be(data, reg);
+}
+
+static inline u64 rd_reg64(void __iomem *reg)
+{
+	if (caam_little_end)
+		return ioread64(reg);
+	else
+		return ioread64be(reg);
+}
+
+#else /* CONFIG_64BIT */
+static inline void wr_reg64(void __iomem *reg, u64 data)
+{
+	if (!caam_imx && caam_little_end) {
+		wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
+		wr_reg32((u32 __iomem *)(reg), data);
+	} else {
+		wr_reg32((u32 __iomem *)(reg), data >> 32);
+		wr_reg32((u32 __iomem *)(reg) + 1, data);
+	}
+}
+
+static inline u64 rd_reg64(void __iomem *reg)
+{
+	if (!caam_imx && caam_little_end)
+		return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
+			(u64)rd_reg32((u32 __iomem *)(reg)));
+
+	return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
+		(u64)rd_reg32((u32 __iomem *)(reg) + 1));
+}
+#endif /* CONFIG_64BIT  */
+
+static inline u64 cpu_to_caam_dma64(dma_addr_t value)
+{
+	if (caam_imx)
+		return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
+			 (u64)cpu_to_caam32(upper_32_bits(value)));
+
+	return cpu_to_caam64(value);
+}
+
+static inline u64 caam_dma64_to_cpu(u64 value)
+{
+	if (caam_imx)
+		return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
+			 (u64)caam32_to_cpu(upper_32_bits(value)));
+
+	return caam64_to_cpu(value);
+}
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
+#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
+#else
+#define cpu_to_caam_dma(value) cpu_to_caam32(value)
+#define caam_dma_to_cpu(value) caam32_to_cpu(value)
+#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+
+/*
+ * jr_outentry
+ * Represents each entry in a JobR output ring
+ */
+struct jr_outentry {
+	dma_addr_t desc;/* Pointer to completed descriptor */
+	u32 jrstatus;	/* Status for completed descriptor */
+} __packed;
+
+/*
+ * caam_perfmon - Performance Monitor/Secure Memory Status/
+ *                CAAM Global Status/Component Version IDs
+ *
+ * Spans f00-fff wherever instantiated
+ */
+
+/* Number of DECOs */
+#define CHA_NUM_MS_DECONUM_SHIFT	24
+#define CHA_NUM_MS_DECONUM_MASK	(0xfull << CHA_NUM_MS_DECONUM_SHIFT)
+
+/*
+ * CHA version IDs / instantiation bitfields
+ * Defined for use with the cha_id fields in perfmon, but the same shift/mask
+ * selectors can be used to pull out the number of instantiated blocks within
+ * cha_num fields in perfmon because the locations are the same.
+ */
+#define CHA_ID_LS_AES_SHIFT	0
+#define CHA_ID_LS_AES_MASK	(0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_LP	(0x3ull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_HP	(0x4ull << CHA_ID_LS_AES_SHIFT)
+
+#define CHA_ID_LS_DES_SHIFT	4
+#define CHA_ID_LS_DES_MASK	(0xfull << CHA_ID_LS_DES_SHIFT)
+
+#define CHA_ID_LS_ARC4_SHIFT	8
+#define CHA_ID_LS_ARC4_MASK	(0xfull << CHA_ID_LS_ARC4_SHIFT)
+
+#define CHA_ID_LS_MD_SHIFT	12
+#define CHA_ID_LS_MD_MASK	(0xfull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP256	(0x0ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP512	(0x1ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_HP		(0x2ull << CHA_ID_LS_MD_SHIFT)
+
+#define CHA_ID_LS_RNG_SHIFT	16
+#define CHA_ID_LS_RNG_MASK	(0xfull << CHA_ID_LS_RNG_SHIFT)
+
+#define CHA_ID_LS_SNW8_SHIFT	20
+#define CHA_ID_LS_SNW8_MASK	(0xfull << CHA_ID_LS_SNW8_SHIFT)
+
+#define CHA_ID_LS_KAS_SHIFT	24
+#define CHA_ID_LS_KAS_MASK	(0xfull << CHA_ID_LS_KAS_SHIFT)
+
+#define CHA_ID_LS_PK_SHIFT	28
+#define CHA_ID_LS_PK_MASK	(0xfull << CHA_ID_LS_PK_SHIFT)
+
+#define CHA_ID_MS_CRC_SHIFT	0
+#define CHA_ID_MS_CRC_MASK	(0xfull << CHA_ID_MS_CRC_SHIFT)
+
+#define CHA_ID_MS_SNW9_SHIFT	4
+#define CHA_ID_MS_SNW9_MASK	(0xfull << CHA_ID_MS_SNW9_SHIFT)
+
+#define CHA_ID_MS_DECO_SHIFT	24
+#define CHA_ID_MS_DECO_MASK	(0xfull << CHA_ID_MS_DECO_SHIFT)
+
+#define CHA_ID_MS_JR_SHIFT	28
+#define CHA_ID_MS_JR_MASK	(0xfull << CHA_ID_MS_JR_SHIFT)
+
+struct sec_vid {
+	u16 ip_id;
+	u8 maj_rev;
+	u8 min_rev;
+};
+
+struct caam_perfmon {
+	/* Performance Monitor Registers			f00-f9f */
+	u64 req_dequeued;	/* PC_REQ_DEQ - Dequeued Requests	     */
+	u64 ob_enc_req;	/* PC_OB_ENC_REQ - Outbound Encrypt Requests */
+	u64 ib_dec_req;	/* PC_IB_DEC_REQ - Inbound Decrypt Requests  */
+	u64 ob_enc_bytes;	/* PC_OB_ENCRYPT - Outbound Bytes Encrypted  */
+	u64 ob_prot_bytes;	/* PC_OB_PROTECT - Outbound Bytes Protected  */
+	u64 ib_dec_bytes;	/* PC_IB_DECRYPT - Inbound Bytes Decrypted   */
+	u64 ib_valid_bytes;	/* PC_IB_VALIDATED Inbound Bytes Validated   */
+	u64 rsvd[13];
+
+	/* CAAM Hardware Instantiation Parameters		fa0-fbf */
+	u32 cha_rev_ms;		/* CRNR - CHA Rev No. Most significant half*/
+	u32 cha_rev_ls;		/* CRNR - CHA Rev No. Least significant half*/
+#define CTPR_MS_QI_SHIFT	25
+#define CTPR_MS_QI_MASK		(0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_DPAA2		BIT(13)
+#define CTPR_MS_VIRT_EN_INCL	0x00000001
+#define CTPR_MS_VIRT_EN_POR	0x00000002
+#define CTPR_MS_PG_SZ_MASK	0x10
+#define CTPR_MS_PG_SZ_SHIFT	4
+	u32 comp_parms_ms;	/* CTPR - Compile Parameters Register	*/
+	u32 comp_parms_ls;	/* CTPR - Compile Parameters Register	*/
+	u64 rsvd1[2];
+
+	/* CAAM Global Status					fc0-fdf */
+	u64 faultaddr;	/* FAR  - Fault Address		*/
+	u32 faultliodn;	/* FALR - Fault Address LIODN	*/
+	u32 faultdetail;	/* FADR - Fault Addr Detail	*/
+	u32 rsvd2;
+#define CSTA_PLEND		BIT(10)
+#define CSTA_ALT_PLEND		BIT(18)
+	u32 status;		/* CSTA - CAAM Status */
+	u64 rsvd3;
+
+	/* Component Instantiation Parameters			fe0-fff */
+	u32 rtic_id;		/* RVID - RTIC Version ID	*/
+#define CCBVID_ERA_MASK		0xff000000
+#define CCBVID_ERA_SHIFT	24
+	u32 ccb_id;		/* CCBVID - CCB Version ID	*/
+	u32 cha_id_ms;		/* CHAVID - CHA Version ID Most Significant*/
+	u32 cha_id_ls;		/* CHAVID - CHA Version ID Least Significant*/
+	u32 cha_num_ms;		/* CHANUM - CHA Number Most Significant	*/
+	u32 cha_num_ls;		/* CHANUM - CHA Number Least Significant*/
+#define SECVID_MS_IPID_MASK	0xffff0000
+#define SECVID_MS_IPID_SHIFT	16
+#define SECVID_MS_MAJ_REV_MASK	0x0000ff00
+#define SECVID_MS_MAJ_REV_SHIFT	8
+	u32 caam_id_ms;		/* CAAMVID - CAAM Version ID MS	*/
+	u32 caam_id_ls;		/* CAAMVID - CAAM Version ID LS	*/
+};
+
+/* LIODN programming for DMA configuration */
+#define MSTRID_LOCK_LIODN	0x80000000
+#define MSTRID_LOCK_MAKETRUSTED	0x00010000	/* only for JR masterid */
+
+#define MSTRID_LIODN_MASK	0x0fff
+struct masterid {
+	u32 liodn_ms;	/* lock and make-trusted control bits */
+	u32 liodn_ls;	/* LIODN for non-sequence and seq access */
+};
+
+/* Partition ID for DMA configuration */
+struct partid {
+	u32 rsvd1;
+	u32 pidr;	/* partition ID, DECO */
+};
+
+/* RNGB test mode (replicated twice in some configurations) */
+/* Padded out to 0x100 */
+struct rngtst {
+	u32 mode;		/* RTSTMODEx - Test mode */
+	u32 rsvd1[3];
+	u32 reset;		/* RTSTRESETx - Test reset control */
+	u32 rsvd2[3];
+	u32 status;		/* RTSTSSTATUSx - Test status */
+	u32 rsvd3;
+	u32 errstat;		/* RTSTERRSTATx - Test error status */
+	u32 rsvd4;
+	u32 errctl;		/* RTSTERRCTLx - Test error control */
+	u32 rsvd5;
+	u32 entropy;		/* RTSTENTROPYx - Test entropy */
+	u32 rsvd6[15];
+	u32 verifctl;	/* RTSTVERIFCTLx - Test verification control */
+	u32 rsvd7;
+	u32 verifstat;	/* RTSTVERIFSTATx - Test verification status */
+	u32 rsvd8;
+	u32 verifdata;	/* RTSTVERIFDx - Test verification data */
+	u32 rsvd9;
+	u32 xkey;		/* RTSTXKEYx - Test XKEY */
+	u32 rsvd10;
+	u32 oscctctl;	/* RTSTOSCCTCTLx - Test osc. counter control */
+	u32 rsvd11;
+	u32 oscct;		/* RTSTOSCCTx - Test oscillator counter */
+	u32 rsvd12;
+	u32 oscctstat;	/* RTSTODCCTSTATx - Test osc counter status */
+	u32 rsvd13[2];
+	u32 ofifo[4];	/* RTSTOFIFOx - Test output FIFO */
+	u32 rsvd14[15];
+};
+
+/* RNG4 TRNG test registers */
+struct rng4tst {
+#define RTMCTL_PRGM	0x00010000	/* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC	0 /* use von Neumann data in
+						     both entropy shifter and
+						     statistical checker */
+#define RTMCTL_SAMP_MODE_RAW_ES_SC		1 /* use raw data in both
+						     entropy shifter and
+						     statistical checker */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_RAW_SC	2 /* use von Neumann data in
+						     entropy shifter, raw data
+						     in statistical checker */
+#define RTMCTL_SAMP_MODE_INVALID		3 /* invalid combination */
+	u32 rtmctl;		/* misc. control register */
+	u32 rtscmisc;		/* statistical check misc. register */
+	u32 rtpkrrng;		/* poker range register */
+	union {
+		u32 rtpkrmax;	/* PRGM=1: poker max. limit register */
+		u32 rtpkrsq;	/* PRGM=0: poker square calc. result register */
+	};
+#define RTSDCTL_ENT_DLY_SHIFT 16
+#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
+#define RTSDCTL_ENT_DLY_MIN 3200
+#define RTSDCTL_ENT_DLY_MAX 12800
+	u32 rtsdctl;		/* seed control register */
+	union {
+		u32 rtsblim;	/* PRGM=1: sparse bit limit register */
+		u32 rttotsam;	/* PRGM=0: total samples register */
+	};
+	u32 rtfrqmin;		/* frequency count min. limit register */
+#define RTFRQMAX_DISABLE	(1 << 20)
+	union {
+		u32 rtfrqmax;	/* PRGM=1: freq. count max. limit register */
+		u32 rtfrqcnt;	/* PRGM=0: freq. count register */
+	};
+	u32 rsvd1[40];
+#define RDSTA_SKVT 0x80000000
+#define RDSTA_SKVN 0x40000000
+#define RDSTA_IF0 0x00000001
+#define RDSTA_IF1 0x00000002
+#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
+	u32 rdsta;
+	u32 rsvd2[15];
+};
+
+/*
+ * caam_ctrl - basic core configuration
+ * starts base + 0x0000 padded out to 0x1000
+ */
+
+#define KEK_KEY_SIZE		8
+#define TKEK_KEY_SIZE		8
+#define TDSK_KEY_SIZE		8
+
+#define DECO_RESET	1	/* Use with DECO reset/availability regs */
+#define DECO_RESET_0	(DECO_RESET << 0)
+#define DECO_RESET_1	(DECO_RESET << 1)
+#define DECO_RESET_2	(DECO_RESET << 2)
+#define DECO_RESET_3	(DECO_RESET << 3)
+#define DECO_RESET_4	(DECO_RESET << 4)
+
+struct caam_ctrl {
+	/* Basic Configuration Section				000-01f */
+	/* Read/Writable					        */
+	u32 rsvd1;
+	u32 mcr;		/* MCFG      Master Config Register  */
+	u32 rsvd2;
+	u32 scfgr;		/* SCFGR, Security Config Register */
+
+	/* Bus Access Configuration Section			010-11f */
+	/* Read/Writable                                                */
+	struct masterid jr_mid[4];	/* JRxLIODNR - JobR LIODN setup */
+	u32 rsvd3[11];
+	u32 jrstart;			/* JRSTART - Job Ring Start Register */
+	struct masterid rtic_mid[4];	/* RTICxLIODNR - RTIC LIODN setup */
+	u32 rsvd4[5];
+	u32 deco_rsr;			/* DECORSR - Deco Request Source */
+	u32 rsvd11;
+	u32 deco_rq;			/* DECORR - DECO Request */
+	struct partid deco_mid[5];	/* DECOxLIODNR - 1 per DECO */
+	u32 rsvd5[22];
+
+	/* DECO Availability/Reset Section			120-3ff */
+	u32 deco_avail;		/* DAR - DECO availability */
+	u32 deco_reset;		/* DRR - DECO reset */
+	u32 rsvd6[182];
+
+	/* Key Encryption/Decryption Configuration              400-5ff */
+	/* Read/Writable only while in Non-secure mode                  */
+	u32 kek[KEK_KEY_SIZE];	/* JDKEKR - Key Encryption Key */
+	u32 tkek[TKEK_KEY_SIZE];	/* TDKEKR - Trusted Desc KEK */
+	u32 tdsk[TDSK_KEY_SIZE];	/* TDSKR - Trusted Desc Signing Key */
+	u32 rsvd7[32];
+	u64 sknonce;			/* SKNR - Secure Key Nonce */
+	u32 rsvd8[70];
+
+	/* RNG Test/Verification/Debug Access                   600-7ff */
+	/* (Useful in Test/Debug modes only...)                         */
+	union {
+		struct rngtst rtst[2];
+		struct rng4tst r4tst[2];
+	};
+
+	u32 rsvd9[448];
+
+	/* Performance Monitor                                  f00-fff */
+	struct caam_perfmon perfmon;
+};
+
+/*
+ * Controller master config register defs
+ */
+#define MCFGR_SWRESET		0x80000000 /* software reset */
+#define MCFGR_WDENABLE		0x40000000 /* DECO watchdog enable */
+#define MCFGR_WDFAIL		0x20000000 /* DECO watchdog force-fail */
+#define MCFGR_DMA_RESET		0x10000000
+#define MCFGR_LONG_PTR		0x00010000 /* Use >32-bit desc addressing */
+#define SCFGR_RDBENABLE		0x00000400
+#define SCFGR_VIRT_EN		0x00008000
+#define DECORR_RQD0ENABLE	0x00000001 /* Enable DECO0 for direct access */
+#define DECORSR_JR0		0x00000001 /* JR to supply TZ, SDID, ICID */
+#define DECORSR_VALID		0x80000000
+#define DECORR_DEN0		0x00010000 /* DECO0 available for access*/
+
+/* AXI read cache control */
+#define MCFGR_ARCACHE_SHIFT	12
+#define MCFGR_ARCACHE_MASK	(0xf << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_BUFF	(0x1 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_CACH	(0x2 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_RALL	(0x4 << MCFGR_ARCACHE_SHIFT)
+
+/* AXI write cache control */
+#define MCFGR_AWCACHE_SHIFT	8
+#define MCFGR_AWCACHE_MASK	(0xf << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_BUFF	(0x1 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_CACH	(0x2 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_WALL	(0x8 << MCFGR_AWCACHE_SHIFT)
+
+/* AXI pipeline depth */
+#define MCFGR_AXIPIPE_SHIFT	4
+#define MCFGR_AXIPIPE_MASK	(0xf << MCFGR_AXIPIPE_SHIFT)
+
+#define MCFGR_AXIPRI		0x00000008 /* Assert AXI priority sideband */
+#define MCFGR_LARGE_BURST	0x00000004 /* 128/256-byte burst size */
+#define MCFGR_BURST_64		0x00000001 /* 64-byte burst size */
+
+/* JRSTART register offsets */
+#define JRSTART_JR0_START       0x00000001 /* Start Job ring 0 */
+#define JRSTART_JR1_START       0x00000002 /* Start Job ring 1 */
+#define JRSTART_JR2_START       0x00000004 /* Start Job ring 2 */
+#define JRSTART_JR3_START       0x00000008 /* Start Job ring 3 */
+
+/*
+ * caam_job_ring - direct job ring setup
+ * 1-4 possible per instantiation, base + 1000/2000/3000/4000
+ * Padded out to 0x1000
+ */
+struct caam_job_ring {
+	/* Input ring */
+	u64 inpring_base;	/* IRBAx -  Input desc ring baseaddr */
+	u32 rsvd1;
+	u32 inpring_size;	/* IRSx - Input ring size */
+	u32 rsvd2;
+	u32 inpring_avail;	/* IRSAx - Input ring room remaining */
+	u32 rsvd3;
+	u32 inpring_jobadd;	/* IRJAx - Input ring jobs added */
+
+	/* Output Ring */
+	u64 outring_base;	/* ORBAx - Output status ring base addr */
+	u32 rsvd4;
+	u32 outring_size;	/* ORSx - Output ring size */
+	u32 rsvd5;
+	u32 outring_rmvd;	/* ORJRx - Output ring jobs removed */
+	u32 rsvd6;
+	u32 outring_used;	/* ORSFx - Output ring slots full */
+
+	/* Status/Configuration */
+	u32 rsvd7;
+	u32 jroutstatus;	/* JRSTAx - JobR output status */
+	u32 rsvd8;
+	u32 jrintstatus;	/* JRINTx - JobR interrupt status */
+	u32 rconfig_hi;	/* JRxCFG - Ring configuration */
+	u32 rconfig_lo;
+
+	/* Indices. CAAM maintains as "heads" of each queue */
+	u32 rsvd9;
+	u32 inp_rdidx;	/* IRRIx - Input ring read index */
+	u32 rsvd10;
+	u32 out_wtidx;	/* ORWIx - Output ring write index */
+
+	/* Command/control */
+	u32 rsvd11;
+	u32 jrcommand;	/* JRCRx - JobR command */
+
+	u32 rsvd12[932];
+
+	/* Performance Monitor                                  f00-fff */
+	struct caam_perfmon perfmon;
+};
+
+#define JR_RINGSIZE_MASK	0x03ff
+/*
+ * jrstatus - Job Ring Output Status
+ * All values in lo word
+ * Also note, same values written out as status through QI
+ * in the command/status field of a frame descriptor
+ */
+#define JRSTA_SSRC_SHIFT            28
+#define JRSTA_SSRC_MASK             0xf0000000
+
+#define JRSTA_SSRC_NONE             0x00000000
+#define JRSTA_SSRC_CCB_ERROR        0x20000000
+#define JRSTA_SSRC_JUMP_HALT_USER   0x30000000
+#define JRSTA_SSRC_DECO             0x40000000
+#define JRSTA_SSRC_JRERROR          0x60000000
+#define JRSTA_SSRC_JUMP_HALT_CC     0x70000000
+
+#define JRSTA_DECOERR_JUMP          0x08000000
+#define JRSTA_DECOERR_INDEX_SHIFT   8
+#define JRSTA_DECOERR_INDEX_MASK    0xff00
+#define JRSTA_DECOERR_ERROR_MASK    0x00ff
+
+#define JRSTA_DECOERR_NONE          0x00
+#define JRSTA_DECOERR_LINKLEN       0x01
+#define JRSTA_DECOERR_LINKPTR       0x02
+#define JRSTA_DECOERR_JRCTRL        0x03
+#define JRSTA_DECOERR_DESCCMD       0x04
+#define JRSTA_DECOERR_ORDER         0x05
+#define JRSTA_DECOERR_KEYCMD        0x06
+#define JRSTA_DECOERR_LOADCMD       0x07
+#define JRSTA_DECOERR_STORECMD      0x08
+#define JRSTA_DECOERR_OPCMD         0x09
+#define JRSTA_DECOERR_FIFOLDCMD     0x0a
+#define JRSTA_DECOERR_FIFOSTCMD     0x0b
+#define JRSTA_DECOERR_MOVECMD       0x0c
+#define JRSTA_DECOERR_JUMPCMD       0x0d
+#define JRSTA_DECOERR_MATHCMD       0x0e
+#define JRSTA_DECOERR_SHASHCMD      0x0f
+#define JRSTA_DECOERR_SEQCMD        0x10
+#define JRSTA_DECOERR_DECOINTERNAL  0x11
+#define JRSTA_DECOERR_SHDESCHDR     0x12
+#define JRSTA_DECOERR_HDRLEN        0x13
+#define JRSTA_DECOERR_BURSTER       0x14
+#define JRSTA_DECOERR_DESCSIGNATURE 0x15
+#define JRSTA_DECOERR_DMA           0x16
+#define JRSTA_DECOERR_BURSTFIFO     0x17
+#define JRSTA_DECOERR_JRRESET       0x1a
+#define JRSTA_DECOERR_JOBFAIL       0x1b
+#define JRSTA_DECOERR_DNRERR        0x80
+#define JRSTA_DECOERR_UNDEFPCL      0x81
+#define JRSTA_DECOERR_PDBERR        0x82
+#define JRSTA_DECOERR_ANRPLY_LATE   0x83
+#define JRSTA_DECOERR_ANRPLY_REPLAY 0x84
+#define JRSTA_DECOERR_SEQOVF        0x85
+#define JRSTA_DECOERR_INVSIGN       0x86
+#define JRSTA_DECOERR_DSASIGN       0x87
+
+#define JRSTA_CCBERR_JUMP           0x08000000
+#define JRSTA_CCBERR_INDEX_MASK     0xff00
+#define JRSTA_CCBERR_INDEX_SHIFT    8
+#define JRSTA_CCBERR_CHAID_MASK     0x00f0
+#define JRSTA_CCBERR_CHAID_SHIFT    4
+#define JRSTA_CCBERR_ERRID_MASK     0x000f
+
+#define JRSTA_CCBERR_CHAID_AES      (0x01 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_DES      (0x02 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_ARC4     (0x03 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_MD       (0x04 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_RNG      (0x05 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_SNOW     (0x06 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_KASUMI   (0x07 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_PK       (0x08 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_CRC      (0x09 << JRSTA_CCBERR_CHAID_SHIFT)
+
+#define JRSTA_CCBERR_ERRID_NONE     0x00
+#define JRSTA_CCBERR_ERRID_MODE     0x01
+#define JRSTA_CCBERR_ERRID_DATASIZ  0x02
+#define JRSTA_CCBERR_ERRID_KEYSIZ   0x03
+#define JRSTA_CCBERR_ERRID_PKAMEMSZ 0x04
+#define JRSTA_CCBERR_ERRID_PKBMEMSZ 0x05
+#define JRSTA_CCBERR_ERRID_SEQUENCE 0x06
+#define JRSTA_CCBERR_ERRID_PKDIVZRO 0x07
+#define JRSTA_CCBERR_ERRID_PKMODEVN 0x08
+#define JRSTA_CCBERR_ERRID_KEYPARIT 0x09
+#define JRSTA_CCBERR_ERRID_ICVCHK   0x0a
+#define JRSTA_CCBERR_ERRID_HARDWARE 0x0b
+#define JRSTA_CCBERR_ERRID_CCMAAD   0x0c
+#define JRSTA_CCBERR_ERRID_INVCHA   0x0f
+
+#define JRINT_ERR_INDEX_MASK        0x3fff0000
+#define JRINT_ERR_INDEX_SHIFT       16
+#define JRINT_ERR_TYPE_MASK         0xf00
+#define JRINT_ERR_TYPE_SHIFT        8
+#define JRINT_ERR_HALT_MASK         0xc
+#define JRINT_ERR_HALT_SHIFT        2
+#define JRINT_ERR_HALT_INPROGRESS   0x4
+#define JRINT_ERR_HALT_COMPLETE     0x8
+#define JRINT_JR_ERROR              0x02
+#define JRINT_JR_INT                0x01
+
+#define JRINT_ERR_TYPE_WRITE        1
+#define JRINT_ERR_TYPE_BAD_INPADDR  3
+#define JRINT_ERR_TYPE_BAD_OUTADDR  4
+#define JRINT_ERR_TYPE_INV_INPWRT   5
+#define JRINT_ERR_TYPE_INV_OUTWRT   6
+#define JRINT_ERR_TYPE_RESET        7
+#define JRINT_ERR_TYPE_REMOVE_OFL   8
+#define JRINT_ERR_TYPE_ADD_OFL      9
+
+#define JRCFG_SOE		0x04
+#define JRCFG_ICEN		0x02
+#define JRCFG_IMSK		0x01
+#define JRCFG_ICDCT_SHIFT	8
+#define JRCFG_ICTT_SHIFT	16
+
+#define JRCR_RESET                  0x01
+
+/*
+ * caam_assurance - Assurance Controller View
+ * base + 0x6000 padded out to 0x1000
+ */
+
+struct rtic_element {
+	u64 address;
+	u32 rsvd;
+	u32 length;
+};
+
+struct rtic_block {
+	struct rtic_element element[2];
+};
+
+struct rtic_memhash {
+	u32 memhash_be[32];
+	u32 memhash_le[32];
+};
+
+struct caam_assurance {
+    /* Status/Command/Watchdog */
+	u32 rsvd1;
+	u32 status;		/* RSTA - Status */
+	u32 rsvd2;
+	u32 cmd;		/* RCMD - Command */
+	u32 rsvd3;
+	u32 ctrl;		/* RCTL - Control */
+	u32 rsvd4;
+	u32 throttle;	/* RTHR - Throttle */
+	u32 rsvd5[2];
+	u64 watchdog;	/* RWDOG - Watchdog Timer */
+	u32 rsvd6;
+	u32 rend;		/* REND - Endian corrections */
+	u32 rsvd7[50];
+
+	/* Block access/configuration @ 100/110/120/130 */
+	struct rtic_block memblk[4];	/* Memory Blocks A-D */
+	u32 rsvd8[32];
+
+	/* Block hashes @ 200/300/400/500 */
+	struct rtic_memhash hash[4];	/* Block hash values A-D */
+	u32 rsvd_3[640];
+};
+
+/*
+ * caam_queue_if - QI configuration and control
+ * starts base + 0x7000, padded out to 0x1000 long
+ */
+
+struct caam_queue_if {
+	u32 qi_control_hi;	/* QICTL  - QI Control */
+	u32 qi_control_lo;
+	u32 rsvd1;
+	u32 qi_status;	/* QISTA  - QI Status */
+	u32 qi_deq_cfg_hi;	/* QIDQC  - QI Dequeue Configuration */
+	u32 qi_deq_cfg_lo;
+	u32 qi_enq_cfg_hi;	/* QISEQC - QI Enqueue Command     */
+	u32 qi_enq_cfg_lo;
+	u32 rsvd2[1016];
+};
+
+/* QI control bits - low word */
+#define QICTL_DQEN      0x01              /* Enable frame pop          */
+#define QICTL_STOP      0x02              /* Stop dequeue/enqueue      */
+#define QICTL_SOE       0x04              /* Stop on error             */
+
+/* QI control bits - high word */
+#define QICTL_MBSI	0x01
+#define QICTL_MHWSI	0x02
+#define QICTL_MWSI	0x04
+#define QICTL_MDWSI	0x08
+#define QICTL_CBSI	0x10		/* CtrlDataByteSwapInput     */
+#define QICTL_CHWSI	0x20		/* CtrlDataHalfSwapInput     */
+#define QICTL_CWSI	0x40		/* CtrlDataWordSwapInput     */
+#define QICTL_CDWSI	0x80		/* CtrlDataDWordSwapInput    */
+#define QICTL_MBSO	0x0100
+#define QICTL_MHWSO	0x0200
+#define QICTL_MWSO	0x0400
+#define QICTL_MDWSO	0x0800
+#define QICTL_CBSO	0x1000		/* CtrlDataByteSwapOutput    */
+#define QICTL_CHWSO	0x2000		/* CtrlDataHalfSwapOutput    */
+#define QICTL_CWSO	0x4000		/* CtrlDataWordSwapOutput    */
+#define QICTL_CDWSO     0x8000		/* CtrlDataDWordSwapOutput   */
+#define QICTL_DMBS	0x010000
+#define QICTL_EPO	0x020000
+
+/* QI status bits */
+#define QISTA_PHRDERR   0x01              /* PreHeader Read Error      */
+#define QISTA_CFRDERR   0x02              /* Compound Frame Read Error */
+#define QISTA_OFWRERR   0x04              /* Output Frame Read Error   */
+#define QISTA_BPDERR    0x08              /* Buffer Pool Depleted      */
+#define QISTA_BTSERR    0x10              /* Buffer Undersize          */
+#define QISTA_CFWRERR   0x20              /* Compound Frame Write Err  */
+#define QISTA_STOPD     0x80000000        /* QI Stopped (see QICTL)    */
+
+/* deco_sg_table - DECO view of scatter/gather table */
+struct deco_sg_table {
+	u64 addr;		/* Segment Address */
+	u32 elen;		/* E, F bits + 30-bit length */
+	u32 bpid_offset;	/* Buffer Pool ID + 16-bit length */
+};
+
+/*
+ * caam_deco - descriptor controller - CHA cluster block
+ *
+ * Only accessible when direct DECO access is turned on
+ * (done in DECORR, via MID programmed in DECOxMID)
+ *
+ * 5 typical, base + 0x8000/9000/a000/b000
+ * Padded out to 0x1000 long
+ */
+struct caam_deco {
+	u32 rsvd1;
+	u32 cls1_mode;	/* CxC1MR -  Class 1 Mode */
+	u32 rsvd2;
+	u32 cls1_keysize;	/* CxC1KSR - Class 1 Key Size */
+	u32 cls1_datasize_hi;	/* CxC1DSR - Class 1 Data Size */
+	u32 cls1_datasize_lo;
+	u32 rsvd3;
+	u32 cls1_icvsize;	/* CxC1ICVSR - Class 1 ICV size */
+	u32 rsvd4[5];
+	u32 cha_ctrl;	/* CCTLR - CHA control */
+	u32 rsvd5;
+	u32 irq_crtl;	/* CxCIRQ - CCB interrupt done/error/clear */
+	u32 rsvd6;
+	u32 clr_written;	/* CxCWR - Clear-Written */
+	u32 ccb_status_hi;	/* CxCSTA - CCB Status/Error */
+	u32 ccb_status_lo;
+	u32 rsvd7[3];
+	u32 aad_size;	/* CxAADSZR - Current AAD Size */
+	u32 rsvd8;
+	u32 cls1_iv_size;	/* CxC1IVSZR - Current Class 1 IV Size */
+	u32 rsvd9[7];
+	u32 pkha_a_size;	/* PKASZRx - Size of PKHA A */
+	u32 rsvd10;
+	u32 pkha_b_size;	/* PKBSZRx - Size of PKHA B */
+	u32 rsvd11;
+	u32 pkha_n_size;	/* PKNSZRx - Size of PKHA N */
+	u32 rsvd12;
+	u32 pkha_e_size;	/* PKESZRx - Size of PKHA E */
+	u32 rsvd13[24];
+	u32 cls1_ctx[16];	/* CxC1CTXR - Class 1 Context @100 */
+	u32 rsvd14[48];
+	u32 cls1_key[8];	/* CxC1KEYR - Class 1 Key @200 */
+	u32 rsvd15[121];
+	u32 cls2_mode;	/* CxC2MR - Class 2 Mode */
+	u32 rsvd16;
+	u32 cls2_keysize;	/* CxX2KSR - Class 2 Key Size */
+	u32 cls2_datasize_hi;	/* CxC2DSR - Class 2 Data Size */
+	u32 cls2_datasize_lo;
+	u32 rsvd17;
+	u32 cls2_icvsize;	/* CxC2ICVSZR - Class 2 ICV Size */
+	u32 rsvd18[56];
+	u32 cls2_ctx[18];	/* CxC2CTXR - Class 2 Context @500 */
+	u32 rsvd19[46];
+	u32 cls2_key[32];	/* CxC2KEYR - Class2 Key @600 */
+	u32 rsvd20[84];
+	u32 inp_infofifo_hi;	/* CxIFIFO - Input Info FIFO @7d0 */
+	u32 inp_infofifo_lo;
+	u32 rsvd21[2];
+	u64 inp_datafifo;	/* CxDFIFO - Input Data FIFO */
+	u32 rsvd22[2];
+	u64 out_datafifo;	/* CxOFIFO - Output Data FIFO */
+	u32 rsvd23[2];
+	u32 jr_ctl_hi;	/* CxJRR - JobR Control Register      @800 */
+	u32 jr_ctl_lo;
+	u64 jr_descaddr;	/* CxDADR - JobR Descriptor Address */
+#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF
+	u32 op_status_hi;	/* DxOPSTA - DECO Operation Status */
+	u32 op_status_lo;
+	u32 rsvd24[2];
+	u32 liodn;		/* DxLSR - DECO LIODN Status - non-seq */
+	u32 td_liodn;	/* DxLSR - DECO LIODN Status - trustdesc */
+	u32 rsvd26[6];
+	u64 math[4];		/* DxMTH - Math register */
+	u32 rsvd27[8];
+	struct deco_sg_table gthr_tbl[4];	/* DxGTR - Gather Tables */
+	u32 rsvd28[16];
+	struct deco_sg_table sctr_tbl[4];	/* DxSTR - Scatter Tables */
+	u32 rsvd29[48];
+	u32 descbuf[64];	/* DxDESB - Descriptor buffer */
+	u32 rscvd30[193];
+#define DESC_DBG_DECO_STAT_HOST_ERR	0x00D00000
+#define DESC_DBG_DECO_STAT_VALID	0x80000000
+#define DESC_DBG_DECO_STAT_MASK		0x00F00000
+	u32 desc_dbg;		/* DxDDR - DECO Debug Register */
+	u32 rsvd31[126];
+};
+
+#define DECO_JQCR_WHL		0x20000000
+#define DECO_JQCR_FOUR		0x10000000
+
+#define JR_BLOCK_NUMBER		1
+#define ASSURE_BLOCK_NUMBER	6
+#define QI_BLOCK_NUMBER		7
+#define DECO_BLOCK_NUMBER	8
+#define PG_SIZE_4K		0x1000
+#define PG_SIZE_64K		0x10000
+#endif /* REGS_H */
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
new file mode 100644
index 0000000..d000b4d
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SG_SW_QM_H
+#define __SG_SW_QM_H
+
+#include <soc/fsl/qman.h>
+#include "regs.h"
+
+static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
+				  u16 offset)
+{
+	qm_sg_entry_set64(qm_sg_ptr, dma);
+	qm_sg_ptr->__reserved2 = 0;
+	qm_sg_ptr->bpid = 0;
+	qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
+}
+
+static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
+				    dma_addr_t dma, u32 len, u16 offset)
+{
+	__dma_to_qm_sg(qm_sg_ptr, dma, offset);
+	qm_sg_entry_set_len(qm_sg_ptr, len);
+}
+
+static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
+					 dma_addr_t dma, u32 len, u16 offset)
+{
+	__dma_to_qm_sg(qm_sg_ptr, dma, offset);
+	qm_sg_entry_set_f(qm_sg_ptr, len);
+}
+
+static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
+					dma_addr_t dma, u32 len, u16 offset)
+{
+	__dma_to_qm_sg(qm_sg_ptr, dma, offset);
+	qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
+}
+
+static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
+					     dma_addr_t dma, u32 len,
+					     u16 offset)
+{
+	__dma_to_qm_sg(qm_sg_ptr, dma, offset);
+	qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
+				     (len & QM_SG_LEN_MASK));
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * but does not set the final bit; instead, returns the last entry
+ */
+static inline struct qm_sg_entry *
+sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+	    struct qm_sg_entry *qm_sg_ptr, u16 offset)
+{
+	while (sg_count && sg) {
+		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
+				 sg_dma_len(sg), offset);
+		qm_sg_ptr++;
+		sg = sg_next(sg);
+		sg_count--;
+	}
+	return qm_sg_ptr - 1;
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+				    struct qm_sg_entry *qm_sg_ptr, u16 offset)
+{
+	qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+	qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
+}
+
+#endif /* __SG_SW_QM_H */
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
new file mode 100644
index 0000000..b5b4c12
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the names of the above-listed copyright holders nor the
+ *	 names of any contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SG_SW_QM2_H_
+#define _SG_SW_QM2_H_
+
+#include <soc/fsl/dpaa2-fd.h>
+
+static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
+				    dma_addr_t dma, u32 len, u16 offset)
+{
+	dpaa2_sg_set_addr(qm_sg_ptr, dma);
+	dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
+	dpaa2_sg_set_final(qm_sg_ptr, false);
+	dpaa2_sg_set_len(qm_sg_ptr, len);
+	dpaa2_sg_set_bpid(qm_sg_ptr, 0);
+	dpaa2_sg_set_offset(qm_sg_ptr, offset);
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * but does not set the final bit; instead, returns the last entry
+ */
+static inline struct dpaa2_sg_entry *
+sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+	    struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
+{
+	while (sg_count && sg) {
+		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
+				 sg_dma_len(sg), offset);
+		qm_sg_ptr++;
+		sg = sg_next(sg);
+		sg_count--;
+	}
+	return qm_sg_ptr - 1;
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+				    struct dpaa2_sg_entry *qm_sg_ptr,
+				    u16 offset)
+{
+	qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+	dpaa2_sg_set_final(qm_sg_ptr, true);
+}
+
+#endif /* _SG_SW_QM2_H_ */
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
new file mode 100644
index 0000000..dbfa9fc
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM/SEC 4.x functions for using scatterlists in caam driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef _SG_SW_SEC4_H_
+#define _SG_SW_SEC4_H_
+
+#include "ctrl.h"
+#include "regs.h"
+#include "sg_sw_qm2.h"
+#include <soc/fsl/dpaa2-fd.h>
+
+struct sec4_sg_entry {
+	u64 ptr;
+	u32 len;
+	u32 bpid_offset;
+};
+
+/*
+ * convert single dma address to h/w link table format
+ */
+static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
+				      dma_addr_t dma, u32 len, u16 offset)
+{
+	if (caam_dpaa2) {
+		dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
+				 offset);
+	} else {
+		sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
+		sec4_sg_ptr->len = cpu_to_caam32(len);
+		sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
+							 SEC4_SG_OFFSET_MASK);
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
+		       sizeof(struct sec4_sg_entry), 1);
+#endif
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * but does not set the final bit; instead, returns the last entry
+ */
+static inline struct sec4_sg_entry *
+sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
+	      struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
+{
+	while (sg_count) {
+		dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
+				   sg_dma_len(sg), offset);
+		sec4_sg_ptr++;
+		sg = sg_next(sg);
+		sg_count--;
+	}
+	return sec4_sg_ptr - 1;
+}
+
+static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
+{
+	if (caam_dpaa2)
+		dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
+	else
+		sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
+				      struct sec4_sg_entry *sec4_sg_ptr,
+				      u16 offset)
+{
+	sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
+	sg_to_sec4_set_last(sec4_sg_ptr);
+}
+
+#endif /* _SG_SW_SEC4_H_ */
diff --git a/drivers/crypto/cavium/Makefile b/drivers/crypto/cavium/Makefile
new file mode 100644
index 0000000..641268b
--- /dev/null
+++ b/drivers/crypto/cavium/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for Cavium crypto device drivers
+#
+obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += zip/
diff --git a/drivers/crypto/cavium/cpt/Kconfig b/drivers/crypto/cavium/cpt/Kconfig
new file mode 100644
index 0000000..cbd51b1
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/Kconfig
@@ -0,0 +1,17 @@
+#
+# Cavium crypto device configuration
+#
+
+config CRYPTO_DEV_CPT
+	tristate
+
+config CAVIUM_CPT
+	tristate "Cavium Cryptographic Accelerator driver"
+	depends on ARCH_THUNDER || COMPILE_TEST
+	depends on PCI_MSI && 64BIT
+	select CRYPTO_DEV_CPT
+	help
+	  Support for the Cavium CPT block found in the OCTEON TX series
+	  of processors.
+
+	  To compile this as a module, choose M here.
diff --git a/drivers/crypto/cavium/cpt/Makefile b/drivers/crypto/cavium/cpt/Makefile
new file mode 100644
index 0000000..dbf055e
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CAVIUM_CPT) += cptpf.o cptvf.o
+cptpf-objs := cptpf_main.o cptpf_mbox.o
+cptvf-objs := cptvf_main.o cptvf_reqmanager.o cptvf_mbox.o cptvf_algs.o
diff --git a/drivers/crypto/cavium/cpt/cpt_common.h b/drivers/crypto/cavium/cpt/cpt_common.h
new file mode 100644
index 0000000..225078d
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/cpt_common.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __CPT_COMMON_H
+#define __CPT_COMMON_H
+
+#include <asm/byteorder.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include "cpt_hw_types.h"
+
+/* Device ID */
+#define CPT_81XX_PCI_PF_DEVICE_ID 0xa040
+#define CPT_81XX_PCI_VF_DEVICE_ID 0xa041
+
+/* flags to indicate the features supported */
+#define CPT_FLAG_SRIOV_ENABLED BIT(1)
+#define CPT_FLAG_VF_DRIVER BIT(2)
+#define CPT_FLAG_DEVICE_READY BIT(3)
+
+#define cpt_sriov_enabled(cpt) ((cpt)->flags & CPT_FLAG_SRIOV_ENABLED)
+#define cpt_vf_driver(cpt) ((cpt)->flags & CPT_FLAG_VF_DRIVER)
+#define cpt_device_ready(cpt) ((cpt)->flags & CPT_FLAG_DEVICE_READY)
+
+#define CPT_MBOX_MSG_TYPE_ACK 1
+#define CPT_MBOX_MSG_TYPE_NACK 2
+#define CPT_MBOX_MSG_TIMEOUT 2000
+#define VF_STATE_DOWN 0
+#define VF_STATE_UP 1
+
+/*
+ * CPT Registers map for 81xx
+ */
+
+/* PF registers */
+#define CPTX_PF_CONSTANTS(a) (0x0ll + ((u64)(a) << 36))
+#define CPTX_PF_RESET(a) (0x100ll + ((u64)(a) << 36))
+#define CPTX_PF_DIAG(a) (0x120ll + ((u64)(a) << 36))
+#define CPTX_PF_BIST_STATUS(a) (0x160ll + ((u64)(a) << 36))
+#define CPTX_PF_ECC0_CTL(a) (0x200ll + ((u64)(a) << 36))
+#define CPTX_PF_ECC0_FLIP(a) (0x210ll + ((u64)(a) << 36))
+#define CPTX_PF_ECC0_INT(a) (0x220ll + ((u64)(a) << 36))
+#define CPTX_PF_ECC0_INT_W1S(a) (0x230ll + ((u64)(a) << 36))
+#define CPTX_PF_ECC0_ENA_W1S(a)	(0x240ll + ((u64)(a) << 36))
+#define CPTX_PF_ECC0_ENA_W1C(a)	(0x250ll + ((u64)(a) << 36))
+#define CPTX_PF_MBOX_INTX(a, b)	\
+	(0x400ll + ((u64)(a) << 36) + ((b) << 3))
+#define CPTX_PF_MBOX_INT_W1SX(a, b) \
+	(0x420ll + ((u64)(a) << 36) + ((b) << 3))
+#define CPTX_PF_MBOX_ENA_W1CX(a, b) \
+	(0x440ll + ((u64)(a) << 36) + ((b) << 3))
+#define CPTX_PF_MBOX_ENA_W1SX(a, b) \
+	(0x460ll + ((u64)(a) << 36) + ((b) << 3))
+#define CPTX_PF_EXEC_INT(a) (0x500ll + 0x1000000000ll * ((a) & 0x1))
+#define CPTX_PF_EXEC_INT_W1S(a)	(0x520ll + ((u64)(a) << 36))
+#define CPTX_PF_EXEC_ENA_W1C(a)	(0x540ll + ((u64)(a) << 36))
+#define CPTX_PF_EXEC_ENA_W1S(a)	(0x560ll + ((u64)(a) << 36))
+#define CPTX_PF_GX_EN(a, b) \
+	(0x600ll + ((u64)(a) << 36) + ((b) << 3))
+#define CPTX_PF_EXEC_INFO(a) (0x700ll + ((u64)(a) << 36))
+#define CPTX_PF_EXEC_BUSY(a) (0x800ll + ((u64)(a) << 36))
+#define CPTX_PF_EXEC_INFO0(a) (0x900ll + ((u64)(a) << 36))
+#define CPTX_PF_EXEC_INFO1(a) (0x910ll + ((u64)(a) << 36))
+#define CPTX_PF_INST_REQ_PC(a) (0x10000ll + ((u64)(a) << 36))
+#define CPTX_PF_INST_LATENCY_PC(a) \
+	(0x10020ll + ((u64)(a) << 36))
+#define CPTX_PF_RD_REQ_PC(a) (0x10040ll + ((u64)(a) << 36))
+#define CPTX_PF_RD_LATENCY_PC(a) (0x10060ll + ((u64)(a) << 36))
+#define CPTX_PF_RD_UC_PC(a) (0x10080ll + ((u64)(a) << 36))
+#define CPTX_PF_ACTIVE_CYCLES_PC(a) (0x10100ll + ((u64)(a) << 36))
+#define CPTX_PF_EXE_CTL(a) (0x4000000ll + ((u64)(a) << 36))
+#define CPTX_PF_EXE_STATUS(a) (0x4000008ll + ((u64)(a) << 36))
+#define CPTX_PF_EXE_CLK(a) (0x4000010ll + ((u64)(a) << 36))
+#define CPTX_PF_EXE_DBG_CTL(a) (0x4000018ll + ((u64)(a) << 36))
+#define CPTX_PF_EXE_DBG_DATA(a)	(0x4000020ll + ((u64)(a) << 36))
+#define CPTX_PF_EXE_BIST_STATUS(a) (0x4000028ll + ((u64)(a) << 36))
+#define CPTX_PF_EXE_REQ_TIMER(a) (0x4000030ll + ((u64)(a) << 36))
+#define CPTX_PF_EXE_MEM_CTL(a) (0x4000038ll + ((u64)(a) << 36))
+#define CPTX_PF_EXE_PERF_CTL(a)	(0x4001000ll + ((u64)(a) << 36))
+#define CPTX_PF_EXE_DBG_CNTX(a, b) \
+	(0x4001100ll + ((u64)(a) << 36) + ((b) << 3))
+#define CPTX_PF_EXE_PERF_EVENT_CNT(a) (0x4001180ll + ((u64)(a) << 36))
+#define CPTX_PF_EXE_EPCI_INBX_CNT(a, b) \
+	(0x4001200ll + ((u64)(a) << 36) + ((b) << 3))
+#define CPTX_PF_EXE_EPCI_OUTBX_CNT(a, b) \
+	(0x4001240ll + ((u64)(a) << 36) + ((b) << 3))
+#define CPTX_PF_ENGX_UCODE_BASE(a, b) \
+	(0x4002000ll + ((u64)(a) << 36) + ((b) << 3))
+#define CPTX_PF_QX_CTL(a, b) \
+	(0x8000000ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_PF_QX_GMCTL(a, b) \
+	(0x8000020ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_PF_QX_CTL2(a, b) \
+	(0x8000100ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_PF_VFX_MBOXX(a, b, c) \
+	(0x8001000ll + ((u64)(a) << 36) + ((b) << 20) + ((c) << 8))
+
+/* VF registers */
+#define CPTX_VQX_CTL(a, b) (0x100ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_SADDR(a, b) (0x200ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_DONE_WAIT(a, b) (0x400ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_INPROG(a, b) (0x410ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_DONE(a, b) (0x420ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_DONE_ACK(a, b) (0x440ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_DONE_INT_W1S(a, b) (0x460ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_DONE_INT_W1C(a, b) (0x468ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_DONE_ENA_W1S(a, b) (0x470ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_DONE_ENA_W1C(a, b) (0x478ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_MISC_INT(a, b)	(0x500ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_MISC_INT_W1S(a, b) (0x508ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_MISC_ENA_W1S(a, b) (0x510ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_MISC_ENA_W1C(a, b) (0x518ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VQX_DOORBELL(a, b) (0x600ll + ((u64)(a) << 36) + ((b) << 20))
+#define CPTX_VFX_PF_MBOXX(a, b, c) \
+	(0x1000ll + ((u64)(a) << 36) + ((b) << 20) + ((c) << 3))
+
+enum vftype {
+	AE_TYPES = 1,
+	SE_TYPES = 2,
+	BAD_CPT_TYPES,
+};
+
+/* CPT mailbox message opcodes */
+enum cpt_mbox_opcode {
+	CPT_MSG_VF_UP = 1,
+	CPT_MSG_VF_DOWN,
+	CPT_MSG_READY,
+	CPT_MSG_QLEN,
+	CPT_MSG_QBIND_GRP,
+	CPT_MSG_VQ_PRIORITY,
+};
+
+/* CPT mailbox structure */
+struct cpt_mbox {
+	u64 msg; /* Message type MBOX[0] */
+	u64 data; /* Data        MBOX[1] */
+};
+
+/* Register read/write APIs */
+static inline void cpt_write_csr64(u8 __iomem *hw_addr, u64 offset,
+				   u64 val)
+{
+	writeq(val, hw_addr + offset);
+}
+
+static inline u64 cpt_read_csr64(u8 __iomem *hw_addr, u64 offset)
+{
+	return readq(hw_addr + offset);
+}
+#endif /* __CPT_COMMON_H */
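/*
 * An illustrative sketch, not part of the driver sources above, of how the
 * CPTX_* offset macros and the cpt_read_csr64() helper combine: the block
 * index lands in bits <39:36>, the VF/queue number in bits <27:20>, and the
 * mailbox word select at bit 8.  The function name is hypothetical.
 */
static inline u64 example_read_vf_mbox_word(u8 __iomem *base, int vf, int word)
{
	/*
	 * CPTX_PF_VFX_MBOXX(block, vf, word)
	 *	= 0x8001000 + (block << 36) + (vf << 20) + (word << 8),
	 * so CPTX_PF_VFX_MBOXX(0, 3, 1) == 0x8301100.
	 */
	return cpt_read_csr64(base, CPTX_PF_VFX_MBOXX(0, vf, word));
}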
diff --git a/drivers/crypto/cavium/cpt/cpt_hw_types.h b/drivers/crypto/cavium/cpt/cpt_hw_types.h
new file mode 100644
index 0000000..2796694
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/cpt_hw_types.h
@@ -0,0 +1,658 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __CPT_HW_TYPES_H
+#define __CPT_HW_TYPES_H
+
+#include "cpt_common.h"
+
+/**
+ * Enumeration cpt_comp_e
+ *
+ * CPT Completion Enumeration
+ * Enumerates the values of CPT_RES_S[COMPCODE].
+ */
+enum cpt_comp_e {
+	CPT_COMP_E_NOTDONE = 0x00,
+	CPT_COMP_E_GOOD = 0x01,
+	CPT_COMP_E_FAULT = 0x02,
+	CPT_COMP_E_SWERR = 0x03,
+	CPT_COMP_E_LAST_ENTRY = 0xFF
+};
+
+/**
+ * Structure cpt_inst_s
+ *
+ * CPT Instruction Structure
+ * This structure specifies the instruction layout. Instructions are
+ * stored in memory as little-endian unless CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_inst_s_s
+ * Word 0
+ * doneint:1 Done interrupt.
+ *	0 = No interrupts related to this instruction.
+ *	1 = When the instruction completes, CPT()_VQ()_DONE[DONE] will be
+ *	incremented,and based on the rules described there an interrupt may
+ *	occur.
+ * Word 1
+ * res_addr [127: 64] Result IOVA.
+ *	If nonzero, specifies where to write CPT_RES_S.
+ *	If zero, no result structure will be written.
+ *	Address must be 16-byte aligned.
+ *	Bits <63:49> are ignored by hardware; software should use a
+ *	sign-extended bit <48> for forward compatibility.
+ * Word 2
+ *  grp:10 [171:162] If [WQ_PTR] is nonzero, the SSO guest-group to use when
+ *	CPT submits work to the SSO.
+ *	For the SSO to not discard the add-work request, FPA_PF_MAP() must map
+ *	[GRP] and CPT()_PF_Q()_GMCTL[GMID] as valid.
+ *  tt:2 [161:160] If [WQ_PTR] is nonzero, the SSO tag type to use when CPT
+ *	submits work to the SSO.
+ *  tag:32 [159:128] If [WQ_PTR] is nonzero, the SSO tag to use when CPT
+ *	submits work to SSO.
+ * Word 3
+ *  wq_ptr [255:192] If [WQ_PTR] is nonzero, it is a pointer to a
+ *	work-queue entry that CPT submits to the SSO after all context,
+ *	output data, and result write operations are visible to other
+ *	CNXXXX units and the cores. Bits <2:0> must be zero.
+ *	Bits <63:49> are ignored by hardware; software should
+ *	use a sign-extended bit <48> for forward compatibility.
+ *	Internal:
+ *	Bits <63:49>, <2:0> are ignored by hardware, treated as always 0x0.
+ * Word 4
+ *  ei0; [319:256] Engine instruction word 0. Passed to the AE/SE.
+ * Word 5
+ *  ei1; [383:320] Engine instruction word 1. Passed to the AE/SE.
+ * Word 6
+ *  ei2; [447:384] Engine instruction word 2. Passed to the AE/SE.
+ * Word 7
+ *  ei3; [511:448] Engine instruction word 3. Passed to the AE/SE.
+ *
+ */
+union cpt_inst_s {
+	u64 u[8];
+	struct cpt_inst_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_17_63:47;
+		u64 doneint:1;
+		u64 reserved_0_15:16;
+#else /* Word 0 - Little Endian */
+		u64 reserved_0_15:16;
+		u64 doneint:1;
+		u64 reserved_17_63:47;
+#endif /* Word 0 - End */
+		u64 res_addr;
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+		u64 reserved_172_191:20;
+		u64 grp:10;
+		u64 tt:2;
+		u64 tag:32;
+#else /* Word 2 - Little Endian */
+		u64 tag:32;
+		u64 tt:2;
+		u64 grp:10;
+		u64 reserved_172_191:20;
+#endif /* Word 2 - End */
+		u64 wq_ptr;
+		u64 ei0;
+		u64 ei1;
+		u64 ei2;
+		u64 ei3;
+	} s;
+};
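/*
 * An illustrative sketch, not part of the driver sources, of how the word
 * layout documented above maps onto the bitfields: set DONEINT, point
 * RES_ADDR at a 16-byte-aligned CPT_RES_S buffer, and leave WQ_PTR at zero
 * when no SSO work-queue entry is wanted.  The engine words ei0..ei3 are
 * opaque to this layer (their meaning comes from the microcode), and the
 * function name is hypothetical.
 */
static inline void example_fill_inst(union cpt_inst_s *inst, u64 res_iova,
				     u64 ei0, u64 ei1, u64 ei2, u64 ei3)
{
	memset(inst, 0, sizeof(*inst));
	inst->s.doneint = 1;	/* bump CPT()_VQ()_DONE[DONE] on completion */
	inst->s.res_addr = res_iova;
	inst->s.wq_ptr = 0;	/* no add-work to the SSO */
	inst->s.ei0 = ei0;
	inst->s.ei1 = ei1;
	inst->s.ei2 = ei2;
	inst->s.ei3 = ei3;
}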
+
+/**
+ * Structure cpt_res_s
+ *
+ * CPT Result Structure
+ * The CPT coprocessor writes the result structure after it completes a
+ * CPT_INST_S instruction. The result structure is exactly 16 bytes, and
+ * each instruction completion produces exactly one result structure.
+ *
+ * This structure is stored in memory as little-endian unless
+ * CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_res_s_s
+ * Word 0
+ *  doneint:1 [16:16] Done interrupt. This bit is copied from the
+ *	corresponding instruction's CPT_INST_S[DONEINT].
+ *  compcode:8 [7:0] Indicates completion/error status of the CPT coprocessor
+ *	for the	associated instruction, as enumerated by CPT_COMP_E.
+ *	Core software may write the memory location containing [COMPCODE] to
+ *	0x0 before ringing the doorbell, and then poll for completion by
+ *	checking for a nonzero value.
+ *	Once the core observes a nonzero [COMPCODE] value in this case, the CPT
+ *	coprocessor will have also completed L2/DRAM write operations.
+ * Word 1
+ *  reserved
+ *
+ */
+union cpt_res_s {
+	u64 u[2];
+	struct cpt_res_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_17_63:47;
+		u64 doneint:1;
+		u64 reserved_8_15:8;
+		u64 compcode:8;
+#else /* Word 0 - Little Endian */
+		u64 compcode:8;
+		u64 reserved_8_15:8;
+		u64 doneint:1;
+		u64 reserved_17_63:47;
+#endif /* Word 0 - End */
+		u64 reserved_64_127;
+	} s;
+};
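/*
 * An illustrative sketch, not part of the driver sources, spelling out the
 * polling convention described above: software clears [COMPCODE] before
 * ringing the doorbell and then polls for a nonzero value; CPT_COMP_E_GOOD
 * means success.  A real caller would bound the loop with a timeout; the
 * function name is hypothetical.
 */
static inline int example_poll_result(volatile union cpt_res_s *res)
{
	while (res->s.compcode == CPT_COMP_E_NOTDONE)
		cpu_relax();

	return res->s.compcode == CPT_COMP_E_GOOD ? 0 : -EIO;
}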
+
+/**
+ * Register (NCB) cpt#_pf_bist_status
+ *
+ * CPT PF Control Bist Status Register
+ * This register has the BIST status of memories. Each bit is the BIST result
+ * of an individual memory (per bit, 0 = pass and 1 = fail).
+ * cptx_pf_bist_status_s
+ * Word0
+ *  bstatus [29:0](RO/H) BIST status. One bit per memory, enumerated by
+ *	CPT_RAMS_E.
+ */
+union cptx_pf_bist_status {
+	u64 u;
+	struct cptx_pf_bist_status_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_30_63:34;
+		u64 bstatus:30;
+#else /* Word 0 - Little Endian */
+		u64 bstatus:30;
+		u64 reserved_30_63:34;
+#endif /* Word 0 - End */
+	} s;
+};
+
+/**
+ * Register (NCB) cpt#_pf_constants
+ *
+ * CPT PF Constants Register
+ * This register contains implementation-related parameters of CPT in CNXXXX.
+ * cptx_pf_constants_s
+ * Word 0
+ *  reserved_40_63:24 [63:40] Reserved.
+ *  epcis:8 [39:32](RO) Number of EPCI busses.
+ *  grps:8 [31:24](RO) Number of engine groups implemented.
+ *  ae:8 [23:16](RO/H) Number of AEs. In CNXXXX, for CPT0 returns 0x0,
+ *	for CPT1 returns 0x18, or less if there are fuse-disables.
+ *  se:8 [15:8](RO/H) Number of SEs. In CNXXXX, for CPT0 returns 0x30,
+ *	or less if there are fuse-disables, for CPT1 returns 0x0.
+ *  vq:8 [7:0](RO) Number of VQs.
+ */
+union cptx_pf_constants {
+	u64 u;
+	struct cptx_pf_constants_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_40_63:24;
+		u64 epcis:8;
+		u64 grps:8;
+		u64 ae:8;
+		u64 se:8;
+		u64 vq:8;
+#else /* Word 0 - Little Endian */
+		u64 vq:8;
+		u64 se:8;
+		u64 ae:8;
+		u64 grps:8;
+		u64 epcis:8;
+		u64 reserved_40_63:24;
+#endif /* Word 0 - End */
+	} s;
+};
+
+/**
+ * Register (NCB) cpt#_pf_exe_bist_status
+ *
+ * CPT PF Engine Bist Status Register
+ * This register has the BIST status of each engine.  Each bit is the
+ * BIST result of an individual engine (per bit, 0 = pass and 1 = fail).
+ * cptx_pf_exe_bist_status_s
+ * Word0
+ *  reserved_48_63:16 [63:48] reserved
+ *  bstatus:48 [47:0](RO/H) BIST status. One bit per engine.
+ *
+ */
+union cptx_pf_exe_bist_status {
+	u64 u;
+	struct cptx_pf_exe_bist_status_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_48_63:16;
+		u64 bstatus:48;
+#else /* Word 0 - Little Endian */
+		u64 bstatus:48;
+		u64 reserved_48_63:16;
+#endif /* Word 0 - End */
+	} s;
+};
+
+/**
+ * Register (NCB) cpt#_pf_q#_ctl
+ *
+ * CPT Queue Control Register
+ * This register configures queues. This register should be changed only
+ * when quiescent (see CPT()_VQ()_INPROG[INFLIGHT]).
+ * cptx_pf_qx_ctl_s
+ * Word0
+ *  reserved_60_63:4 [63:60] reserved.
+ *  aura:12; [59:48](R/W) Guest-aura for returning this queue's
+ *	instruction-chunk buffers to FPA. Only used when [INST_FREE] is set.
+ *	For the FPA to not discard the request, FPA_PF_MAP() must map
+ *	[AURA] and CPT()_PF_Q()_GMCTL[GMID] as valid.
+ *  reserved_45_47:3 [47:45] reserved.
+ *  size:13 [44:32](R/W) Command-buffer size, in number of 64-bit words per
+ *	command buffer segment. Must be 8*n + 1, where n is the number of
+ *	instructions per buffer segment.
+ *  reserved_11_31:21 [31:11] Reserved.
+ *  cont_err:1 [10:10](R/W) Continue on error.
+ *	0 = When CPT()_VQ()_MISC_INT[NWRP], CPT()_VQ()_MISC_INT[IRDE] or
+ *	CPT()_VQ()_MISC_INT[DOVF] are set by hardware or software via
+ *	CPT()_VQ()_MISC_INT_W1S, then CPT()_VQ()_CTL[ENA] is cleared.  Due to
+ *	pipelining, additional instructions may have been processed between the
+ *	instruction causing the error and the next instruction in the disabled
+ *	queue (the instruction at CPT()_VQ()_SADDR).
+ *	1 = Ignore errors and continue processing instructions.
+ *	For diagnostic use only.
+ *  inst_free:1 [9:9](R/W) Instruction FPA free. When set, when CPT reaches the
+ *	end of an instruction chunk, that chunk will be freed to the FPA.
+ *  inst_be:1 [8:8](R/W) Instruction big-endian control. When set, instructions,
+ *	instruction next chunk pointers, and result structures are stored in
+ *	big-endian format in memory.
+ *  iqb_ldwb:1 [7:7](R/W) Instruction load don't write back.
+ *	0 = The hardware issues NCB transient load (LDT) towards the cache,
+ *	which, if the line hits and is dirty, will cause the line to be
+ *	written back before being replaced.
+ *	1 = The hardware issues NCB LDWB read-and-invalidate command towards
+ *	the cache when fetching the last word of instructions; as a result the
+ *	line will not be written back when replaced.  This improves
+ *	performance, but software must not read the instructions after they are
+ *	posted to the hardware.	Reads that do not consume the last word of a
+ *	cache line always use LDI.
+ *  reserved_4_6:3 [6:4] Reserved.
+ *  grp:3; [3:1](R/W) Engine group.
+ *  pri:1; [0:0](R/W) Queue priority.
+ *	1 = This queue has higher priority. Round-robin between higher
+ *	priority queues.
+ *	0 = This queue has lower priority. Round-robin between lower
+ *	priority queues.
+ */
+union cptx_pf_qx_ctl {
+	u64 u;
+	struct cptx_pf_qx_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_60_63:4;
+		u64 aura:12;
+		u64 reserved_45_47:3;
+		u64 size:13;
+		u64 reserved_11_31:21;
+		u64 cont_err:1;
+		u64 inst_free:1;
+		u64 inst_be:1;
+		u64 iqb_ldwb:1;
+		u64 reserved_4_6:3;
+		u64 grp:3;
+		u64 pri:1;
+#else /* Word 0 - Little Endian */
+		u64 pri:1;
+		u64 grp:3;
+		u64 reserved_4_6:3;
+		u64 iqb_ldwb:1;
+		u64 inst_be:1;
+		u64 inst_free:1;
+		u64 cont_err:1;
+		u64 reserved_11_31:21;
+		u64 size:13;
+		u64 reserved_45_47:3;
+		u64 aura:12;
+		u64 reserved_60_63:4;
+#endif /* Word 0 - End */
+	} s;
+};
+
+/**
+ * Register (NCB) cpt#_vq#_saddr
+ *
+ * CPT Queue Starting Buffer Address Registers
+ * These registers set the instruction buffer starting address.
+ * cptx_vqx_saddr_s
+ * Word0
+ *  reserved_49_63:15 [63:49] Reserved.
+ *  ptr:43 [48:6](R/W/H) Instruction buffer IOVA <48:6> (64-byte aligned).
+ *	When written, it is the initial buffer starting address; when read,
+ *	it is the next read pointer to be requested from L2C. The PTR field
+ *	is overwritten with the next pointer each time that the command buffer
+ *	segment is exhausted. New commands will then be read from the newly
+ *	specified command buffer pointer.
+ *  reserved_0_5:6 [5:0] Reserved.
+ *
+ */
+union cptx_vqx_saddr {
+	u64 u;
+	struct cptx_vqx_saddr_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_49_63:15;
+		u64 ptr:43;
+		u64 reserved_0_5:6;
+#else /* Word 0 - Little Endian */
+		u64 reserved_0_5:6;
+		u64 ptr:43;
+		u64 reserved_49_63:15;
+#endif /* Word 0 - End */
+	} s;
+};
+
+/**
+ * Register (NCB) cpt#_vq#_misc_ena_w1s
+ *
+ * CPT Queue Misc Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ * cptx_vqx_misc_ena_w1s_s
+ * Word0
+ * reserved_5_63:59 [63:5] Reserved.
+ * swerr:1 [4:4](R/W1S/H) Reads or sets enable for
+ *	CPT(0..1)_VQ(0..63)_MISC_INT[SWERR].
+ * nwrp:1 [3:3](R/W1S/H) Reads or sets enable for
+ *	CPT(0..1)_VQ(0..63)_MISC_INT[NWRP].
+ * irde:1 [2:2](R/W1S/H) Reads or sets enable for
+ *	CPT(0..1)_VQ(0..63)_MISC_INT[IRDE].
+ * dovf:1 [1:1](R/W1S/H) Reads or sets enable for
+ *	CPT(0..1)_VQ(0..63)_MISC_INT[DOVF].
+ * mbox:1 [0:0](R/W1S/H) Reads or sets enable for
+ *	CPT(0..1)_VQ(0..63)_MISC_INT[MBOX].
+ *
+ */
+union cptx_vqx_misc_ena_w1s {
+	u64 u;
+	struct cptx_vqx_misc_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_5_63:59;
+		u64 swerr:1;
+		u64 nwrp:1;
+		u64 irde:1;
+		u64 dovf:1;
+		u64 mbox:1;
+#else /* Word 0 - Little Endian */
+		u64 mbox:1;
+		u64 dovf:1;
+		u64 irde:1;
+		u64 nwrp:1;
+		u64 swerr:1;
+		u64 reserved_5_63:59;
+#endif /* Word 0 - End */
+	} s;
+};
+
+/**
+ * Register (NCB) cpt#_vq#_doorbell
+ *
+ * CPT Queue Doorbell Registers
+ * Doorbells for the CPT instruction queues.
+ * cptx_vqx_doorbell_s
+ * Word0
+ *  reserved_20_63:44 [63:20] Reserved.
+ *  dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add
+ *	to the CPT instruction doorbell count. The readback value is the
+ *	current number of pending doorbell requests. If the counter overflows,
+ *	CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to
+ *	zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF],
+ *	then write a value of 2^20 minus the read [DBELL_CNT], then write one
+ *	to CPT()_VQ()_MISC_INT_W1C[DBELL_DOVF] and
+ *	CPT()_VQ()_MISC_INT_ENA_W1S[DBELL_DOVF]. The value written must be a
+ *	multiple of 8; all CPT instructions are 8 words and therefore require
+ *	a doorbell count that is a multiple of 8.
+ */
+union cptx_vqx_doorbell {
+	u64 u;
+	struct cptx_vqx_doorbell_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_20_63:44;
+		u64 dbell_cnt:20;
+#else /* Word 0 - Little Endian */
+		u64 dbell_cnt:20;
+		u64 reserved_20_63:44;
+#endif /* Word 0 - End */
+	} s;
+};
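/*
 * An illustrative sketch, not part of the driver sources, of the doorbell
 * write described above: the register takes the number of 64-bit
 * instruction-queue words being made available, and since every CPT
 * instruction is 8 words the value must be a multiple of 8.  The function
 * name is hypothetical; the VF driver's real helper is
 * cptvf_write_vq_doorbell().
 */
static inline void example_ring_doorbell(u8 __iomem *base, int qno,
					 u32 nr_insts)
{
	union cptx_vqx_doorbell dbell;

	dbell.u = 0;
	dbell.s.dbell_cnt = nr_insts * 8;	/* 8 words per instruction */
	cpt_write_csr64(base, CPTX_VQX_DOORBELL(0, qno), dbell.u);
}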
+
+/**
+ * Register (NCB) cpt#_vq#_inprog
+ *
+ * CPT Queue In Progress Count Registers
+ * These registers contain the per-queue instruction in flight registers.
+ * cptx_vqx_inprog_s
+ * Word0
+ *  reserved_8_63:56 [63:8] Reserved.
+ *  inflight:8 [7:0](RO/H) Inflight count. Counts the number of instructions
+ *	for the VF for which CPT is fetching, executing or responding to
+ *	instructions. However this does not include any interrupts that are
+ *	awaiting software handling (CPT()_VQ()_DONE[DONE] != 0x0).
+ *	A queue may not be reconfigured until:
+ *	1. CPT()_VQ()_CTL[ENA] is cleared by software.
+ *	2. [INFLIGHT] is polled until it equals zero.
+ */
+union cptx_vqx_inprog {
+	u64 u;
+	struct cptx_vqx_inprog_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_8_63:56;
+		u64 inflight:8;
+#else /* Word 0 - Little Endian */
+		u64 inflight:8;
+		u64 reserved_8_63:56;
+#endif /* Word 0 - End */
+	} s;
+};
+
+/**
+ * Register (NCB) cpt#_vq#_misc_int
+ *
+ * CPT Queue Misc Interrupt Register
+ * These registers contain the per-queue miscellaneous interrupts.
+ * cptx_vqx_misc_int_s
+ * Word 0
+ *  reserved_5_63:59 [63:5] Reserved.
+ *  swerr:1 [4:4](R/W1C/H) Software error from engines.
+ *  nwrp:1  [3:3](R/W1C/H) NCB result write response error.
+ *  irde:1  [2:2](R/W1C/H) Instruction NCB read response error.
+ *  dovf:1 [1:1](R/W1C/H) Doorbell overflow.
+ *  mbox:1 [0:0](R/W1C/H) PF to VF mailbox interrupt. Set when
+ *	CPT()_VF()_PF_MBOX(0) is written.
+ *
+ */
+union cptx_vqx_misc_int {
+	u64 u;
+	struct cptx_vqx_misc_int_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_5_63:59;
+		u64 swerr:1;
+		u64 nwrp:1;
+		u64 irde:1;
+		u64 dovf:1;
+		u64 mbox:1;
+#else /* Word 0 - Little Endian */
+		u64 mbox:1;
+		u64 dovf:1;
+		u64 irde:1;
+		u64 nwrp:1;
+		u64 swerr:1;
+		u64 reserved_5_63:59;
+#endif /* Word 0 - End */
+	} s;
+};
+
+/**
+ * Register (NCB) cpt#_vq#_done_ack
+ *
+ * CPT Queue Done Count Ack Registers
+ * This register is written by software to acknowledge interrupts.
+ * cptx_vqx_done_ack_s
+ * Word0
+ *  reserved_20_63:44 [63:20] Reserved.
+ *  done_ack:20 [19:0](R/W/H) Number of decrements to CPT()_VQ()_DONE[DONE].
+ *	Reads CPT()_VQ()_DONE[DONE]. Written by software to acknowledge
+ *	interrupts. If CPT()_VQ()_DONE[DONE] is still nonzero the interrupt
+ *	will be re-sent if the conditions described in CPT()_VQ()_DONE[DONE]
+ *	are satisfied.
+ *
+ */
+union cptx_vqx_done_ack {
+	u64 u;
+	struct cptx_vqx_done_ack_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_20_63:44;
+		u64 done_ack:20;
+#else /* Word 0 - Little Endian */
+		u64 done_ack:20;
+		u64 reserved_20_63:44;
+#endif /* Word 0 - End */
+	} s;
+};
+
+/**
+ * Register (NCB) cpt#_vq#_done
+ *
+ * CPT Queue Done Count Registers
+ * These registers contain the per-queue instruction done count.
+ * cptx_vqx_done_s
+ * Word0
+ *  reserved_20_63:44 [63:20] Reserved.
+ *  done:20 [19:0](R/W/H) Done count. When CPT_INST_S[DONEINT] is set,
+ *	CPT()_VQ()_DONE[DONE] is incremented when that instruction
+ *	completes. Writes to this field are for diagnostic use only;
+ *	instead, software writes CPT()_VQ()_DONE_ACK with the number of
+ *	decrements for this field.
+ *	Interrupts are sent as follows:
+ *	* When CPT()_VQ()_DONE[DONE] = 0, then no results are pending, the
+ *	interrupt coalescing timer is held to zero, and an interrupt is not
+ *	sent.
+ *	* When CPT()_VQ()_DONE[DONE] != 0, then the interrupt coalescing timer
+ *	counts. If the counter is >= CPT()_VQ()_DONE_WAIT[TIME_WAIT]*1024, or
+ *	CPT()_VQ()_DONE[DONE] >= CPT()_VQ()_DONE_WAIT[NUM_WAIT], i.e. enough
+ *	time has passed or enough results have arrived, then the interrupt is
+ *	sent.
+ *	* When CPT()_VQ()_DONE_ACK is written (or CPT()_VQ()_DONE is written
+ *	but this is not typical), the interrupt coalescing timer restarts.
+ *	Note that after decrementing, this interrupt equation is recomputed;
+ *	for example, if CPT()_VQ()_DONE[DONE] >= CPT()_VQ()_DONE_WAIT[NUM_WAIT]
+ *	and the timer is zero, the interrupt will be resent immediately.
+ *	(This covers the race case between software acknowledging an interrupt
+ *	and a result returning.)
+ *	* When CPT()_VQ()_DONE_ENA_W1S[DONE] = 0, interrupts are not sent,
+ *	but the counting described above still occurs.
+ *	Since CPT instructions complete out-of-order, if software is using
+ *	completion interrupts the suggested scheme is to request a DONEINT on
+ *	each request, and when an interrupt arrives perform a "greedy" scan for
+ *	completions; even if a later command is acknowledged first this will
+ *	not result in missing a completion.
+ *	Software is responsible for making sure [DONE] does not overflow;
+ *	for example by ensuring there are not more than 2^20-1 instructions in
+ *	flight that may request interrupts.
+ *
+ */
+union cptx_vqx_done {
+	u64 u;
+	struct cptx_vqx_done_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_20_63:44;
+		u64 done:20;
+#else /* Word 0 - Little Endian */
+		u64 done:20;
+		u64 reserved_20_63:44;
+#endif /* Word 0 - End */
+	} s;
+};
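/*
 * An illustrative sketch, not part of the driver sources, following the
 * acknowledge pattern suggested above: on a DONEINT interrupt, read [DONE],
 * greedily process that many completed results, then write the same count to
 * CPT()_VQ()_DONE_ACK so the pending count drops and the coalescing timer
 * restarts.  The function name is hypothetical.
 */
static inline void example_done_irq(u8 __iomem *base, int qno)
{
	union cptx_vqx_done done;

	done.u = cpt_read_csr64(base, CPTX_VQX_DONE(0, qno));
	if (!done.s.done)
		return;

	/* ... scan and complete up to done.s.done finished requests ... */

	cpt_write_csr64(base, CPTX_VQX_DONE_ACK(0, qno), done.s.done);
}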
+
+/**
+ * Register (NCB) cpt#_vq#_done_wait
+ *
+ * CPT Queue Done Interrupt Coalescing Wait Registers
+ * Specifies the per queue interrupt coalescing settings.
+ * cptx_vqx_done_wait_s
+ * Word0
+ *  reserved_48_63:16 [63:48] Reserved.
+ *  time_wait:16; [47:32](R/W) Time hold-off. When CPT()_VQ()_DONE[DONE] = 0
+ *	or CPT()_VQ()_DONE_ACK is written, the timer is cleared. When the timer
+ *	reaches [TIME_WAIT]*1024, interrupt coalescing ends;
+ *	see CPT()_VQ()_DONE[DONE]. If 0x0, time coalescing is disabled.
+ *  reserved_20_31:12 [31:20] Reserved.
+ *  num_wait:20 [19:0](R/W) Number of messages hold-off.
+ *	When CPT()_VQ()_DONE[DONE] >= [NUM_WAIT], interrupt coalescing ends;
+ *	see CPT()_VQ()_DONE[DONE]. If 0x0, same behavior as 0x1.
+ *
+ */
+union cptx_vqx_done_wait {
+	u64 u;
+	struct cptx_vqx_done_wait_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_48_63:16;
+		u64 time_wait:16;
+		u64 reserved_20_31:12;
+		u64 num_wait:20;
+#else /* Word 0 - Little Endian */
+		u64 num_wait:20;
+		u64 reserved_20_31:12;
+		u64 time_wait:16;
+		u64 reserved_48_63:16;
+#endif /* Word 0 - End */
+	} s;
+};
+
+/**
+ * Register (NCB) cpt#_vq#_done_ena_w1s
+ *
+ * CPT Queue Done Interrupt Enable Set Registers
+ * Writing 1 to these registers will enable the DONEINT interrupt for the queue.
+ * cptx_vqx_done_ena_w1s_s
+ * Word0
+ *  reserved_1_63:63 [63:1] Reserved.
+ *  done:1 [0:0](R/W1S/H) Writing 1 will enable DONEINT for this queue.
+ *	Writing 0 has no effect. Reads return the enable bit.
+ */
+union cptx_vqx_done_ena_w1s {
+	u64 u;
+	struct cptx_vqx_done_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_1_63:63;
+		u64 done:1;
+#else /* Word 0 - Little Endian */
+		u64 done:1;
+		u64 reserved_1_63:63;
+#endif /* Word 0 - End */
+	} s;
+};
+
+/**
+ * Register (NCB) cpt#_vq#_ctl
+ *
+ * CPT VF Queue Control Registers
+ * This register configures queues. This register should be changed (other than
+ * clearing [ENA]) only when quiescent (see CPT()_VQ()_INPROG[INFLIGHT]).
+ * cptx_vqx_ctl_s
+ * Word0
+ *  reserved_1_63:63 [63:1] Reserved.
+ *  ena:1 [0:0](R/W/H) Enables the logical instruction queue.
+ *	See also CPT()_PF_Q()_CTL[CONT_ERR] and	CPT()_VQ()_INPROG[INFLIGHT].
+ *	1 = Queue is enabled.
+ *	0 = Queue is disabled.
+ */
+union cptx_vqx_ctl {
+	u64 u;
+	struct cptx_vqx_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+		u64 reserved_1_63:63;
+		u64 ena:1;
+#else /* Word 0 - Little Endian */
+		u64 ena:1;
+		u64 reserved_1_63:63;
+#endif /* Word 0 - End */
+	} s;
+};
+#endif /*__CPT_HW_TYPES_H*/
diff --git a/drivers/crypto/cavium/cpt/cptpf.h b/drivers/crypto/cavium/cpt/cptpf.h
new file mode 100644
index 0000000..c0556c5
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/cptpf.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __CPTPF_H
+#define __CPTPF_H
+
+#include "cpt_common.h"
+
+#define CSR_DELAY 30
+#define CPT_MAX_CORE_GROUPS 8
+#define CPT_MAX_SE_CORES 10
+#define CPT_MAX_AE_CORES 6
+#define CPT_MAX_TOTAL_CORES (CPT_MAX_SE_CORES + CPT_MAX_AE_CORES)
+#define CPT_MAX_VF_NUM 16
+#define	CPT_PF_MSIX_VECTORS 3
+#define CPT_PF_INT_VEC_E_MBOXX(a) (0x02 + (a))
+#define CPT_UCODE_VERSION_SZ 32
+struct cpt_device;
+
+struct microcode {
+	u8 is_mc_valid;
+	u8 is_ae;
+	u8 group;
+	u8 num_cores;
+	u32 code_size;
+	u64 core_mask;
+	u8 version[CPT_UCODE_VERSION_SZ];
+	/* Base info */
+	dma_addr_t phys_base;
+	void *code;
+};
+
+struct cpt_vf_info {
+	u8 state;
+	u8 priority;
+	u8 id;
+	u32 qlen;
+};
+
+/**
+ * cpt device structure
+ */
+struct cpt_device {
+	u16 flags;	/* Flags to hold device status bits */
+	u8 num_vf_en; /* Number of VFs enabled (0...CPT_MAX_VF_NUM) */
+	struct cpt_vf_info vfinfo[CPT_MAX_VF_NUM]; /* Per VF info */
+
+	void __iomem *reg_base; /* Register start address */
+	struct pci_dev *pdev; /* pci device handle */
+
+	struct microcode mcode[CPT_MAX_CORE_GROUPS];
+	u8 next_mc_idx; /* next microcode index */
+	u8 next_group;
+	u8 max_se_cores;
+	u8 max_ae_cores;
+};
+
+void cpt_mbox_intr_handler(struct cpt_device *cpt, int mbx);
+#endif /* __CPTPF_H */
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
new file mode 100644
index 0000000..06ad85a
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -0,0 +1,675 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/printk.h>
+#include <linux/version.h>
+
+#include "cptpf.h"
+
+#define DRV_NAME	"thunder-cpt"
+#define DRV_VERSION	"1.0"
+
+static u32 num_vfs = 4; /* Default: 4 VFs enabled */
+module_param(num_vfs, uint, 0444);
+MODULE_PARM_DESC(num_vfs, "Number of VFs to enable (4-16)");
+
+/*
+ * Disable cores specified by coremask
+ */
+static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
+			      u8 type, u8 grp)
+{
+	u64 pf_exe_ctl;
+	u32 timeout = 100;
+	u64 grpmask = 0;
+	struct device *dev = &cpt->pdev->dev;
+
+	if (type == AE_TYPES)
+		coremask = (coremask << cpt->max_se_cores);
+
+	/* Disengage the cores from groups */
+	grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
+			(grpmask & ~coremask));
+	udelay(CSR_DELAY);
+	grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
+	while (grp & coremask) {
+		dev_err(dev, "Cores still busy %llx", coremask);
+		grp = cpt_read_csr64(cpt->reg_base,
+				     CPTX_PF_EXEC_BUSY(0));
+		if (!timeout--)
+			break;
+
+		udelay(CSR_DELAY);
+	}
+
+	/* Disable the cores */
+	pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
+			(pf_exe_ctl & ~coremask));
+	udelay(CSR_DELAY);
+}
+
+/*
+ * Enable cores specified by coremask
+ */
+static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask,
+			     u8 type)
+{
+	u64 pf_exe_ctl;
+
+	if (type == AE_TYPES)
+		coremask = (coremask << cpt->max_se_cores);
+
+	pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
+			(pf_exe_ctl | coremask));
+	udelay(CSR_DELAY);
+}
+
+static void cpt_configure_group(struct cpt_device *cpt, u8 grp,
+				u64 coremask, u8 type)
+{
+	u64 pf_gx_en = 0;
+
+	if (type == AE_TYPES)
+		coremask = (coremask << cpt->max_se_cores);
+
+	pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
+			(pf_gx_en | coremask));
+	udelay(CSR_DELAY);
+}
+
+static void cpt_disable_mbox_interrupts(struct cpt_device *cpt)
+{
+	/* Clear mbox(0) interrupts for all VFs */
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull);
+}
+
+static void cpt_disable_ecc_interrupts(struct cpt_device *cpt)
+{
+	/* Clear ecc(0) interrupts for all VFs */
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_ECC0_ENA_W1C(0), ~0ull);
+}
+
+static void cpt_disable_exec_interrupts(struct cpt_device *cpt)
+{
+	/* Clear exec interrupts for all VFs */
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_EXEC_ENA_W1C(0), ~0ull);
+}
+
+static void cpt_disable_all_interrupts(struct cpt_device *cpt)
+{
+	cpt_disable_mbox_interrupts(cpt);
+	cpt_disable_ecc_interrupts(cpt);
+	cpt_disable_exec_interrupts(cpt);
+}
+
+static void cpt_enable_mbox_interrupts(struct cpt_device *cpt)
+{
+	/* Set mbox(0) interrupts for all VFs */
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull);
+}
+
+static int cpt_load_microcode(struct cpt_device *cpt, struct microcode *mcode)
+{
+	int ret = 0, core = 0, shift = 0;
+	u32 total_cores = 0;
+	struct device *dev = &cpt->pdev->dev;
+
+	if (!mcode || !mcode->code) {
+		dev_err(dev, "Either the mcode or its data is NULL\n");
+		return -EINVAL;
+	}
+
+	if (mcode->code_size == 0) {
+		dev_err(dev, "microcode size is 0\n");
+		return -EINVAL;
+	}
+
+	/* Assumes 0-9 are SE cores for UCODE_BASE registers and
+	 * AE core bases follow
+	 */
+	if (mcode->is_ae) {
+		core = CPT_MAX_SE_CORES; /* start counting from 10 */
+		total_cores = CPT_MAX_TOTAL_CORES; /* up to 15 */
+	} else {
+		core = 0; /* start counting from 0 */
+		total_cores = CPT_MAX_SE_CORES; /* up to 9 */
+	}
+
+	/* Point to microcode for each core of the group */
+	for (; core < total_cores ; core++, shift++) {
+		if (mcode->core_mask & (1 << shift)) {
+			cpt_write_csr64(cpt->reg_base,
+					CPTX_PF_ENGX_UCODE_BASE(0, core),
+					(u64)mcode->phys_base);
+		}
+	}
+	return ret;
+}
+
+static int do_cpt_init(struct cpt_device *cpt, struct microcode *mcode)
+{
+	int ret = 0;
+	struct device *dev = &cpt->pdev->dev;
+
+	/* Make device not ready */
+	cpt->flags &= ~CPT_FLAG_DEVICE_READY;
+	/* Disable All PF interrupts */
+	cpt_disable_all_interrupts(cpt);
+	/* Calculate mcode group and coremasks */
+	if (mcode->is_ae) {
+		if (mcode->num_cores > cpt->max_ae_cores) {
+			dev_err(dev, "Requested for more cores than available AE cores\n");
+			ret = -EINVAL;
+			goto cpt_init_fail;
+		}
+
+		if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
+			dev_err(dev, "Can't load, all eight microcode groups in use");
+			return -ENFILE;
+		}
+
+		mcode->group = cpt->next_group;
+		/* Convert requested cores to mask */
+		mcode->core_mask = GENMASK(mcode->num_cores, 0);
+		cpt_disable_cores(cpt, mcode->core_mask, AE_TYPES,
+				  mcode->group);
+		/* Load microcode for AE engines */
+		ret = cpt_load_microcode(cpt, mcode);
+		if (ret) {
+			dev_err(dev, "Microcode load Failed for %s\n",
+				mcode->version);
+			goto cpt_init_fail;
+		}
+		cpt->next_group++;
+		/* Configure group mask for the mcode */
+		cpt_configure_group(cpt, mcode->group, mcode->core_mask,
+				    AE_TYPES);
+		/* Enable AE cores for the group mask */
+		cpt_enable_cores(cpt, mcode->core_mask, AE_TYPES);
+	} else {
+		if (mcode->num_cores > cpt->max_se_cores) {
+			dev_err(dev, "Requested for more cores than available SE cores\n");
+			ret = -EINVAL;
+			goto cpt_init_fail;
+		}
+		if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
+			dev_err(dev, "Can't load, all eight microcode groups in use");
+			return -ENFILE;
+		}
+
+		mcode->group = cpt->next_group;
+		/* Convert requested cores to mask */
+		mcode->core_mask = GENMASK(mcode->num_cores, 0);
+		cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES,
+				  mcode->group);
+		/* Load microcode for SE engines */
+		ret = cpt_load_microcode(cpt, mcode);
+		if (ret) {
+			dev_err(dev, "Microcode load Failed for %s\n",
+				mcode->version);
+			goto cpt_init_fail;
+		}
+		cpt->next_group++;
+		/* Configure group mask for the mcode */
+		cpt_configure_group(cpt, mcode->group, mcode->core_mask,
+				    SE_TYPES);
+		/* Enable SE cores for the group mask */
+		cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES);
+	}
+
+	/* Enable PF mailbox interrupts */
+	cpt_enable_mbox_interrupts(cpt);
+	cpt->flags |= CPT_FLAG_DEVICE_READY;
+
+	return ret;
+
+cpt_init_fail:
+	/* Enable PF mailbox interrupts */
+	cpt_enable_mbox_interrupts(cpt);
+
+	return ret;
+}
+
+struct ucode_header {
+	u8 version[CPT_UCODE_VERSION_SZ];
+	u32 code_length;
+	u32 data_length;
+	u64 sram_address;
+};
+
+static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
+{
+	const struct firmware *fw_entry;
+	struct device *dev = &cpt->pdev->dev;
+	struct ucode_header *ucode;
+	struct microcode *mcode;
+	int j, ret = 0;
+
+	ret = request_firmware(&fw_entry, fw, dev);
+	if (ret)
+		return ret;
+
+	ucode = (struct ucode_header *)fw_entry->data;
+	mcode = &cpt->mcode[cpt->next_mc_idx];
+	memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
+	mcode->code_size = ntohl(ucode->code_length) * 2;
+	if (!mcode->code_size) {
+		ret = -EINVAL;
+		goto fw_release;
+	}
+
+	mcode->is_ae = is_ae;
+	mcode->core_mask = 0ULL;
+	mcode->num_cores = is_ae ? CPT_MAX_AE_CORES : CPT_MAX_SE_CORES;
+
+	/* Allocate DMA-able space */
+	mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size,
+					  &mcode->phys_base, GFP_KERNEL);
+	if (!mcode->code) {
+		dev_err(dev, "Unable to allocate space for microcode");
+		ret = -ENOMEM;
+		goto fw_release;
+	}
+
+	memcpy((void *)mcode->code, (void *)(fw_entry->data + sizeof(*ucode)),
+	       mcode->code_size);
+
+	/* Byte swap 64-bit */
+	for (j = 0; j < (mcode->code_size / 8); j++)
+		((u64 *)mcode->code)[j] = cpu_to_be64(((u64 *)mcode->code)[j]);
+	/*  MC needs 16-bit swap */
+	for (j = 0; j < (mcode->code_size / 2); j++)
+		((u16 *)mcode->code)[j] = cpu_to_be16(((u16 *)mcode->code)[j]);
+
+	dev_dbg(dev, "mcode->code_size = %u\n", mcode->code_size);
+	dev_dbg(dev, "mcode->is_ae = %u\n", mcode->is_ae);
+	dev_dbg(dev, "mcode->num_cores = %u\n", mcode->num_cores);
+	dev_dbg(dev, "mcode->code = %llx\n", (u64)mcode->code);
+	dev_dbg(dev, "mcode->phys_base = %llx\n", mcode->phys_base);
+
+	ret = do_cpt_init(cpt, mcode);
+	if (ret) {
+		dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
+		goto fw_release;
+	}
+
+	dev_info(dev, "Microcode Loaded %s\n", mcode->version);
+	mcode->is_mc_valid = 1;
+	cpt->next_mc_idx++;
+
+fw_release:
+	release_firmware(fw_entry);
+
+	return ret;
+}
+
+static int cpt_ucode_load(struct cpt_device *cpt)
+{
+	int ret = 0;
+	struct device *dev = &cpt->pdev->dev;
+
+	ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-ae.out", true);
+	if (ret) {
+		dev_err(dev, "ae:cpt_ucode_load failed with ret: %d\n", ret);
+		return ret;
+	}
+	ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-se.out", false);
+	if (ret) {
+		dev_err(dev, "se:cpt_ucode_load failed with ret: %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static irqreturn_t cpt_mbx0_intr_handler(int irq, void *cpt_irq)
+{
+	struct cpt_device *cpt = (struct cpt_device *)cpt_irq;
+
+	cpt_mbox_intr_handler(cpt, 0);
+
+	return IRQ_HANDLED;
+}
+
+static void cpt_reset(struct cpt_device *cpt)
+{
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_RESET(0), 1);
+}
+
+static void cpt_find_max_enabled_cores(struct cpt_device *cpt)
+{
+	union cptx_pf_constants pf_cnsts = {0};
+
+	pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0));
+	cpt->max_se_cores = pf_cnsts.s.se;
+	cpt->max_ae_cores = pf_cnsts.s.ae;
+}
+
+static u32 cpt_check_bist_status(struct cpt_device *cpt)
+{
+	union cptx_pf_bist_status bist_sts = {0};
+
+	bist_sts.u = cpt_read_csr64(cpt->reg_base,
+				    CPTX_PF_BIST_STATUS(0));
+
+	return bist_sts.u;
+}
+
+static u64 cpt_check_exe_bist_status(struct cpt_device *cpt)
+{
+	union cptx_pf_exe_bist_status bist_sts = {0};
+
+	bist_sts.u = cpt_read_csr64(cpt->reg_base,
+				    CPTX_PF_EXE_BIST_STATUS(0));
+
+	return bist_sts.u;
+}
+
+static void cpt_disable_all_cores(struct cpt_device *cpt)
+{
+	u32 grp, timeout = 100;
+	struct device *dev = &cpt->pdev->dev;
+
+	/* Disengage the cores from groups */
+	for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
+		cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), 0);
+		udelay(CSR_DELAY);
+	}
+
+	grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
+	while (grp) {
+		dev_err(dev, "Cores still busy");
+		grp = cpt_read_csr64(cpt->reg_base,
+				     CPTX_PF_EXEC_BUSY(0));
+		if (!timeout--)
+			break;
+
+		udelay(CSR_DELAY);
+	}
+	/* Disable the cores */
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0);
+}
+
+/**
+ * Ensure all cores are disengaged from all groups by
+ * calling cpt_disable_all_cores() before calling this
+ * function.
+ */
+static void cpt_unload_microcode(struct cpt_device *cpt)
+{
+	u32 grp = 0, core;
+
+	/* Free microcode bases and reset group masks */
+	for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
+		struct microcode *mcode = &cpt->mcode[grp];
+
+		if (cpt->mcode[grp].code)
+			dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
+					  mcode->code, mcode->phys_base);
+		mcode->code = NULL;
+	}
+	/* Clear UCODE_BASE registers for all engines */
+	for (core = 0; core < CPT_MAX_TOTAL_CORES; core++)
+		cpt_write_csr64(cpt->reg_base,
+				CPTX_PF_ENGX_UCODE_BASE(0, core), 0ull);
+}
+
+static int cpt_device_init(struct cpt_device *cpt)
+{
+	u64 bist;
+	struct device *dev = &cpt->pdev->dev;
+
+	/* Reset the PF when first probed */
+	cpt_reset(cpt);
+	msleep(100);
+
+	/* Check BIST status */
+	bist = (u64)cpt_check_bist_status(cpt);
+	if (bist) {
+		dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
+		return -ENODEV;
+	}
+
+	bist = cpt_check_exe_bist_status(cpt);
+	if (bist) {
+		dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
+		return -ENODEV;
+	}
+
+	/* Get CLK frequency */
+	/* Get max enabled cores */
+	cpt_find_max_enabled_cores(cpt);
+	/* Disable all cores */
+	cpt_disable_all_cores(cpt);
+	/* Reset device parameters */
+	cpt->next_mc_idx   = 0;
+	cpt->next_group = 0;
+	/* PF is ready */
+	cpt->flags |= CPT_FLAG_DEVICE_READY;
+
+	return 0;
+}
+
+static int cpt_register_interrupts(struct cpt_device *cpt)
+{
+	int ret;
+	struct device *dev = &cpt->pdev->dev;
+
+	/* Enable MSI-X */
+	ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS,
+			CPT_PF_MSIX_VECTORS, PCI_IRQ_MSIX);
+	if (ret < 0) {
+		dev_err(&cpt->pdev->dev, "Request for %d MSI-X vectors failed\n",
+			CPT_PF_MSIX_VECTORS);
+		return ret;
+	}
+
+	/* Register mailbox interrupt handlers */
+	ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)),
+			  cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
+	if (ret)
+		goto fail;
+
+	/* Enable mailbox interrupt */
+	cpt_enable_mbox_interrupts(cpt);
+	return 0;
+
+fail:
+	dev_err(dev, "Request irq failed\n");
+	pci_disable_msix(cpt->pdev);
+	return ret;
+}
+
+static void cpt_unregister_interrupts(struct cpt_device *cpt)
+{
+	free_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt);
+	pci_disable_msix(cpt->pdev);
+}
+
+static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs)
+{
+	int pos = 0;
+	int err;
+	u16 total_vf_cnt;
+	struct pci_dev *pdev = cpt->pdev;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+	if (!pos) {
+		dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
+		return -ENODEV;
+	}
+
+	cpt->num_vf_en = num_vfs; /* User requested VFs */
+	pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
+	if (total_vf_cnt < cpt->num_vf_en)
+		cpt->num_vf_en = total_vf_cnt;
+
+	if (!total_vf_cnt)
+		return 0;
+
+	/* Enable the available VFs */
+	err = pci_enable_sriov(pdev, cpt->num_vf_en);
+	if (err) {
+		dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
+			cpt->num_vf_en);
+		cpt->num_vf_en = 0;
+		return err;
+	}
+
+	/* TODO: Optionally enable static VQ priorities feature */
+
+	dev_info(&pdev->dev, "SRIOV enabled, number of VFs available: %d\n",
+		 cpt->num_vf_en);
+
+	cpt->flags |= CPT_FLAG_SRIOV_ENABLED;
+
+	return 0;
+}
+
+static int cpt_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct cpt_device *cpt;
+	int err;
+
+	if (num_vfs > 16 || num_vfs < 4) {
+		dev_warn(dev, "Invalid VF count %d, resetting it to 4 (default)\n",
+			 num_vfs);
+		num_vfs = 4;
+	}
+
+	cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
+	if (!cpt)
+		return -ENOMEM;
+
+	pci_set_drvdata(pdev, cpt);
+	cpt->pdev = pdev;
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(dev, "Failed to enable PCI device\n");
+		pci_set_drvdata(pdev, NULL);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(dev, "PCI request regions failed 0x%x\n", err);
+		goto cpt_err_disable_device;
+	}
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "Unable to get usable DMA configuration\n");
+		goto cpt_err_release_regions;
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
+		goto cpt_err_release_regions;
+	}
+
+	/* MAP PF's configuration registers */
+	cpt->reg_base = pcim_iomap(pdev, 0, 0);
+	if (!cpt->reg_base) {
+		dev_err(dev, "Cannot map config register space, aborting\n");
+		err = -ENOMEM;
+		goto cpt_err_release_regions;
+	}
+
+	/* CPT device HW initialization */
+	err = cpt_device_init(cpt);
+	if (err)
+		goto cpt_err_release_regions;
+
+	/* Register interrupts */
+	err = cpt_register_interrupts(cpt);
+	if (err)
+		goto cpt_err_release_regions;
+
+	err = cpt_ucode_load(cpt);
+	if (err)
+		goto cpt_err_unregister_interrupts;
+
+	/* Configure SRIOV */
+	err = cpt_sriov_init(cpt, num_vfs);
+	if (err)
+		goto cpt_err_unregister_interrupts;
+
+	return 0;
+
+cpt_err_unregister_interrupts:
+	cpt_unregister_interrupts(cpt);
+cpt_err_release_regions:
+	pci_release_regions(pdev);
+cpt_err_disable_device:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static void cpt_remove(struct pci_dev *pdev)
+{
+	struct cpt_device *cpt = pci_get_drvdata(pdev);
+
+	/* Disengage SE and AE cores from all groups */
+	cpt_disable_all_cores(cpt);
+	/* Unload microcodes */
+	cpt_unload_microcode(cpt);
+	cpt_unregister_interrupts(cpt);
+	pci_disable_sriov(pdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static void cpt_shutdown(struct pci_dev *pdev)
+{
+	struct cpt_device *cpt = pci_get_drvdata(pdev);
+
+	if (!cpt)
+		return;
+
+	dev_info(&pdev->dev, "Shutdown device %x:%x.\n",
+		 (u32)pdev->vendor, (u32)pdev->device);
+
+	cpt_unregister_interrupts(cpt);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+/* Supported devices */
+static const struct pci_device_id cpt_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CPT_81XX_PCI_PF_DEVICE_ID) },
+	{ 0, }  /* end of table */
+};
+
+static struct pci_driver cpt_pci_driver = {
+	.name = DRV_NAME,
+	.id_table = cpt_id_table,
+	.probe = cpt_probe,
+	.remove = cpt_remove,
+	.shutdown = cpt_shutdown,
+};
+
+module_pci_driver(cpt_pci_driver);
+
+MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
+MODULE_DESCRIPTION("Cavium Thunder CPT Physical Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cpt_id_table);
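A brief usage note: the probe path above loads the microcode images "cpt8x-mc-ae.out" and "cpt8x-mc-se.out" through request_firmware(), so both files need to be present in the firmware search path when the module loads. Per the Makefile earlier in this series the PF module is built as cptpf.ko, so the VF count can be chosen at load time with something like "modprobe cptpf num_vfs=8"; values outside 4-16 are reset to the default of 4.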
diff --git a/drivers/crypto/cavium/cpt/cptpf_mbox.c b/drivers/crypto/cavium/cpt/cptpf_mbox.c
new file mode 100644
index 0000000..20f2c6e
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/cptpf_mbox.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include "cptpf.h"
+
+static void cpt_send_msg_to_vf(struct cpt_device *cpt, int vf,
+			       struct cpt_mbox *mbx)
+{
+	/* Writing mbox(0) causes interrupt */
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1),
+			mbx->data);
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0), mbx->msg);
+}
+
+/* ACK a VF's mailbox message
+ * @vf: VF to which the ACK is to be sent
+ */
+static void cpt_mbox_send_ack(struct cpt_device *cpt, int vf,
+			      struct cpt_mbox *mbx)
+{
+	mbx->data = 0ull;
+	mbx->msg = CPT_MBOX_MSG_TYPE_ACK;
+	cpt_send_msg_to_vf(cpt, vf, mbx);
+}
+
+static void cpt_clear_mbox_intr(struct cpt_device *cpt, u32 vf)
+{
+	/* W1C for the VF */
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0), (1 << vf));
+}
+
+/*
+ *  Configure QLEN/Chunk sizes for VF
+ */
+static void cpt_cfg_qlen_for_vf(struct cpt_device *cpt, int vf, u32 size)
+{
+	union cptx_pf_qx_ctl pf_qx_ctl;
+
+	pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf));
+	pf_qx_ctl.s.size = size;
+	pf_qx_ctl.s.cont_err = true;
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u);
+}
+
+/*
+ * Configure VQ priority
+ */
+static void cpt_cfg_vq_priority(struct cpt_device *cpt, int vf, u32 pri)
+{
+	union cptx_pf_qx_ctl pf_qx_ctl;
+
+	pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf));
+	pf_qx_ctl.s.pri = pri;
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u);
+}
+
+static int cpt_bind_vq_to_grp(struct cpt_device *cpt, u8 q, u8 grp)
+{
+	struct microcode *mcode = cpt->mcode;
+	union cptx_pf_qx_ctl pf_qx_ctl;
+	struct device *dev = &cpt->pdev->dev;
+
+	if (q >= CPT_MAX_VF_NUM) {
+		dev_err(dev, "Requested queue number exceeds the maximum VF count");
+		return -EINVAL;
+	}
+	if (grp >= CPT_MAX_CORE_GROUPS) {
+		dev_err(dev, "Requested group exceeds the number of possible groups");
+		return -EINVAL;
+	}
+	if (grp >= cpt->next_mc_idx) {
+		dev_err(dev, "Requested group is higher than the available functional groups");
+		return -EINVAL;
+	}
+	pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q));
+	pf_qx_ctl.s.grp = mcode[grp].group;
+	cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q), pf_qx_ctl.u);
+	dev_dbg(dev, "VF %d TYPE %s", q, (mcode[grp].is_ae ? "AE" : "SE"));
+
+	return mcode[grp].is_ae ? AE_TYPES : SE_TYPES;
+}
+
+/* Interrupt handler to handle mailbox messages from VFs */
+static void cpt_handle_mbox_intr(struct cpt_device *cpt, int vf)
+{
+	struct cpt_vf_info *vfx = &cpt->vfinfo[vf];
+	struct cpt_mbox mbx = {};
+	int vftype;
+	struct device *dev = &cpt->pdev->dev;
+	/*
+	 * MBOX[0] contains msg
+	 * MBOX[1] contains data
+	 */
+	mbx.msg  = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0));
+	mbx.data = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1));
+	dev_dbg(dev, "%s: Mailbox msg 0x%llx from VF%d", __func__, mbx.msg, vf);
+	switch (mbx.msg) {
+	case CPT_MSG_VF_UP:
+		vfx->state = VF_STATE_UP;
+		try_module_get(THIS_MODULE);
+		cpt_mbox_send_ack(cpt, vf, &mbx);
+		break;
+	case CPT_MSG_READY:
+		mbx.msg  = CPT_MSG_READY;
+		mbx.data = vf;
+		cpt_send_msg_to_vf(cpt, vf, &mbx);
+		break;
+	case CPT_MSG_VF_DOWN:
+		/* First msg in VF teardown sequence */
+		vfx->state = VF_STATE_DOWN;
+		module_put(THIS_MODULE);
+		cpt_mbox_send_ack(cpt, vf, &mbx);
+		break;
+	case CPT_MSG_QLEN:
+		vfx->qlen = mbx.data;
+		cpt_cfg_qlen_for_vf(cpt, vf, vfx->qlen);
+		cpt_mbox_send_ack(cpt, vf, &mbx);
+		break;
+	case CPT_MSG_QBIND_GRP:
+		vftype = cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
+		if ((vftype != AE_TYPES) && (vftype != SE_TYPES)) {
+			dev_err(dev, "Queue %d binding to group %llu failed",
+				vf, mbx.data);
+		} else {
+			dev_dbg(dev, "Queue %d binding to group %llu successful",
+				vf, mbx.data);
+			mbx.msg = CPT_MSG_QBIND_GRP;
+			mbx.data = vftype;
+			cpt_send_msg_to_vf(cpt, vf, &mbx);
+		}
+		break;
+	case CPT_MSG_VQ_PRIORITY:
+		vfx->priority = mbx.data;
+		cpt_cfg_vq_priority(cpt, vf, vfx->priority);
+		cpt_mbox_send_ack(cpt, vf, &mbx);
+		break;
+	default:
+		dev_err(&cpt->pdev->dev, "Invalid msg from VF%d, msg 0x%llx\n",
+			vf, mbx.msg);
+		break;
+	}
+}
+
+void cpt_mbox_intr_handler(struct cpt_device *cpt, int mbx)
+{
+	u64 intr;
+	u8  vf;
+
+	intr = cpt_read_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0));
+	dev_dbg(&cpt->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
+	for (vf = 0; vf < CPT_MAX_VF_NUM; vf++) {
+		if (intr & (1ULL << vf)) {
+			dev_dbg(&cpt->pdev->dev, "Intr from VF %d\n", vf);
+			cpt_handle_mbox_intr(cpt, vf);
+			cpt_clear_mbox_intr(cpt, vf);
+		}
+	}
+}
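/*
 * A hypothetical sketch of the other end of this exchange, not part of the
 * driver sources shown here; the real VF-side implementation lives in
 * cptvf_mbox.c.  Mirroring the PF helper above, a VF would post the data
 * word and then the message word through its CPT()_VF()_PF_MBOX registers,
 * which raises the PF mailbox interrupt serviced by cpt_mbox_intr_handler().
 * The function name is hypothetical and the write ordering is only an
 * assumption carried over from the PF side.
 */
static void example_vf_send_msg(u8 __iomem *reg_base, struct cpt_mbox *mbx)
{
	cpt_write_csr64(reg_base, CPTX_VFX_PF_MBOXX(0, 0, 1), mbx->data);
	cpt_write_csr64(reg_base, CPTX_VFX_PF_MBOXX(0, 0, 0), mbx->msg);
}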
diff --git a/drivers/crypto/cavium/cpt/cptvf.h b/drivers/crypto/cavium/cpt/cptvf.h
new file mode 100644
index 0000000..0a835a0
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/cptvf.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __CPTVF_H
+#define __CPTVF_H
+
+#include <linux/list.h>
+#include "cpt_common.h"
+
+/* Default command queue length */
+#define CPT_CMD_QLEN 2046
+#define CPT_CMD_QCHUNK_SIZE 1023
+
+/* Default command timeout in seconds */
+#define CPT_COMMAND_TIMEOUT 4
+#define CPT_TIMER_THOLD	0xFFFF
+#define CPT_NUM_QS_PER_VF 1
+#define CPT_INST_SIZE 64
+#define CPT_NEXT_CHUNK_PTR_SIZE 8
+
+#define	CPT_VF_MSIX_VECTORS 2
+#define CPT_VF_INTR_MBOX_MASK BIT(0)
+#define CPT_VF_INTR_DOVF_MASK BIT(1)
+#define CPT_VF_INTR_IRDE_MASK BIT(2)
+#define CPT_VF_INTR_NWRP_MASK BIT(3)
+#define CPT_VF_INTR_SERR_MASK BIT(4)
+#define DMA_DIRECT_DIRECT 0 /* Input DIRECT, Output DIRECT */
+#define DMA_GATHER_SCATTER 1
+#define FROM_DPTR 1
+
+/**
+ * Enumeration cpt_vf_int_vec_e
+ *
+ * CPT VF MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+enum cpt_vf_int_vec_e {
+	CPT_VF_INT_VEC_E_MISC = 0x00,
+	CPT_VF_INT_VEC_E_DONE = 0x01
+};
+
+struct command_chunk {
+	u8 *head;
+	dma_addr_t dma_addr;
+	u32 size; /* Chunk size, max CPT_INST_CHUNK_MAX_SIZE */
+	struct hlist_node nextchunk;
+};
+
+struct command_queue {
+	spinlock_t lock; /* command queue lock */
+	u32 idx; /* Command queue host write idx */
+	u32 nchunks; /* Number of command chunks */
+	struct command_chunk *qhead;	/* Command queue head, instructions
+					 * are inserted here
+					 */
+	struct hlist_head chead;
+};
+
+struct command_qinfo {
+	u32 cmd_size;
+	u32 qchunksize; /* Command queue chunk size */
+	struct command_queue queue[CPT_NUM_QS_PER_VF];
+};
+
+struct pending_entry {
+	u8 busy; /* Entry status (free/busy) */
+
+	volatile u64 *completion_addr; /* Completion address */
+	void *post_arg;
+	void (*callback)(int, void *); /* Kernel ASYNC request callback */
+	void *callback_arg; /* Kernel ASYNC request callback arg */
+};
+
+struct pending_queue {
+	struct pending_entry *head;	/* head of the queue */
+	u32 front; /* Process work from here */
+	u32 rear; /* Append new work here */
+	atomic64_t pending_count;
+	spinlock_t lock; /* Queue lock */
+};
+
+struct pending_qinfo {
+	u32 nr_queues;	/* Number of queues supported */
+	u32 qlen; /* Queue length */
+	struct pending_queue queue[CPT_NUM_QS_PER_VF];
+};
+
+#define for_each_pending_queue(qinfo, q, i)	\
+	for (i = 0, q = &qinfo->queue[i]; i < qinfo->nr_queues; i++, \
+	     q = &qinfo->queue[i])
+
+struct cpt_vf {
+	u16 flags; /* Flags to hold device status bits */
+	u8 vfid; /* Device Index 0...CPT_MAX_VF_NUM */
+	u8 vftype; /* VF type: SE_TYPES (2) or AE_TYPES (1) */
+	u8 vfgrp; /* VF group (0 - 7) */
+	u8 node; /* Operating node: Bits (46:44) in BAR0 address */
+	u8 priority; /* VF priority ring: 1 - high priority round
+		      * robin ring; 0 - low priority round robin ring
+		      */
+	struct pci_dev *pdev; /* pci device handle */
+	void __iomem *reg_base; /* Register start address */
+	void *wqe_info;	/* BH worker info */
+	/* MSI-X */
+	cpumask_var_t affinity_mask[CPT_VF_MSIX_VECTORS];
+	/* Command and Pending queues */
+	u32 qsize;
+	u32 nr_queues;
+	struct command_qinfo cqinfo; /* Command queue information */
+	struct pending_qinfo pqinfo; /* Pending queue information */
+	/* VF-PF mailbox communication */
+	bool pf_acked;
+	bool pf_nacked;
+};
+
+int cptvf_send_vf_up(struct cpt_vf *cptvf);
+int cptvf_send_vf_down(struct cpt_vf *cptvf);
+int cptvf_send_vf_to_grp_msg(struct cpt_vf *cptvf);
+int cptvf_send_vf_priority_msg(struct cpt_vf *cptvf);
+int cptvf_send_vq_size_msg(struct cpt_vf *cptvf);
+int cptvf_check_pf_ready(struct cpt_vf *cptvf);
+void cptvf_handle_mbox_intr(struct cpt_vf *cptvf);
+void cvm_crypto_exit(void);
+int cvm_crypto_init(struct cpt_vf *cptvf);
+void vq_post_process(struct cpt_vf *cptvf, u32 qno);
+void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val);
+#endif /* __CPTVF_H */
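/*
 * An illustrative sketch, not part of the driver sources, showing the
 * intended use of the for_each_pending_queue() helper above, for example
 * from a done-interrupt bottom half: walk every pending queue of the VF and
 * hand queues with outstanding work to vq_post_process().  The function name
 * is hypothetical.
 */
static void example_drain_pending_queues(struct cpt_vf *cptvf)
{
	struct pending_qinfo *pqinfo = &cptvf->pqinfo;
	struct pending_queue *pqueue;
	u32 qno;

	for_each_pending_queue(pqinfo, pqueue, qno) {
		if (atomic64_read(&pqueue->pending_count))
			vq_post_process(cptvf, qno);
	}
}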
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
new file mode 100644
index 0000000..600336d
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -0,0 +1,524 @@
+
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/authenc.h>
+#include <crypto/cryptd.h>
+#include <crypto/crypto_wq.h>
+#include <crypto/des.h>
+#include <crypto/xts.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+
+#include "cptvf.h"
+#include "cptvf_algs.h"
+
+struct cpt_device_handle {
+	void *cdev[MAX_DEVICES];
+	u32 dev_count;
+};
+
+static struct cpt_device_handle dev_handle;
+
+static void cvm_callback(u32 status, void *arg)
+{
+	struct crypto_async_request *req = (struct crypto_async_request *)arg;
+
+	req->complete(req, !status);
+}
+
+static inline void update_input_iv(struct cpt_request_info *req_info,
+				   u8 *iv, u32 enc_iv_len,
+				   u32 *argcnt)
+{
+	/* Setting the iv information */
+	req_info->in[*argcnt].vptr = (void *)iv;
+	req_info->in[*argcnt].size = enc_iv_len;
+	req_info->req.dlen += enc_iv_len;
+
+	++(*argcnt);
+}
+
+static inline void update_output_iv(struct cpt_request_info *req_info,
+				    u8 *iv, u32 enc_iv_len,
+				    u32 *argcnt)
+{
+	/* Setting the iv information */
+	req_info->out[*argcnt].vptr = (void *)iv;
+	req_info->out[*argcnt].size = enc_iv_len;
+	req_info->rlen += enc_iv_len;
+
+	++(*argcnt);
+}
+
+static inline void update_input_data(struct cpt_request_info *req_info,
+				     struct scatterlist *inp_sg,
+				     u32 nbytes, u32 *argcnt)
+{
+	req_info->req.dlen += nbytes;
+
+	while (nbytes) {
+		u32 len = min(nbytes, inp_sg->length);
+		u8 *ptr = sg_virt(inp_sg);
+
+		req_info->in[*argcnt].vptr = (void *)ptr;
+		req_info->in[*argcnt].size = len;
+		nbytes -= len;
+
+		++(*argcnt);
+		++inp_sg;
+	}
+}
+
+static inline void update_output_data(struct cpt_request_info *req_info,
+				      struct scatterlist *outp_sg,
+				      u32 nbytes, u32 *argcnt)
+{
+	req_info->rlen += nbytes;
+
+	while (nbytes) {
+		u32 len = min(nbytes, outp_sg->length);
+		u8 *ptr = sg_virt(outp_sg);
+
+		req_info->out[*argcnt].vptr = (void *)ptr;
+		req_info->out[*argcnt].size = len;
+		nbytes -= len;
+		++(*argcnt);
+		++outp_sg;
+	}
+}
+
+static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
+				 u32 *argcnt)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct cvm_enc_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct fc_context *fctx = &rctx->fctx;
+	u64 *offset_control = &rctx->control_word;
+	u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
+	struct cpt_request_info *req_info = &rctx->cpt_req;
+	u64 *ctrl_flags = NULL;
+
+	req_info->ctrl.s.grp = 0;
+	req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER;
+	req_info->ctrl.s.se_req = SE_CORE_REQ;
+
+	req_info->req.opcode.s.major = MAJOR_OP_FC |
+					DMA_MODE_FLAG(DMA_GATHER_SCATTER);
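+	/* Minor opcode selects the direction: 2 for encrypt, 3 for decrypt */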
+	if (enc)
+		req_info->req.opcode.s.minor = 2;
+	else
+		req_info->req.opcode.s.minor = 3;
+
+	req_info->req.param1 = req->nbytes; /* Encryption Data length */
+	req_info->req.param2 = 0; /* Auth data length */
+
+	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
+	fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR;
+
+	if (ctx->cipher_type == AES_XTS)
+		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
+	else
+		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
+	ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
+	*ctrl_flags = cpu_to_be64(*ctrl_flags);
+
+	*offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16));
+	/* Store the packet data information in the first 8 bytes of the
+	 * offset control word
+	 */
+	req_info->in[*argcnt].vptr = (u8 *)offset_control;
+	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
+	req_info->req.dlen += CONTROL_WORD_LEN;
+	++(*argcnt);
+
+	req_info->in[*argcnt].vptr = (u8 *)fctx;
+	req_info->in[*argcnt].size = sizeof(struct fc_context);
+	req_info->req.dlen += sizeof(struct fc_context);
+
+	++(*argcnt);
+
+	return 0;
+}
+
+static inline u32 create_input_list(struct ablkcipher_request  *req, u32 enc,
+				    u32 enc_iv_len)
+{
+	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct cpt_request_info *req_info = &rctx->cpt_req;
+	u32 argcnt =  0;
+
+	create_ctx_hdr(req, enc, &argcnt);
+	update_input_iv(req_info, req->info, enc_iv_len, &argcnt);
+	update_input_data(req_info, req->src, req->nbytes, &argcnt);
+	req_info->incnt = argcnt;
+
+	return 0;
+}
+
+static inline void store_cb_info(struct ablkcipher_request *req,
+				 struct cpt_request_info *req_info)
+{
+	req_info->callback = (void *)cvm_callback;
+	req_info->callback_arg = (void *)&req->base;
+}
+
+static inline void create_output_list(struct ablkcipher_request *req,
+				      u32 enc_iv_len)
+{
+	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct cpt_request_info *req_info = &rctx->cpt_req;
+	u32 argcnt = 0;
+
+	/* Output buffer processing:
+	 * the AES encryption/decryption output is received in the
+	 * following format
+	 *
+	 * |---------IV---------|----ENCRYPTED/DECRYPTED DATA----|
+	 * | 16 bytes (AES CBC) |  requested enc/dec data length |
+	 */
+	/* Reading IV information */
+	update_output_iv(req_info, req->info, enc_iv_len, &argcnt);
+	update_output_data(req_info, req->dst, req->nbytes, &argcnt);
+	req_info->outcnt = argcnt;
+}
+
+static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
+	u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
+	struct fc_context *fctx = &rctx->fctx;
+	struct cpt_request_info *req_info = &rctx->cpt_req;
+	void *cdev = NULL;
+	int status;
+
+	memset(req_info, 0, sizeof(struct cpt_request_info));
+	memset(fctx, 0, sizeof(struct fc_context));
+	create_input_list(req, enc, enc_iv_len);
+	create_output_list(req, enc_iv_len);
+	store_cb_info(req, req_info);
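+	/*
+	 * Pick the CPT VF device indexed by the current CPU; this assumes
+	 * that enough VFs have been registered in dev_handle for every
+	 * online CPU (see cvm_crypto_init()).
+	 */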
+	cdev = dev_handle.cdev[smp_processor_id()];
+	status = cptvf_do_request(cdev, req_info);
+	/* The send is asynchronous; once the request completes, the driver
+	 * notifies the caller through the registered callback.
+	 */
+
+	if (status)
+		return status;
+	else
+		return -EINPROGRESS;
+}
+
+static int cvm_encrypt(struct ablkcipher_request *req)
+{
+	return cvm_enc_dec(req, true);
+}
+
+static int cvm_decrypt(struct ablkcipher_request *req)
+{
+	return cvm_enc_dec(req, false);
+}
+
+static int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+		   u32 keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
+	int err;
+	const u8 *key1 = key;
+	const u8 *key2 = key + (keylen / 2);
+
+	err = xts_check_key(tfm, key, keylen);
+	if (err)
+		return err;
+	ctx->key_len = keylen;
+	memcpy(ctx->enc_key, key1, keylen / 2);
+	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
+	ctx->cipher_type = AES_XTS;
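+	/* key_len holds both XTS key halves: 32 => AES-128, 64 => AES-256 */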
+	switch (ctx->key_len) {
+	case 32:
+		ctx->key_type = AES_128_BIT;
+		break;
+	case 64:
+		ctx->key_type = AES_256_BIT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
+{
+	if ((keylen == 16) || (keylen == 24) || (keylen == 32)) {
+		ctx->key_len = keylen;
+		switch (ctx->key_len) {
+		case 16:
+			ctx->key_type = AES_128_BIT;
+			break;
+		case 24:
+			ctx->key_type = AES_192_BIT;
+			break;
+		case 32:
+			ctx->key_type = AES_256_BIT;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (ctx->cipher_type == DES3_CBC)
+			ctx->key_type = 0;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int cvm_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+		      u32 keylen, u8 cipher_type)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->cipher_type = cipher_type;
+	if (!cvm_validate_keylen(ctx, keylen)) {
+		memcpy(ctx->enc_key, key, keylen);
+		return 0;
+	} else {
+		crypto_ablkcipher_set_flags(cipher,
+					    CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+}
+
+static int cvm_cbc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			      u32 keylen)
+{
+	return cvm_setkey(cipher, key, keylen, AES_CBC);
+}
+
+static int cvm_ecb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			      u32 keylen)
+{
+	return cvm_setkey(cipher, key, keylen, AES_ECB);
+}
+
+static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			      u32 keylen)
+{
+	return cvm_setkey(cipher, key, keylen, AES_CFB);
+}
+
+static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			       u32 keylen)
+{
+	return cvm_setkey(cipher, key, keylen, DES3_CBC);
+}
+
+static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			       u32 keylen)
+{
+	return cvm_setkey(cipher, key, keylen, DES3_ECB);
+}
+
+static int cvm_enc_dec_init(struct crypto_tfm *tfm)
+{
+	struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	memset(ctx, 0, sizeof(*ctx));
+	tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx) +
+					sizeof(struct ablkcipher_request);
+	/* Additional memory for ablkcipher_request is
+	 * allocated since the cryptd daemon uses
+	 * this memory for request_ctx information
+	 */
+
+	return 0;
+}
+
+static struct crypto_alg algs[] = { {
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+	.cra_alignmask = 7,
+	.cra_priority = 4001,
+	.cra_name = "xts(aes)",
+	.cra_driver_name = "cavium-xts-aes",
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_u = {
+		.ablkcipher = {
+			.ivsize = AES_BLOCK_SIZE,
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
+			.setkey = cvm_xts_setkey,
+			.encrypt = cvm_encrypt,
+			.decrypt = cvm_decrypt,
+		},
+	},
+	.cra_init = cvm_enc_dec_init,
+	.cra_module = THIS_MODULE,
+}, {
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+	.cra_alignmask = 7,
+	.cra_priority = 4001,
+	.cra_name = "cbc(aes)",
+	.cra_driver_name = "cavium-cbc-aes",
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_u = {
+		.ablkcipher = {
+			.ivsize = AES_BLOCK_SIZE,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.setkey = cvm_cbc_aes_setkey,
+			.encrypt = cvm_encrypt,
+			.decrypt = cvm_decrypt,
+		},
+	},
+	.cra_init = cvm_enc_dec_init,
+	.cra_module = THIS_MODULE,
+}, {
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+	.cra_alignmask = 7,
+	.cra_priority = 4001,
+	.cra_name = "ecb(aes)",
+	.cra_driver_name = "cavium-ecb-aes",
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_u = {
+		.ablkcipher = {
+			.ivsize = AES_BLOCK_SIZE,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.setkey = cvm_ecb_aes_setkey,
+			.encrypt = cvm_encrypt,
+			.decrypt = cvm_decrypt,
+		},
+	},
+	.cra_init = cvm_enc_dec_init,
+	.cra_module = THIS_MODULE,
+}, {
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
+	.cra_alignmask = 7,
+	.cra_priority = 4001,
+	.cra_name = "cfb(aes)",
+	.cra_driver_name = "cavium-cfb-aes",
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_u = {
+		.ablkcipher = {
+			.ivsize = AES_BLOCK_SIZE,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.setkey = cvm_cfb_aes_setkey,
+			.encrypt = cvm_encrypt,
+			.decrypt = cvm_decrypt,
+		},
+	},
+	.cra_init = cvm_enc_dec_init,
+	.cra_module = THIS_MODULE,
+}, {
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct cvm_des3_ctx),
+	.cra_alignmask = 7,
+	.cra_priority = 4001,
+	.cra_name = "cbc(des3_ede)",
+	.cra_driver_name = "cavium-cbc-des3_ede",
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+			.setkey = cvm_cbc_des3_setkey,
+			.encrypt = cvm_encrypt,
+			.decrypt = cvm_decrypt,
+		},
+	},
+	.cra_init = cvm_enc_dec_init,
+	.cra_module = THIS_MODULE,
+}, {
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct cvm_des3_ctx),
+	.cra_alignmask = 7,
+	.cra_priority = 4001,
+	.cra_name = "ecb(des3_ede)",
+	.cra_driver_name = "cavium-ecb-des3_ede",
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+			.setkey = cvm_ecb_des3_setkey,
+			.encrypt = cvm_encrypt,
+			.decrypt = cvm_decrypt,
+		},
+	},
+	.cra_init = cvm_enc_dec_init,
+	.cra_module = THIS_MODULE,
+} };
+
+static inline int cav_register_algs(void)
+{
+	int err = 0;
+
+	err = crypto_register_algs(algs, ARRAY_SIZE(algs));
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static inline void cav_unregister_algs(void)
+{
+	crypto_unregister_algs(algs, ARRAY_SIZE(algs));
+}
+
+int cvm_crypto_init(struct cpt_vf *cptvf)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	u32 dev_count;
+
+	dev_count = dev_handle.dev_count;
+	dev_handle.cdev[dev_count] = cptvf;
+	dev_handle.dev_count++;
+
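+	/*
+	 * Register the crypto algorithms only once the fourth VF has been
+	 * probed, so that cvm_enc_dec() can spread requests across several
+	 * VF queues indexed by CPU.
+	 */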
+	if (dev_count == 3) {
+		if (cav_register_algs()) {
+			dev_err(&pdev->dev, "Error in registering crypto algorithms\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+void cvm_crypto_exit(void)
+{
+	u32 dev_count;
+
+	dev_count = --dev_handle.dev_count;
+	if (!dev_count)
+		cav_unregister_algs();
+}
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.h b/drivers/crypto/cavium/cpt/cptvf_algs.h
new file mode 100644
index 0000000..902f257
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef _CPTVF_ALGS_H_
+#define _CPTVF_ALGS_H_
+
+#include "request_manager.h"
+
+#define MAX_DEVICES 16
+#define MAJOR_OP_FC 0x33
+#define MAX_ENC_KEY_SIZE 32
+#define MAX_HASH_KEY_SIZE 64
+#define MAX_KEY_SIZE (MAX_ENC_KEY_SIZE + MAX_HASH_KEY_SIZE)
+#define CONTROL_WORD_LEN 8
+#define KEY2_OFFSET 48
+
+#define DMA_MODE_FLAG(dma_mode) \
+	(((dma_mode) == DMA_GATHER_SCATTER) ? (1 << 7) : 0)
+
+enum req_type {
+	AE_CORE_REQ,
+	SE_CORE_REQ,
+};
+
+enum cipher_type {
+	DES3_CBC = 0x1,
+	DES3_ECB = 0x2,
+	AES_CBC = 0x3,
+	AES_ECB = 0x4,
+	AES_CFB = 0x5,
+	AES_CTR = 0x6,
+	AES_GCM = 0x7,
+	AES_XTS = 0x8
+};
+
+enum aes_type {
+	AES_128_BIT = 0x1,
+	AES_192_BIT = 0x2,
+	AES_256_BIT = 0x3
+};
+
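+/*
+ * Hardware layout of the flexi-crypto (FC) context control word. The
+ * bitfield order depends on host endianness; create_ctx_hdr() byte-swaps
+ * the whole word to big endian before handing it to the engine.
+ */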
+union encr_ctrl {
+	u64 flags;
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 enc_cipher:4;
+		u64 reserved1:1;
+		u64 aes_key:2;
+		u64 iv_source:1;
+		u64 hash_type:4;
+		u64 reserved2:3;
+		u64 auth_input_type:1;
+		u64 mac_len:8;
+		u64 reserved3:8;
+		u64 encr_offset:16;
+		u64 iv_offset:8;
+		u64 auth_offset:8;
+#else
+		u64 auth_offset:8;
+		u64 iv_offset:8;
+		u64 encr_offset:16;
+		u64 reserved3:8;
+		u64 mac_len:8;
+		u64 auth_input_type:1;
+		u64 reserved2:3;
+		u64 hash_type:4;
+		u64 iv_source:1;
+		u64 aes_key:2;
+		u64 reserved1:1;
+		u64 enc_cipher:4;
+#endif
+	} e;
+};
+
+struct cvm_cipher {
+	const char *name;
+	u8 value;
+};
+
+struct enc_context {
+	union encr_ctrl enc_ctrl;
+	u8 encr_key[32];
+	u8 encr_iv[16];
+};
+
+struct fchmac_context {
+	u8 ipad[64];
+	u8 opad[64]; /* or OPAD */
+};
+
+struct fc_context {
+	struct enc_context enc;
+	struct fchmac_context hmac;
+};
+
+struct cvm_enc_ctx {
+	u32 key_len;
+	u8 enc_key[MAX_KEY_SIZE];
+	u8 cipher_type:4;
+	u8 key_type:2;
+};
+
+struct cvm_des3_ctx {
+	u32 key_len;
+	u8 des3_key[MAX_KEY_SIZE];
+};
+
+struct cvm_req_ctx {
+	struct cpt_request_info cpt_req;
+	u64 control_word;
+	struct fc_context fctx;
+};
+
+int cptvf_do_request(void *cptvf, struct cpt_request_info *req);
+#endif /*_CPTVF_ALGS_H_*/
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
new file mode 100644
index 0000000..5c796ed
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -0,0 +1,866 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include "cptvf.h"
+
+#define DRV_NAME	"thunder-cptvf"
+#define DRV_VERSION	"1.0"
+
+struct cptvf_wqe {
+	struct tasklet_struct twork;
+	void *cptvf;
+	u32 qno;
+};
+
+struct cptvf_wqe_info {
+	struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF];
+};
+
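+/*
+ * Tasklet bottom half scheduled from the DONE interrupt handler. It only
+ * processes vq_wqe[0], since each VF currently drives a single queue.
+ */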
+static void vq_work_handler(unsigned long data)
+{
+	struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data;
+	struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0];
+
+	vq_post_process(cwqe->cptvf, cwqe->qno);
+}
+
+static int init_worker_threads(struct cpt_vf *cptvf)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	struct cptvf_wqe_info *cwqe_info;
+	int i;
+
+	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
+	if (!cwqe_info)
+		return -ENOMEM;
+
+	if (cptvf->nr_queues) {
+		dev_info(&pdev->dev, "Creating VQ worker threads (%d)\n",
+			 cptvf->nr_queues);
+	}
+
+	for (i = 0; i < cptvf->nr_queues; i++) {
+		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
+			     (u64)cwqe_info);
+		cwqe_info->vq_wqe[i].qno = i;
+		cwqe_info->vq_wqe[i].cptvf = cptvf;
+	}
+
+	cptvf->wqe_info = cwqe_info;
+
+	return 0;
+}
+
+static void cleanup_worker_threads(struct cpt_vf *cptvf)
+{
+	struct cptvf_wqe_info *cwqe_info;
+	struct pci_dev *pdev = cptvf->pdev;
+	int i;
+
+	cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
+	if (!cwqe_info)
+		return;
+
+	if (cptvf->nr_queues) {
+		dev_info(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
+			 cptvf->nr_queues);
+	}
+
+	for (i = 0; i < cptvf->nr_queues; i++)
+		tasklet_kill(&cwqe_info->vq_wqe[i].twork);
+
+	kzfree(cwqe_info);
+	cptvf->wqe_info = NULL;
+}
+
+static void free_pending_queues(struct pending_qinfo *pqinfo)
+{
+	int i;
+	struct pending_queue *queue;
+
+	for_each_pending_queue(pqinfo, queue, i) {
+		if (!queue->head)
+			continue;
+
+		/* free single queue */
+		kzfree((queue->head));
+
+		queue->front = 0;
+		queue->rear = 0;
+
+		return;
+	}
+
+	pqinfo->qlen = 0;
+	pqinfo->nr_queues = 0;
+}
+
+static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
+				u32 nr_queues)
+{
+	u32 i;
+	size_t size;
+	int ret;
+	struct pending_queue *queue = NULL;
+
+	pqinfo->nr_queues = nr_queues;
+	pqinfo->qlen = qlen;
+
+	size = (qlen * sizeof(struct pending_entry));
+
+	for_each_pending_queue(pqinfo, queue, i) {
+		queue->head = kzalloc((size), GFP_KERNEL);
+		if (!queue->head) {
+			ret = -ENOMEM;
+			goto pending_qfail;
+		}
+
+		queue->front = 0;
+		queue->rear = 0;
+		atomic64_set((&queue->pending_count), (0));
+
+		/* init queue spin lock */
+		spin_lock_init(&queue->lock);
+	}
+
+	return 0;
+
+pending_qfail:
+	free_pending_queues(pqinfo);
+
+	return ret;
+}
+
+static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	int ret;
+
+	if (!nr_queues)
+		return 0;
+
+	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to setup pending queues (%u)\n",
+			nr_queues);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void cleanup_pending_queues(struct cpt_vf *cptvf)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+
+	if (!cptvf->nr_queues)
+		return;
+
+	dev_info(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
+		 cptvf->nr_queues);
+	free_pending_queues(&cptvf->pqinfo);
+}
+
+static void free_command_queues(struct cpt_vf *cptvf,
+				struct command_qinfo *cqinfo)
+{
+	int i;
+	struct command_queue *queue = NULL;
+	struct command_chunk *chunk = NULL;
+	struct pci_dev *pdev = cptvf->pdev;
+	struct hlist_node *node;
+
+	/* clean up for each queue */
+	for (i = 0; i < cptvf->nr_queues; i++) {
+		queue = &cqinfo->queue[i];
+		if (hlist_empty(&cqinfo->queue[i].chead))
+			continue;
+
+		hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
+					  nextchunk) {
+			dma_free_coherent(&pdev->dev, chunk->size,
+					  chunk->head,
+					  chunk->dma_addr);
+			chunk->head = NULL;
+			chunk->dma_addr = 0;
+			hlist_del(&chunk->nextchunk);
+			kzfree(chunk);
+		}
+
+		queue->nchunks = 0;
+		queue->idx = 0;
+	}
+
+	/* common cleanup */
+	cqinfo->cmd_size = 0;
+}
+
+static int alloc_command_queues(struct cpt_vf *cptvf,
+				struct command_qinfo *cqinfo, size_t cmd_size,
+				u32 qlen)
+{
+	int i;
+	size_t q_size;
+	struct command_queue *queue = NULL;
+	struct pci_dev *pdev = cptvf->pdev;
+
+	/* common init */
+	cqinfo->cmd_size = cmd_size;
+	/* Qsize in dwords, needed for SADDR config; +1 for the next-chunk pointer */
+	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
+			CPT_NEXT_CHUNK_PTR_SIZE + 1;
+	/* Qsize in bytes to create space for alignment */
+	q_size = qlen * cqinfo->cmd_size;
+
+	/* per queue initialization */
+	for (i = 0; i < cptvf->nr_queues; i++) {
+		size_t c_size = 0;
+		size_t rem_q_size = q_size;
+		struct command_chunk *curr = NULL, *first = NULL, *last = NULL;
+		u32 qcsize_bytes = cqinfo->qchunksize * cqinfo->cmd_size;
+
+		queue = &cqinfo->queue[i];
+		INIT_HLIST_HEAD(&cqinfo->queue[i].chead);
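+		/*
+		 * Build the command queue as a chain of DMA-coherent chunks:
+		 * an 8-byte pointer appended after each chunk's data holds
+		 * the DMA address of the next chunk, and the final chunk is
+		 * tied back to the first so the queue is circular.
+		 */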
+		do {
+			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
+			if (!curr)
+				goto cmd_qfail;
+
+			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
+					rem_q_size;
+			curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev,
+					  c_size + CPT_NEXT_CHUNK_PTR_SIZE,
+					  &curr->dma_addr, GFP_KERNEL);
+			if (!curr->head) {
+				dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
+					i, queue->nchunks);
+				kfree(curr);
+				goto cmd_qfail;
+			}
+
+			curr->size = c_size;
+			if (queue->nchunks == 0) {
+				hlist_add_head(&curr->nextchunk,
+					       &cqinfo->queue[i].chead);
+				first = curr;
+			} else {
+				hlist_add_behind(&curr->nextchunk,
+						 &last->nextchunk);
+			}
+
+			queue->nchunks++;
+			rem_q_size -= c_size;
+			if (last)
+				*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
+
+			last = curr;
+		} while (rem_q_size);
+
+		/* Make the queue circular */
+		/* Tie back last chunk entry to head */
+		curr = first;
+		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
+		queue->qhead = curr;
+		spin_lock_init(&queue->lock);
+	}
+	return 0;
+
+cmd_qfail:
+	free_command_queues(cptvf, cqinfo);
+	return -ENOMEM;
+}
+
+static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	int ret;
+
+	/* setup AE command queues */
+	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE,
+				   qlen);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to allocate AE command queues (%u)\n",
+			cptvf->nr_queues);
+		return ret;
+	}
+
+	return ret;
+}
+
+static void cleanup_command_queues(struct cpt_vf *cptvf)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+
+	if (!cptvf->nr_queues)
+		return;
+
+	dev_info(&pdev->dev, "Cleaning VQ command queue (%u)\n",
+		 cptvf->nr_queues);
+	free_command_queues(cptvf, &cptvf->cqinfo);
+}
+
+static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
+{
+	cleanup_worker_threads(cptvf);
+	cleanup_pending_queues(cptvf);
+	cleanup_command_queues(cptvf);
+}
+
+static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	int ret = 0;
+	u32 max_dev_queues = 0;
+
+	max_dev_queues = CPT_NUM_QS_PER_VF;
+	/* Clamp to the number of queues supported per VF */
+	nr_queues = min_t(u32, nr_queues, max_dev_queues);
+	cptvf->nr_queues = nr_queues;
+
+	ret = init_command_queues(cptvf, qlen);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
+			nr_queues);
+		return ret;
+	}
+
+	ret = init_pending_queues(cptvf, qlen, nr_queues);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
+			nr_queues);
+		goto setup_pqfail;
+	}
+
+	/* Create worker threads for BH processing */
+	ret = init_worker_threads(cptvf);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to setup worker threads\n");
+		goto init_work_fail;
+	}
+
+	return 0;
+
+init_work_fail:
+	cleanup_worker_threads(cptvf);
+	cleanup_pending_queues(cptvf);
+
+setup_pqfail:
+	cleanup_command_queues(cptvf);
+
+	return ret;
+}
+
+static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
+{
+	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
+	free_cpumask_var(cptvf->affinity_mask[vec]);
+}
+
+static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
+{
+	union cptx_vqx_ctl vqx_ctl;
+
+	vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
+	vqx_ctl.s.ena = val;
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u);
+}
+
+void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
+{
+	union cptx_vqx_doorbell vqx_dbell;
+
+	vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
+				     CPTX_VQX_DOORBELL(0, 0));
+	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
+			vqx_dbell.u);
+}
+
+static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
+{
+	union cptx_vqx_inprog vqx_inprg;
+
+	vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
+	vqx_inprg.s.inflight = val;
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
+}
+
+static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
+{
+	union cptx_vqx_done_wait vqx_dwait;
+
+	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
+				     CPTX_VQX_DONE_WAIT(0, 0));
+	vqx_dwait.s.num_wait = val;
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
+			vqx_dwait.u);
+}
+
+static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
+{
+	union cptx_vqx_done_wait vqx_dwait;
+
+	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
+				     CPTX_VQX_DONE_WAIT(0, 0));
+	vqx_dwait.s.time_wait = time;
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
+			vqx_dwait.u);
+}
+
+static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
+{
+	union cptx_vqx_misc_ena_w1s vqx_misc_ena;
+
+	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
+					CPTX_VQX_MISC_ENA_W1S(0, 0));
+	/* Enable software error interrupts for this VF */
+	vqx_misc_ena.s.swerr = 1;
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
+			vqx_misc_ena.u);
+}
+
+static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
+{
+	union cptx_vqx_misc_ena_w1s vqx_misc_ena;
+
+	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
+					CPTX_VQX_MISC_ENA_W1S(0, 0));
+	/* Enable mailbox(0) interrupts for this VF */
+	vqx_misc_ena.s.mbox = 1;
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
+			vqx_misc_ena.u);
+}
+
+static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
+{
+	union cptx_vqx_done_ena_w1s vqx_done_ena;
+
+	vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
+					CPTX_VQX_DONE_ENA_W1S(0, 0));
+	/* Set DONE interrupt for the requested vf */
+	vqx_done_ena.s.done = 1;
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
+			vqx_done_ena.u);
+}
+
+static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
+{
+	union cptx_vqx_misc_int vqx_misc_int;
+
+	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
+					CPTX_VQX_MISC_INT(0, 0));
+	/* W1C for the VF */
+	vqx_misc_int.s.dovf = 1;
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
+			vqx_misc_int.u);
+}
+
+static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
+{
+	union cptx_vqx_misc_int vqx_misc_int;
+
+	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
+					CPTX_VQX_MISC_INT(0, 0));
+	/* W1C for the VF */
+	vqx_misc_int.s.irde = 1;
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
+			vqx_misc_int.u);
+}
+
+static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
+{
+	union cptx_vqx_misc_int vqx_misc_int;
+
+	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
+					CPTX_VQX_MISC_INT(0, 0));
+	/* W1C for the VF */
+	vqx_misc_int.s.nwrp = 1;
+	cpt_write_csr64(cptvf->reg_base,
+			CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
+{
+	union cptx_vqx_misc_int vqx_misc_int;
+
+	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
+					CPTX_VQX_MISC_INT(0, 0));
+	/* W1C for the VF */
+	vqx_misc_int.s.mbox = 1;
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
+			vqx_misc_int.u);
+}
+
+static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
+{
+	union cptx_vqx_misc_int vqx_misc_int;
+
+	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
+					CPTX_VQX_MISC_INT(0, 0));
+	/* W1C for the VF */
+	vqx_misc_int.s.swerr = 1;
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
+			vqx_misc_int.u);
+}
+
+static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
+{
+	return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
+}
+
+static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
+{
+	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
+	struct pci_dev *pdev = cptvf->pdev;
+	u64 intr;
+
+	intr = cptvf_read_vf_misc_intr_status(cptvf);
+	/* Check for MISC interrupt types */
+	if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
+		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
+			intr, cptvf->vfid);
+		cptvf_handle_mbox_intr(cptvf);
+		cptvf_clear_mbox_intr(cptvf);
+	} else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
+		cptvf_clear_dovf_intr(cptvf);
+		/* Clear doorbell count */
+		cptvf_write_vq_doorbell(cptvf, 0);
+		dev_err(&pdev->dev, "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
+			intr, cptvf->vfid);
+	} else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
+		cptvf_clear_irde_intr(cptvf);
+		dev_err(&pdev->dev, "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
+			intr, cptvf->vfid);
+	} else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
+		cptvf_clear_nwrp_intr(cptvf);
+		dev_err(&pdev->dev, "NCB response write error interrupt 0x%llx on CPT VF %d\n",
+			intr, cptvf->vfid);
+	} else if (unlikely(intr & CPT_VF_INTR_SERR_MASK)) {
+		cptvf_clear_swerr_intr(cptvf);
+		dev_err(&pdev->dev, "Software error interrupt 0x%llx on CPT VF %d\n",
+			intr, cptvf->vfid);
+	} else {
+		dev_err(&pdev->dev, "Unhandled interrupt in CPT VF %d\n",
+			cptvf->vfid);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
+						 int qno)
+{
+	struct cptvf_wqe_info *nwqe_info;
+
+	if (unlikely(qno >= cptvf->nr_queues))
+		return NULL;
+	nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
+
+	return &nwqe_info->vq_wqe[qno];
+}
+
+static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
+{
+	union cptx_vqx_done vqx_done;
+
+	vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
+	return vqx_done.s.done;
+}
+
+static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
+					   u32 ackcnt)
+{
+	union cptx_vqx_done_ack vqx_dack_cnt;
+
+	vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base,
+					CPTX_VQX_DONE_ACK(0, 0));
+	vqx_dack_cnt.s.done_ack = ackcnt;
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0),
+			vqx_dack_cnt.u);
+}
+
+static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
+{
+	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
+	struct pci_dev *pdev = cptvf->pdev;
+	/* Read the number of completions */
+	u32 intr = cptvf_read_vq_done_count(cptvf);
+
+	if (intr) {
+		struct cptvf_wqe *wqe;
+
+		/* Acknowledge the number of
+		 * scheduled completions for processing
+		 */
+		cptvf_write_vq_done_ack(cptvf, intr);
+		wqe = get_cptvf_vq_wqe(cptvf, 0);
+		if (unlikely(!wqe)) {
+			dev_err(&pdev->dev, "No work to schedule for VF (%d)",
+				cptvf->vfid);
+			return IRQ_NONE;
+		}
+		tasklet_hi_schedule(&wqe->twork);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	int cpu;
+
+	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
+				GFP_KERNEL)) {
+		dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d",
+			cptvf->vfid);
+		return;
+	}
+
+	cpu = cptvf->vfid % num_online_cpus();
+	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
+			cptvf->affinity_mask[vec]);
+	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
+			cptvf->affinity_mask[vec]);
+}
+
+static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
+{
+	union cptx_vqx_saddr vqx_saddr;
+
+	vqx_saddr.u = val;
+	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
+}
+
+void cptvf_device_init(struct cpt_vf *cptvf)
+{
+	u64 base_addr = 0;
+
+	/* Disable the VQ */
+	cptvf_write_vq_ctl(cptvf, 0);
+	/* Reset the doorbell */
+	cptvf_write_vq_doorbell(cptvf, 0);
+	/* Clear inflight */
+	cptvf_write_vq_inprog(cptvf, 0);
+	/* Write VQ SADDR */
+	/* TODO: for now only one queue, so hard coded */
+	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
+	cptvf_write_vq_saddr(cptvf, base_addr);
+	/* Configure timerhold / coalescence */
+	cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
+	cptvf_write_vq_done_numwait(cptvf, 1);
+	/* Enable the VQ */
+	cptvf_write_vq_ctl(cptvf, 1);
+	/* Flag the VF ready */
+	cptvf->flags |= CPT_FLAG_DEVICE_READY;
+}
+
+static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct cpt_vf *cptvf;
+	int    err;
+
+	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
+	if (!cptvf)
+		return -ENOMEM;
+
+	pci_set_drvdata(pdev, cptvf);
+	cptvf->pdev = pdev;
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(dev, "Failed to enable PCI device\n");
+		pci_set_drvdata(pdev, NULL);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(dev, "PCI request regions failed 0x%x\n", err);
+		goto cptvf_err_disable_device;
+	}
+	/* Mark as VF driver */
+	cptvf->flags |= CPT_FLAG_VF_DRIVER;
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "Unable to get usable DMA configuration\n");
+		goto cptvf_err_release_regions;
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
+		goto cptvf_err_release_regions;
+	}
+
+	/* Map the VF's configuration registers */
+	cptvf->reg_base = pcim_iomap(pdev, 0, 0);
+	if (!cptvf->reg_base) {
+		dev_err(dev, "Cannot map config register space, aborting\n");
+		err = -ENOMEM;
+		goto cptvf_err_release_regions;
+	}
+
+	cptvf->node = dev_to_node(&pdev->dev);
+	err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
+			CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
+	if (err < 0) {
+		dev_err(dev, "Request for #%d msix vectors failed\n",
+			CPT_VF_MSIX_VECTORS);
+		goto cptvf_err_release_regions;
+	}
+
+	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
+			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
+			  cptvf);
+	if (err) {
+		dev_err(dev, "Request misc irq failed");
+		goto cptvf_free_vectors;
+	}
+
+	/* Enable mailbox and software error interrupts */
+	cptvf_enable_mbox_interrupts(cptvf);
+	cptvf_enable_swerr_interrupts(cptvf);
+
+	/* Check ready with PF */
+	/* Gets chip ID / device Id from PF if ready */
+	err = cptvf_check_pf_ready(cptvf);
+	if (err) {
+		dev_err(dev, "PF not responding to READY msg");
+		goto cptvf_free_misc_irq;
+	}
+
+	/* CPT VF software resources initialization */
+	cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
+	err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
+	if (err) {
+		dev_err(dev, "cptvf_sw_init() failed");
+		goto cptvf_free_misc_irq;
+	}
+	/* Convey VQ LEN to PF */
+	err = cptvf_send_vq_size_msg(cptvf);
+	if (err) {
+		dev_err(dev, "PF not responding to QLEN msg");
+		goto cptvf_free_misc_irq;
+	}
+
+	/* CPT VF device initialization */
+	cptvf_device_init(cptvf);
+	/* Send msg to PF to assign the current VQ to the required group */
+	cptvf->vfgrp = 1;
+	err = cptvf_send_vf_to_grp_msg(cptvf);
+	if (err) {
+		dev_err(dev, "PF not responding to VF_GRP msg");
+		goto cptvf_free_misc_irq;
+	}
+
+	cptvf->priority = 1;
+	err = cptvf_send_vf_priority_msg(cptvf);
+	if (err) {
+		dev_err(dev, "PF not responding to VF_PRIO msg");
+		goto cptvf_free_misc_irq;
+	}
+
+	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
+			  cptvf_done_intr_handler, 0, "CPT VF done intr",
+			  cptvf);
+	if (err) {
+		dev_err(dev, "Request done irq failed\n");
+		goto cptvf_free_misc_irq;
+	}
+
+	/* Enable DONE interrupt */
+	cptvf_enable_done_interrupts(cptvf);
+
+	/* Set irq affinity masks */
+	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+
+	err = cptvf_send_vf_up(cptvf);
+	if (err) {
+		dev_err(dev, "PF not responding to UP msg");
+		goto cptvf_free_irq_affinity;
+	}
+	err = cvm_crypto_init(cptvf);
+	if (err) {
+		dev_err(dev, "Algorithm register failed\n");
+		goto cptvf_free_irq_affinity;
+	}
+	return 0;
+
+cptvf_free_irq_affinity:
+	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+cptvf_free_misc_irq:
+	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
+cptvf_free_vectors:
+	pci_free_irq_vectors(cptvf->pdev);
+cptvf_err_release_regions:
+	pci_release_regions(pdev);
+cptvf_err_disable_device:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+	return err;
+}
+
+static void cptvf_remove(struct pci_dev *pdev)
+{
+	struct cpt_vf *cptvf = pci_get_drvdata(pdev);
+
+	if (!cptvf) {
+		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
+		return;
+	}
+
+	/* Convey DOWN to PF */
+	if (cptvf_send_vf_down(cptvf)) {
+		dev_err(&pdev->dev, "PF not responding to DOWN msg");
+	} else {
+		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
+		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
+		pci_free_irq_vectors(cptvf->pdev);
+		cptvf_sw_cleanup(cptvf);
+		pci_set_drvdata(pdev, NULL);
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		cvm_crypto_exit();
+	}
+}
+
+static void cptvf_shutdown(struct pci_dev *pdev)
+{
+	cptvf_remove(pdev);
+}
+
+/* Supported devices */
+static const struct pci_device_id cptvf_id_table[] = {
+	{PCI_VDEVICE(CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), 0},
+	{ 0, }  /* end of table */
+};
+
+static struct pci_driver cptvf_pci_driver = {
+	.name = DRV_NAME,
+	.id_table = cptvf_id_table,
+	.probe = cptvf_probe,
+	.remove = cptvf_remove,
+	.shutdown = cptvf_shutdown,
+};
+
+module_pci_driver(cptvf_pci_driver);
+
+MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
+MODULE_DESCRIPTION("Cavium Thunder CPT Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cptvf_id_table);
diff --git a/drivers/crypto/cavium/cpt/cptvf_mbox.c b/drivers/crypto/cavium/cpt/cptvf_mbox.c
new file mode 100644
index 0000000..d5ec3b8
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/cptvf_mbox.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include "cptvf.h"
+
+static void cptvf_send_msg_to_pf(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
+{
+	/* Writing mbox(1) causes interrupt */
+	cpt_write_csr64(cptvf->reg_base, CPTX_VFX_PF_MBOXX(0, 0, 0),
+			mbx->msg);
+	cpt_write_csr64(cptvf->reg_base, CPTX_VFX_PF_MBOXX(0, 0, 1),
+			mbx->data);
+}
+
+/* ACK the PF's mailbox message */
+void cptvf_mbox_send_ack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
+{
+	mbx->msg = CPT_MBOX_MSG_TYPE_ACK;
+	cptvf_send_msg_to_pf(cptvf, mbx);
+}
+
+/* NACK the PF's mailbox message, indicating that the VF is not able
+ * to complete the requested action
+ */
+void cptvf_mbox_send_nack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
+{
+	mbx->msg = CPT_MBOX_MSG_TYPE_NACK;
+	cptvf_send_msg_to_pf(cptvf, mbx);
+}
+
+/* Interrupt handler for mailbox messages from the PF */
+void cptvf_handle_mbox_intr(struct cpt_vf *cptvf)
+{
+	struct cpt_mbox mbx = {};
+
+	/*
+	 * MBOX[0] contains msg
+	 * MBOX[1] contains data
+	 */
+	mbx.msg  = cpt_read_csr64(cptvf->reg_base, CPTX_VFX_PF_MBOXX(0, 0, 0));
+	mbx.data = cpt_read_csr64(cptvf->reg_base, CPTX_VFX_PF_MBOXX(0, 0, 1));
+	dev_dbg(&cptvf->pdev->dev, "%s: Mailbox msg 0x%llx from PF\n",
+		__func__, mbx.msg);
+	switch (mbx.msg) {
+	case CPT_MSG_READY:
+	{
+		cptvf->pf_acked = true;
+		cptvf->vfid = mbx.data;
+		dev_dbg(&cptvf->pdev->dev, "Received VFID %d\n", cptvf->vfid);
+		break;
+	}
+	case CPT_MSG_QBIND_GRP:
+		cptvf->pf_acked = true;
+		cptvf->vftype = mbx.data;
+		dev_dbg(&cptvf->pdev->dev, "VF %d type %s group %d\n",
+			cptvf->vfid, ((mbx.data == SE_TYPES) ? "SE" : "AE"),
+			cptvf->vfgrp);
+		break;
+	case CPT_MBOX_MSG_TYPE_ACK:
+		cptvf->pf_acked = true;
+		break;
+	case CPT_MBOX_MSG_TYPE_NACK:
+		cptvf->pf_nacked = true;
+		break;
+	default:
+		dev_err(&cptvf->pdev->dev, "Invalid msg from PF, msg 0x%llx\n",
+			mbx.msg);
+		break;
+	}
+}
+
+static int cptvf_send_msg_to_pf_timeout(struct cpt_vf *cptvf,
+					struct cpt_mbox *mbx)
+{
+	int timeout = CPT_MBOX_MSG_TIMEOUT;
+	int sleep = 10;
+
+	cptvf->pf_acked = false;
+	cptvf->pf_nacked = false;
+	cptvf_send_msg_to_pf(cptvf, mbx);
+	/* Wait for previous message to be acked, timeout 2sec */
+	while (!cptvf->pf_acked) {
+		if (cptvf->pf_nacked)
+			return -EINVAL;
+		msleep(sleep);
+		if (cptvf->pf_acked)
+			break;
+		timeout -= sleep;
+		if (!timeout) {
+			dev_err(&cptvf->pdev->dev, "PF didn't ack to mbox msg %llx from VF%u\n",
+				(mbx->msg & 0xFF), cptvf->vfid);
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Check whether the VF can communicate with the PF and get the
+ * VF number (VFID) this VF is associated with.
+ */
+int cptvf_check_pf_ready(struct cpt_vf *cptvf)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	struct cpt_mbox mbx = {};
+
+	mbx.msg = CPT_MSG_READY;
+	if (cptvf_send_msg_to_pf_timeout(cptvf, &mbx)) {
+		dev_err(&pdev->dev, "PF didn't respond to READY msg\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/*
+ * Communicate VQs size to PF to program CPT(0)_PF_Q(0-15)_CTL of the VF.
+ * Must be ACKed.
+ */
+int cptvf_send_vq_size_msg(struct cpt_vf *cptvf)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	struct cpt_mbox mbx = {};
+
+	mbx.msg = CPT_MSG_QLEN;
+	mbx.data = cptvf->qsize;
+	if (cptvf_send_msg_to_pf_timeout(cptvf, &mbx)) {
+		dev_err(&pdev->dev, "PF didn't respond to vq_size msg\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/*
+ * Communicate the required VF group to the PF and get the VQ bound to that group
+ */
+int cptvf_send_vf_to_grp_msg(struct cpt_vf *cptvf)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	struct cpt_mbox mbx = {};
+
+	mbx.msg = CPT_MSG_QBIND_GRP;
+	/* Convey group of the VF */
+	mbx.data = cptvf->vfgrp;
+	if (cptvf_send_msg_to_pf_timeout(cptvf, &mbx)) {
+		dev_err(&pdev->dev, "PF didn't respond to vf_type msg\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/*
+ * Communicate the VF's queue priority to the PF
+ */
+int cptvf_send_vf_priority_msg(struct cpt_vf *cptvf)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	struct cpt_mbox mbx = {};
+
+	mbx.msg = CPT_MSG_VQ_PRIORITY;
+	/* Convey priority of the VF */
+	mbx.data = cptvf->priority;
+	if (cptvf_send_msg_to_pf_timeout(cptvf, &mbx)) {
+		dev_err(&pdev->dev, "PF didn't respond to vf_type msg\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/*
+ * Communicate to PF that VF is UP and running
+ */
+int cptvf_send_vf_up(struct cpt_vf *cptvf)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	struct cpt_mbox mbx = {};
+
+	mbx.msg = CPT_MSG_VF_UP;
+	if (cptvf_send_msg_to_pf_timeout(cptvf, &mbx)) {
+		dev_err(&pdev->dev, "PF didn't respond to UP msg\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/*
+ * Communicate to the PF that the VF is going DOWN
+ */
+int cptvf_send_vf_down(struct cpt_vf *cptvf)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	struct cpt_mbox mbx = {};
+
+	mbx.msg = CPT_MSG_VF_DOWN;
+	if (cptvf_send_msg_to_pf_timeout(cptvf, &mbx)) {
+		dev_err(&pdev->dev, "PF didn't respond to DOWN msg\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
new file mode 100644
index 0000000..b0ba433
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -0,0 +1,594 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include "cptvf.h"
+#include "request_manager.h"
+
+/**
+ * get_free_pending_entry - get a free entry from the pending queue
+ * @q: pending queue
+ * @qlen: queue length
+ */
+static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
+						    int qlen)
+{
+	struct pending_entry *ent = NULL;
+
+	ent = &q->head[q->rear];
+	if (unlikely(ent->busy)) {
+		ent = NULL;
+		goto no_free_entry;
+	}
+
+	q->rear++;
+	if (unlikely(q->rear == qlen))
+		q->rear = 0;
+
+no_free_entry:
+	return ent;
+}
+
+static inline void pending_queue_inc_front(struct pending_qinfo *pqinfo,
+					   int qno)
+{
+	struct pending_queue *queue = &pqinfo->queue[qno];
+
+	queue->front++;
+	if (unlikely(queue->front == pqinfo->qlen))
+		queue->front = 0;
+}
+
+static int setup_sgio_components(struct cpt_vf *cptvf, struct buf_ptr *list,
+				 int buf_count, u8 *buffer)
+{
+	int ret = 0, i, j;
+	int components;
+	struct sglist_component *sg_ptr = NULL;
+	struct pci_dev *pdev = cptvf->pdev;
+
+	if (unlikely(!list)) {
+		dev_err(&pdev->dev, "Input List pointer is NULL\n");
+		return -EFAULT;
+	}
+
+	for (i = 0; i < buf_count; i++) {
+		if (likely(list[i].vptr)) {
+			list[i].dma_addr = dma_map_single(&pdev->dev,
+							  list[i].vptr,
+							  list[i].size,
+							  DMA_BIDIRECTIONAL);
+			if (unlikely(dma_mapping_error(&pdev->dev,
+						       list[i].dma_addr))) {
+				dev_err(&pdev->dev, "DMA map kernel buffer failed for component: %d\n",
+					i);
+				ret = -EIO;
+				goto sg_cleanup;
+			}
+		}
+	}
+
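+	/*
+	 * Pack the mapped buffers into hardware sglist components, four
+	 * (length, pointer) pairs per component, stored big endian for
+	 * the engine.
+	 */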
+	components = buf_count / 4;
+	sg_ptr = (struct sglist_component *)buffer;
+	for (i = 0; i < components; i++) {
+		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
+		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
+		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
+		sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size);
+		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+		sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
+		sg_ptr++;
+	}
+
+	components = buf_count % 4;
+
+	switch (components) {
+	case 3:
+		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
+		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+		/* Fall through */
+	case 2:
+		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
+		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+		/* Fall through */
+	case 1:
+		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
+		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+
+sg_cleanup:
+	for (j = 0; j < i; j++) {
+		if (list[j].dma_addr) {
+			dma_unmap_single(&pdev->dev, list[j].dma_addr,
+					 list[j].size, DMA_BIDIRECTIONAL);
+		}
+
+		list[j].dma_addr = 0;
+	}
+
+	return ret;
+}
+
+static inline int setup_sgio_list(struct cpt_vf *cptvf,
+				  struct cpt_info_buffer *info,
+				  struct cpt_request_info *req)
+{
+	u16 g_sz_bytes = 0, s_sz_bytes = 0;
+	int ret = 0;
+	struct pci_dev *pdev = cptvf->pdev;
+
+	if (req->incnt > MAX_SG_IN_CNT || req->outcnt > MAX_SG_OUT_CNT) {
+		dev_err(&pdev->dev, "Request has more SG components than supported\n");
+		ret = -EINVAL;
+		goto  scatter_gather_clean;
+	}
+
+	/* Setup gather (input) components */
+	g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
+	info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL);
+	if (!info->gather_components) {
+		ret = -ENOMEM;
+		goto  scatter_gather_clean;
+	}
+
+	ret = setup_sgio_components(cptvf, req->in,
+				    req->incnt,
+				    info->gather_components);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to setup gather list\n");
+		ret = -EFAULT;
+		goto  scatter_gather_clean;
+	}
+
+	/* Setup scatter (output) components */
+	s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
+	info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL);
+	if (!info->scatter_components) {
+		ret = -ENOMEM;
+		goto  scatter_gather_clean;
+	}
+
+	ret = setup_sgio_components(cptvf, req->out,
+				    req->outcnt,
+				    info->scatter_components);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to setup scatter list\n");
+		ret = -EFAULT;
+		goto  scatter_gather_clean;
+	}
+
+	/* Create and initialize DPTR */
+	info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
+	info->in_buffer = kzalloc(info->dlen, GFP_KERNEL);
+	if (!info->in_buffer) {
+		ret = -ENOMEM;
+		goto  scatter_gather_clean;
+	}
+
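+	/*
+	 * DPTR layout: an 8-byte header (scatter count, gather count, two
+	 * reserved u16s), byte-swapped to big endian, followed by the gather
+	 * component array and then the scatter component array.
+	 */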
+	((u16 *)info->in_buffer)[0] = req->outcnt;
+	((u16 *)info->in_buffer)[1] = req->incnt;
+	((u16 *)info->in_buffer)[2] = 0;
+	((u16 *)info->in_buffer)[3] = 0;
+	*(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer);
+
+	memcpy(&info->in_buffer[8], info->gather_components,
+	       g_sz_bytes);
+	memcpy(&info->in_buffer[8 + g_sz_bytes],
+	       info->scatter_components, s_sz_bytes);
+
+	info->dptr_baddr = dma_map_single(&pdev->dev,
+					  (void *)info->in_buffer,
+					  info->dlen,
+					  DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&pdev->dev, info->dptr_baddr)) {
+		dev_err(&pdev->dev, "Mapping DPTR Failed %d\n", info->dlen);
+		ret = -EIO;
+		goto  scatter_gather_clean;
+	}
+
+	/* Create and initialize RPTR */
+	info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL);
+	if (!info->out_buffer) {
+		ret = -ENOMEM;
+		goto scatter_gather_clean;
+	}
+
+	*((u64 *)info->out_buffer) = ~((u64)COMPLETION_CODE_INIT);
+	info->alternate_caddr = (u64 *)info->out_buffer;
+	info->rptr_baddr = dma_map_single(&pdev->dev,
+					  (void *)info->out_buffer,
+					  COMPLETION_CODE_SIZE,
+					  DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&pdev->dev, info->rptr_baddr)) {
+		dev_err(&pdev->dev, "Mapping RPTR Failed %d\n",
+			COMPLETION_CODE_SIZE);
+		ret = -EIO;
+		goto  scatter_gather_clean;
+	}
+
+	return 0;
+
+scatter_gather_clean:
+	return ret;
+}
+
+int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
+		     u32 qno)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	struct command_qinfo *qinfo = NULL;
+	struct command_queue *queue;
+	struct command_chunk *chunk;
+	u8 *ent;
+	int ret = 0;
+
+	if (unlikely(qno >= cptvf->nr_queues)) {
+		dev_err(&pdev->dev, "Invalid queue (qno: %d, nr_queues: %d)\n",
+			qno, cptvf->nr_queues);
+		return -EINVAL;
+	}
+
+	qinfo = &cptvf->cqinfo;
+	queue = &qinfo->queue[qno];
+	/* lock command queue */
+	spin_lock(&queue->lock);
+	ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
+	memcpy(ent, (void *)cmd, qinfo->cmd_size);
+
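+	/*
+	 * Advance to the next chunk in the circular list once this one is
+	 * full; the divisor 64 is presumably the per-entry size
+	 * (CPT_INST_SIZE) used when the command queues were allocated.
+	 */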
+	if (++queue->idx >= queue->qhead->size / 64) {
+		struct hlist_node *node;
+
+		hlist_for_each(node, &queue->chead) {
+			chunk = hlist_entry(node, struct command_chunk,
+					    nextchunk);
+			if (chunk == queue->qhead) {
+				continue;
+			} else {
+				queue->qhead = chunk;
+				break;
+			}
+		}
+		queue->idx = 0;
+	}
+	/* make sure all memory stores are done before ringing doorbell */
+	smp_wmb();
+	cptvf_write_vq_doorbell(cptvf, 1);
+	/* unlock command queue */
+	spin_unlock(&queue->lock);
+
+	return ret;
+}
+
+void do_request_cleanup(struct cpt_vf *cptvf,
+			struct cpt_info_buffer *info)
+{
+	int i;
+	struct pci_dev *pdev = cptvf->pdev;
+	struct cpt_request_info *req;
+
+	if (info->dptr_baddr)
+		dma_unmap_single(&pdev->dev, info->dptr_baddr,
+				 info->dlen, DMA_BIDIRECTIONAL);
+
+	if (info->rptr_baddr)
+		dma_unmap_single(&pdev->dev, info->rptr_baddr,
+				 COMPLETION_CODE_SIZE, DMA_BIDIRECTIONAL);
+
+	if (info->comp_baddr)
+		dma_unmap_single(&pdev->dev, info->comp_baddr,
+				 sizeof(union cpt_res_s), DMA_BIDIRECTIONAL);
+
+	if (info->req) {
+		req = info->req;
+		for (i = 0; i < req->outcnt; i++) {
+			if (req->out[i].dma_addr)
+				dma_unmap_single(&pdev->dev,
+						 req->out[i].dma_addr,
+						 req->out[i].size,
+						 DMA_BIDIRECTIONAL);
+		}
+
+		for (i = 0; i < req->incnt; i++) {
+			if (req->in[i].dma_addr)
+				dma_unmap_single(&pdev->dev,
+						 req->in[i].dma_addr,
+						 req->in[i].size,
+						 DMA_BIDIRECTIONAL);
+		}
+	}
+
+	if (info->scatter_components)
+		kzfree(info->scatter_components);
+
+	if (info->gather_components)
+		kzfree(info->gather_components);
+
+	if (info->out_buffer)
+		kzfree(info->out_buffer);
+
+	if (info->in_buffer)
+		kzfree(info->in_buffer);
+
+	if (info->completion_addr)
+		kzfree((void *)info->completion_addr);
+
+	kzfree(info);
+}
+
+void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+
+	if (!info) {
+		dev_err(&pdev->dev, "incorrect cpt_info_buffer for post processing\n");
+		return;
+	}
+
+	do_request_cleanup(cptvf, info);
+}
+
+static inline void process_pending_queue(struct cpt_vf *cptvf,
+					 struct pending_qinfo *pqinfo,
+					 int qno)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+	struct pending_queue *pqueue = &pqinfo->queue[qno];
+	struct pending_entry *pentry = NULL;
+	struct cpt_info_buffer *info = NULL;
+	union cpt_res_s *status = NULL;
+	unsigned char ccode;
+
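+	/*
+	 * Drain completed entries from the front of the pending queue:
+	 * report hardware faults, time out stale requests, re-arm entries
+	 * that are still in flight, and invoke the stored callback for
+	 * every request that finished.
+	 */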
+	while (1) {
+		spin_lock_bh(&pqueue->lock);
+		pentry = &pqueue->head[pqueue->front];
+		if (unlikely(!pentry->busy)) {
+			spin_unlock_bh(&pqueue->lock);
+			break;
+		}
+
+		info = (struct cpt_info_buffer *)pentry->post_arg;
+		if (unlikely(!info)) {
+			dev_err(&pdev->dev, "Pending Entry post arg NULL\n");
+			pending_queue_inc_front(pqinfo, qno);
+			spin_unlock_bh(&pqueue->lock);
+			continue;
+		}
+
+		status = (union cpt_res_s *)pentry->completion_addr;
+		ccode = status->s.compcode;
+		if ((status->s.compcode == CPT_COMP_E_FAULT) ||
+		    (status->s.compcode == CPT_COMP_E_SWERR)) {
+			dev_err(&pdev->dev, "Request failed with %s\n",
+				(status->s.compcode == CPT_COMP_E_FAULT) ?
+				"DMA Fault" : "Software error");
+			pentry->completion_addr = NULL;
+			pentry->busy = false;
+			atomic64_dec((&pqueue->pending_count));
+			pentry->post_arg = NULL;
+			pending_queue_inc_front(pqinfo, qno);
+			do_request_cleanup(cptvf, info);
+			spin_unlock_bh(&pqueue->lock);
+			break;
+		} else if (status->s.compcode == COMPLETION_CODE_INIT) {
+			/* check for timeout */
+			if (time_after_eq(jiffies,
+					  (info->time_in +
+					  (CPT_COMMAND_TIMEOUT * HZ)))) {
+				dev_err(&pdev->dev, "Request timed out");
+				pentry->completion_addr = NULL;
+				pentry->busy = false;
+				atomic64_dec((&pqueue->pending_count));
+				pentry->post_arg = NULL;
+				pending_queue_inc_front(pqinfo, qno);
+				do_request_cleanup(cptvf, info);
+				spin_unlock_bh(&pqueue->lock);
+				break;
+			} else if ((*info->alternate_caddr ==
+				(~COMPLETION_CODE_INIT)) &&
+				(info->extra_time < TIME_IN_RESET_COUNT)) {
+				info->time_in = jiffies;
+				info->extra_time++;
+				spin_unlock_bh(&pqueue->lock);
+				break;
+			}
+		}
+
+		pentry->completion_addr = NULL;
+		pentry->busy = false;
+		pentry->post_arg = NULL;
+		atomic64_dec((&pqueue->pending_count));
+		pending_queue_inc_front(pqinfo, qno);
+		spin_unlock_bh(&pqueue->lock);
+
+		do_post_process(info->cptvf, info);
+		/*
+		 * Calling callback after we find
+		 * that the request has been serviced
+		 */
+		pentry->callback(ccode, pentry->callback_arg);
+	}
+}
+
+int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
+{
+	int ret = 0, clear = 0, queue = 0;
+	struct cpt_info_buffer *info = NULL;
+	struct cptvf_request *cpt_req = NULL;
+	union ctrl_info *ctrl = NULL;
+	union cpt_res_s *result = NULL;
+	struct pending_entry *pentry = NULL;
+	struct pending_queue *pqueue = NULL;
+	struct pci_dev *pdev = cptvf->pdev;
+	u8 group = 0;
+	struct cpt_vq_command vq_cmd;
+	union cpt_inst_s cptinst;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (unlikely(!info)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
+		return -ENOMEM;
+	}
+
+	cpt_req = (struct cptvf_request *)&req->req;
+	ctrl = (union ctrl_info *)&req->ctrl;
+
+	info->cptvf = cptvf;
+	group = ctrl->s.grp;
+	ret = setup_sgio_list(cptvf, info, req);
+	if (ret) {
+		dev_err(&pdev->dev, "Setting up SG list failed");
+		goto request_cleanup;
+	}
+
+	cpt_req->dlen = info->dlen;
+	/*
+	 * Get buffer for union cpt_res_s response
+	 * structure and its physical address
+	 */
+	info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
+	if (unlikely(!info->completion_addr)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
+		ret = -ENOMEM;
+		goto request_cleanup;
+	}
+
+	result = (union cpt_res_s *)info->completion_addr;
+	result->s.compcode = COMPLETION_CODE_INIT;
+	info->comp_baddr = dma_map_single(&pdev->dev,
+					       (void *)info->completion_addr,
+					       sizeof(union cpt_res_s),
+					       DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&pdev->dev, info->comp_baddr)) {
+		dev_err(&pdev->dev, "mapping compptr Failed %lu\n",
+			sizeof(union cpt_res_s));
+		ret = -EFAULT;
+		goto  request_cleanup;
+	}
+
+	/* Fill the VQ command */
+	vq_cmd.cmd.u64 = 0;
+	vq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
+	vq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
+	vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
+	vq_cmd.cmd.s.dlen   = cpu_to_be16(cpt_req->dlen);
+
+	/* 64-bit swap for microcode data reads, not needed for addresses */
+	vq_cmd.cmd.u64 = cpu_to_be64(vq_cmd.cmd.u64);
+	vq_cmd.dptr = info->dptr_baddr;
+	vq_cmd.rptr = info->rptr_baddr;
+	vq_cmd.cptr.u64 = 0;
+	vq_cmd.cptr.s.grp = group;
+	/* Get Pending Entry to submit command */
+	/* Always queue 0, because 1 queue per VF */
+	queue = 0;
+	pqueue = &cptvf->pqinfo.queue[queue];
+
+	if (atomic64_read(&pqueue->pending_count) > PENDING_THOLD) {
+		dev_err(&pdev->dev, "pending threshold reached\n");
+		process_pending_queue(cptvf, &cptvf->pqinfo, queue);
+	}
+
+get_pending_entry:
+	spin_lock_bh(&pqueue->lock);
+	pentry = get_free_pending_entry(pqueue, cptvf->pqinfo.qlen);
+	if (unlikely(!pentry)) {
+		spin_unlock_bh(&pqueue->lock);
+		if (clear == 0) {
+			process_pending_queue(cptvf, &cptvf->pqinfo, queue);
+			clear = 1;
+			goto get_pending_entry;
+		}
+		dev_err(&pdev->dev, "Get free entry failed\n");
+		dev_err(&pdev->dev, "queue: %d, rear: %d, front: %d\n",
+			queue, pqueue->rear, pqueue->front);
+		ret = -EFAULT;
+		goto request_cleanup;
+	}
+
+	pentry->completion_addr = info->completion_addr;
+	pentry->post_arg = (void *)info;
+	pentry->callback = req->callback;
+	pentry->callback_arg = req->callback_arg;
+	info->pentry = pentry;
+	pentry->busy = true;
+	atomic64_inc(&pqueue->pending_count);
+
+	/* Send CPT command */
+	info->pentry = pentry;
+	info->time_in = jiffies;
+	info->req = req;
+
+	/* Create the CPT_INST_S type command for HW interpretation */
+	cptinst.s.doneint = true;
+	cptinst.s.res_addr = (u64)info->comp_baddr;
+	cptinst.s.tag = 0;
+	cptinst.s.grp = 0;
+	cptinst.s.wq_ptr = 0;
+	cptinst.s.ei0 = vq_cmd.cmd.u64;
+	cptinst.s.ei1 = vq_cmd.dptr;
+	cptinst.s.ei2 = vq_cmd.rptr;
+	cptinst.s.ei3 = vq_cmd.cptr.u64;
+
+	ret = send_cpt_command(cptvf, &cptinst, queue);
+	spin_unlock_bh(&pqueue->lock);
+	if (unlikely(ret)) {
+		dev_err(&pdev->dev, "Send command failed for AE\n");
+		ret = -EFAULT;
+		goto request_cleanup;
+	}
+
+	return 0;
+
+request_cleanup:
+	dev_dbg(&pdev->dev, "Failed to submit CPT command\n");
+	do_request_cleanup(cptvf, info);
+
+	return ret;
+}
+
+void vq_post_process(struct cpt_vf *cptvf, u32 qno)
+{
+	struct pci_dev *pdev = cptvf->pdev;
+
+	if (unlikely(qno > cptvf->nr_queues)) {
+		dev_err(&pdev->dev, "Request for post processing on invalid pending queue: %u\n",
+			qno);
+		return;
+	}
+
+	process_pending_queue(cptvf, &cptvf->pqinfo, qno);
+}
+
+int cptvf_do_request(void *vfdev, struct cpt_request_info *req)
+{
+	struct cpt_vf *cptvf = (struct cpt_vf *)vfdev;
+	struct pci_dev *pdev = cptvf->pdev;
+
+	if (!cpt_device_ready(cptvf)) {
+		dev_err(&pdev->dev, "CPT Device is not ready");
+		return -ENODEV;
+	}
+
+	if ((cptvf->vftype == SE_TYPES) && (!req->ctrl.s.se_req)) {
+		dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request",
+			cptvf->vfid);
+		return -EINVAL;
+	} else if ((cptvf->vftype == AE_TYPES) && (req->ctrl.s.se_req)) {
+		dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request",
+			cptvf->vfid);
+		return -EINVAL;
+	}
+
+	return process_request(cptvf, req);
+}
diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h
new file mode 100644
index 0000000..80ee074
--- /dev/null
+++ b/drivers/crypto/cavium/cpt/request_manager.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __REQUEST_MANAGER_H
+#define __REQUEST_MANAGER_H
+
+#include "cpt_common.h"
+
+#define TIME_IN_RESET_COUNT  5
+#define COMPLETION_CODE_SIZE 8
+#define COMPLETION_CODE_INIT 0
+#define PENDING_THOLD  100
+#define MAX_SG_IN_CNT 12
+#define MAX_SG_OUT_CNT 13
+#define SG_LIST_HDR_SIZE  8
+#define MAX_BUF_CNT	16
+
+union ctrl_info {
+	u32 flags;
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u32 reserved0:26;
+		u32 grp:3; /* Group bits */
+		u32 dma_mode:2; /* DMA mode */
+		u32 se_req:1;/* To SE core */
+#else
+		u32 se_req:1; /* To SE core */
+		u32 dma_mode:2; /* DMA mode */
+		u32 grp:3; /* Group bits */
+		u32 reserved0:26;
+#endif
+	} s;
+};
+
+union opcode_info {
+	u16 flags;
+	struct {
+		u8 major;
+		u8 minor;
+	} s;
+};
+
+struct cptvf_request {
+	union opcode_info opcode;
+	u16 param1;
+	u16 param2;
+	u16 dlen;
+};
+
+struct buf_ptr {
+	u8 *vptr;
+	dma_addr_t dma_addr;
+	u16 size;
+};
+
+struct cpt_request_info {
+	u8 incnt; /* Number of input buffers */
+	u8 outcnt; /* Number of output buffers */
+	u16 rlen; /* Output length */
+	union ctrl_info ctrl; /* User control information */
+	struct cptvf_request req; /* Request Information (Core specific) */
+
+	struct buf_ptr in[MAX_BUF_CNT];
+	struct buf_ptr out[MAX_BUF_CNT];
+
+	void (*callback)(int, void *); /* Kernel ASYNC request callback */
+	void *callback_arg; /* Kernel ASYNC request callback arg */
+};
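+
+/*
+ * Illustrative sketch, not part of the driver: a submitter typically fills
+ * a cpt_request_info and hands it to cptvf_do_request().  The opcode,
+ * buffers and callback below are placeholders; "cptvf" is a struct cpt_vf
+ * pointer owned by the VF driver.
+ *
+ *	struct cpt_request_info rinfo = {};
+ *
+ *	rinfo.ctrl.s.se_req = 1;		// route to an SE core
+ *	rinfo.ctrl.s.grp = 0;
+ *	rinfo.req.opcode.s.major = 0x1;		// placeholder opcode
+ *	rinfo.req.opcode.s.minor = 0x0;
+ *	rinfo.incnt = 1;
+ *	rinfo.in[0].vptr = in_buf;		// caller-provided buffers
+ *	rinfo.in[0].size = in_len;
+ *	rinfo.outcnt = 1;
+ *	rinfo.out[0].vptr = out_buf;
+ *	rinfo.out[0].size = out_len;
+ *	rinfo.callback = my_done_cb;		// void my_done_cb(int, void *)
+ *	rinfo.callback_arg = my_ctx;
+ *
+ *	ret = cptvf_do_request(cptvf, &rinfo);
+ */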
+
+struct sglist_component {
+	union {
+		u64 len;
+		struct {
+			u16 len0;
+			u16 len1;
+			u16 len2;
+			u16 len3;
+		} s;
+	} u;
+	u64 ptr0;
+	u64 ptr1;
+	u64 ptr2;
+	u64 ptr3;
+};
+
+struct cpt_info_buffer {
+	struct cpt_vf *cptvf;
+	unsigned long time_in;
+	u8 extra_time;
+
+	struct cpt_request_info *req;
+	dma_addr_t dptr_baddr;
+	u32 dlen;
+	dma_addr_t rptr_baddr;
+	dma_addr_t comp_baddr;
+	u8 *in_buffer;
+	u8 *out_buffer;
+	u8 *gather_components;
+	u8 *scatter_components;
+
+	struct pending_entry *pentry;
+	volatile u64 *completion_addr;
+	volatile u64 *alternate_caddr;
+};
+
+/*
+ * CPT_INST_S software command definitions
+ * Words EI (0-3)
+ */
+union vq_cmd_word0 {
+	u64 u64;
+	struct {
+		u16 opcode;
+		u16 param1;
+		u16 param2;
+		u16 dlen;
+	} s;
+};
+
+union vq_cmd_word3 {
+	u64 u64;
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 grp:3;
+		u64 cptr:61;
+#else
+		u64 cptr:61;
+		u64 grp:3;
+#endif
+	} s;
+};
+
+struct cpt_vq_command {
+	union vq_cmd_word0 cmd;
+	u64 dptr;
+	u64 rptr;
+	union vq_cmd_word3 cptr;
+};
+
+void vq_post_process(struct cpt_vf *cptvf, u32 qno);
+int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req);
+#endif /* __REQUEST_MANAGER_H */
diff --git a/drivers/crypto/cavium/nitrox/Kconfig b/drivers/crypto/cavium/nitrox/Kconfig
new file mode 100644
index 0000000..181a1df
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/Kconfig
@@ -0,0 +1,20 @@
+#
+# Cavium NITROX Crypto Device configuration
+#
+config CRYPTO_DEV_NITROX
+	tristate
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	select CRYPTO_DES
+	select FW_LOADER
+
+config CRYPTO_DEV_NITROX_CNN55XX
+	tristate "Support for Cavium CNN55XX driver"
+	depends on PCI_MSI && 64BIT
+	select CRYPTO_DEV_NITROX
+	help
+	  Support for Cavium NITROX family CNN55XX driver
+	  for accelerating crypto workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called n5pf.
diff --git a/drivers/crypto/cavium/nitrox/Makefile b/drivers/crypto/cavium/nitrox/Makefile
new file mode 100644
index 0000000..45b7379
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_NITROX_CNN55XX) += n5pf.o
+
+n5pf-objs := nitrox_main.o \
+	nitrox_isr.o \
+	nitrox_lib.o \
+	nitrox_hal.o \
+	nitrox_reqmgr.o \
+	nitrox_algs.o
diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c
new file mode 100644
index 0000000..2ae6124
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+#include <crypto/aes.h>
+#include <crypto/skcipher.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/xts.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_common.h"
+#include "nitrox_req.h"
+
+#define PRIO 4001
+
+struct nitrox_cipher {
+	const char *name;
+	enum flexi_cipher value;
+};
+
+/* supported cipher list */
+static const struct nitrox_cipher flexi_cipher_table[] = {
+	{ "null",		CIPHER_NULL },
+	{ "cbc(des3_ede)",	CIPHER_3DES_CBC },
+	{ "ecb(des3_ede)",	CIPHER_3DES_ECB },
+	{ "cbc(aes)",		CIPHER_AES_CBC },
+	{ "ecb(aes)",		CIPHER_AES_ECB },
+	{ "cfb(aes)",		CIPHER_AES_CFB },
+	{ "rfc3686(ctr(aes))",	CIPHER_AES_CTR },
+	{ "xts(aes)",		CIPHER_AES_XTS },
+	{ "cts(cbc(aes))",	CIPHER_AES_CBC_CTS },
+	{ NULL,			CIPHER_INVALID }
+};
+
+static enum flexi_cipher flexi_cipher_type(const char *name)
+{
+	const struct nitrox_cipher *cipher = flexi_cipher_table;
+
+	while (cipher->name) {
+		if (!strcmp(cipher->name, name))
+			break;
+		cipher++;
+	}
+	return cipher->value;
+}
+
+static int flexi_aes_keylen(int keylen)
+{
+	int aes_keylen;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		aes_keylen = 1;
+		break;
+	case AES_KEYSIZE_192:
+		aes_keylen = 2;
+		break;
+	case AES_KEYSIZE_256:
+		aes_keylen = 3;
+		break;
+	default:
+		aes_keylen = -EINVAL;
+		break;
+	}
+	return aes_keylen;
+}
+
+static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
+{
+	struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
+	void *fctx;
+
+	/* get the first device */
+	nctx->ndev = nitrox_get_first_device();
+	if (!nctx->ndev)
+		return -ENODEV;
+
+	/* allocate nitrox crypto context */
+	fctx = crypto_alloc_context(nctx->ndev);
+	if (!fctx) {
+		nitrox_put_device(nctx->ndev);
+		return -ENOMEM;
+	}
+	nctx->u.ctx_handle = (uintptr_t)fctx;
+	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
+				    sizeof(struct nitrox_kcrypt_request));
+	return 0;
+}
+
+static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
+{
+	struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
+
+	/* free the nitrox crypto context */
+	if (nctx->u.ctx_handle) {
+		struct flexi_crypto_context *fctx = nctx->u.fctx;
+
+		memset(&fctx->crypto, 0, sizeof(struct crypto_keys));
+		memset(&fctx->auth, 0, sizeof(struct auth_keys));
+		crypto_free_context((void *)fctx);
+	}
+	nitrox_put_device(nctx->ndev);
+
+	nctx->u.ctx_handle = 0;
+	nctx->ndev = NULL;
+}
+
+static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher,
+					 int aes_keylen, const u8 *key,
+					 unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+	struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+	struct flexi_crypto_context *fctx;
+	enum flexi_cipher cipher_type;
+	const char *name;
+
+	name = crypto_tfm_alg_name(tfm);
+	cipher_type = flexi_cipher_type(name);
+	if (unlikely(cipher_type == CIPHER_INVALID)) {
+		pr_err("unsupported cipher: %s\n", name);
+		return -EINVAL;
+	}
+
+	/* fill crypto context */
+	fctx = nctx->u.fctx;
+	fctx->flags = 0;
+	fctx->w0.cipher_type = cipher_type;
+	fctx->w0.aes_keylen = aes_keylen;
+	fctx->w0.iv_source = IV_FROM_DPTR;
+	fctx->flags = cpu_to_be64(*(u64 *)&fctx->w0);
+	/* copy the key to context */
+	memcpy(fctx->crypto.u.key, key, keylen);
+
+	return 0;
+}
+
+static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
+			     unsigned int keylen)
+{
+	int aes_keylen;
+
+	aes_keylen = flexi_aes_keylen(keylen);
+	if (aes_keylen < 0) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
+}
+
+static void nitrox_skcipher_callback(struct skcipher_request *skreq,
+				     int err)
+{
+	if (err) {
+		pr_err_ratelimited("request failed status 0x%0x\n", err);
+		err = -EINVAL;
+	}
+	skcipher_request_complete(skreq, err);
+}
+
+static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc)
+{
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
+	struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher);
+	struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+	int ivsize = crypto_skcipher_ivsize(cipher);
+	struct se_crypto_request *creq;
+
+	creq = &nkreq->creq;
+	creq->flags = skreq->base.flags;
+	creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		     GFP_KERNEL : GFP_ATOMIC;
+
+	/* fill the request */
+	creq->ctrl.value = 0;
+	creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
+	creq->ctrl.s.arg = (enc ? ENCRYPT : DECRYPT);
+	/* param0: length of the data to be encrypted */
+	creq->gph.param0 = cpu_to_be16(skreq->cryptlen);
+	creq->gph.param1 = 0;
+	/* param2: encryption data offset */
+	creq->gph.param2 = cpu_to_be16(ivsize);
+	creq->gph.param3 = 0;
+
+	creq->ctx_handle = nctx->u.ctx_handle;
+	creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
+
+	/* copy the iv */
+	memcpy(creq->iv, skreq->iv, ivsize);
+	creq->ivsize = ivsize;
+	creq->src = skreq->src;
+	creq->dst = skreq->dst;
+
+	nkreq->nctx = nctx;
+	nkreq->skreq = skreq;
+
+	/* send the crypto request */
+	return nitrox_process_se_request(nctx->ndev, creq,
+					 nitrox_skcipher_callback, skreq);
+}
+
+static int nitrox_aes_encrypt(struct skcipher_request *skreq)
+{
+	return nitrox_skcipher_crypt(skreq, true);
+}
+
+static int nitrox_aes_decrypt(struct skcipher_request *skreq)
+{
+	return nitrox_skcipher_crypt(skreq, false);
+}
+
+static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
+			      const u8 *key, unsigned int keylen)
+{
+	if (keylen != DES3_EDE_KEY_SIZE) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	return nitrox_skcipher_setkey(cipher, 0, key, keylen);
+}
+
+static int nitrox_3des_encrypt(struct skcipher_request *skreq)
+{
+	return nitrox_skcipher_crypt(skreq, true);
+}
+
+static int nitrox_3des_decrypt(struct skcipher_request *skreq)
+{
+	return nitrox_skcipher_crypt(skreq, false);
+}
+
+static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
+				 const u8 *key, unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+	struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+	struct flexi_crypto_context *fctx;
+	int aes_keylen, ret;
+
+	ret = xts_check_key(tfm, key, keylen);
+	if (ret)
+		return ret;
+
+	keylen /= 2;
+
+	aes_keylen = flexi_aes_keylen(keylen);
+	if (aes_keylen < 0) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	fctx = nctx->u.fctx;
+	/* copy KEY2 */
+	memcpy(fctx->auth.u.key2, (key + keylen), keylen);
+
+	return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
+}
+
+static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
+					 const u8 *key, unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+	struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+	struct flexi_crypto_context *fctx;
+	int aes_keylen;
+
+	if (keylen < CTR_RFC3686_NONCE_SIZE)
+		return -EINVAL;
+
+	fctx = nctx->u.fctx;
+
+	memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE),
+	       CTR_RFC3686_NONCE_SIZE);
+
+	keylen -= CTR_RFC3686_NONCE_SIZE;
+
+	aes_keylen = flexi_aes_keylen(keylen);
+	if (aes_keylen < 0) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
+}
+
+static struct skcipher_alg nitrox_skciphers[] = { {
+	.base = {
+		.cra_name = "cbc(aes)",
+		.cra_driver_name = "n5_cbc(aes)",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = AES_MIN_KEY_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE,
+	.ivsize = AES_BLOCK_SIZE,
+	.setkey = nitrox_aes_setkey,
+	.encrypt = nitrox_aes_encrypt,
+	.decrypt = nitrox_aes_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}, {
+	.base = {
+		.cra_name = "ecb(aes)",
+		.cra_driver_name = "n5_ecb(aes)",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = AES_MIN_KEY_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE,
+	.ivsize = AES_BLOCK_SIZE,
+	.setkey = nitrox_aes_setkey,
+	.encrypt = nitrox_aes_encrypt,
+	.decrypt = nitrox_aes_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}, {
+	.base = {
+		.cra_name = "cfb(aes)",
+		.cra_driver_name = "n5_cfb(aes)",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = AES_MIN_KEY_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE,
+	.ivsize = AES_BLOCK_SIZE,
+	.setkey = nitrox_aes_setkey,
+	.encrypt = nitrox_aes_encrypt,
+	.decrypt = nitrox_aes_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}, {
+	.base = {
+		.cra_name = "xts(aes)",
+		.cra_driver_name = "n5_xts(aes)",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = 2 * AES_MIN_KEY_SIZE,
+	.max_keysize = 2 * AES_MAX_KEY_SIZE,
+	.ivsize = AES_BLOCK_SIZE,
+	.setkey = nitrox_aes_xts_setkey,
+	.encrypt = nitrox_aes_encrypt,
+	.decrypt = nitrox_aes_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}, {
+	.base = {
+		.cra_name = "rfc3686(ctr(aes))",
+		.cra_driver_name = "n5_rfc3686(ctr(aes))",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = 1,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+	.ivsize = CTR_RFC3686_IV_SIZE,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+	.setkey = nitrox_aes_ctr_rfc3686_setkey,
+	.encrypt = nitrox_aes_encrypt,
+	.decrypt = nitrox_aes_decrypt,
+}, {
+	.base = {
+		.cra_name = "cts(cbc(aes))",
+		.cra_driver_name = "n5_cts(cbc(aes))",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = AES_MIN_KEY_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE,
+	.ivsize = AES_BLOCK_SIZE,
+	.setkey = nitrox_aes_setkey,
+	.encrypt = nitrox_aes_encrypt,
+	.decrypt = nitrox_aes_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}, {
+	.base = {
+		.cra_name = "cbc(des3_ede)",
+		.cra_driver_name = "n5_cbc(des3_ede)",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = DES3_EDE_KEY_SIZE,
+	.max_keysize = DES3_EDE_KEY_SIZE,
+	.ivsize = DES3_EDE_BLOCK_SIZE,
+	.setkey = nitrox_3des_setkey,
+	.encrypt = nitrox_3des_encrypt,
+	.decrypt = nitrox_3des_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}, {
+	.base = {
+		.cra_name = "ecb(des3_ede)",
+		.cra_driver_name = "n5_ecb(des3_ede)",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.min_keysize = DES3_EDE_KEY_SIZE,
+	.max_keysize = DES3_EDE_KEY_SIZE,
+	.ivsize = DES3_EDE_BLOCK_SIZE,
+	.setkey = nitrox_3des_setkey,
+	.encrypt = nitrox_3des_encrypt,
+	.decrypt = nitrox_3des_decrypt,
+	.init = nitrox_skcipher_init,
+	.exit = nitrox_skcipher_exit,
+}
+};
+
+int nitrox_crypto_register(void)
+{
+	return crypto_register_skciphers(nitrox_skciphers,
+					 ARRAY_SIZE(nitrox_skciphers));
+}
+
+void nitrox_crypto_unregister(void)
+{
+	crypto_unregister_skciphers(nitrox_skciphers,
+				    ARRAY_SIZE(nitrox_skciphers));
+}
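+
+/*
+ * Illustrative sketch, not part of the driver: once nitrox_crypto_register()
+ * has run, these implementations are reached through the regular skcipher
+ * API.  Asking for "cbc(aes)" picks the highest-priority provider; asking
+ * for the driver name "n5_cbc(aes)" selects this hardware explicitly.
+ *
+ *	struct crypto_skcipher *tfm;
+ *
+ *	tfm = crypto_alloc_skcipher("n5_cbc(aes)", 0, 0);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
+ *	// ... build and submit skcipher_requests as usual ...
+ *	crypto_free_skcipher(tfm);
+ */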
diff --git a/drivers/crypto/cavium/nitrox/nitrox_common.h b/drivers/crypto/cavium/nitrox/nitrox_common.h
new file mode 100644
index 0000000..312f728
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_common.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_COMMON_H
+#define __NITROX_COMMON_H
+
+#include "nitrox_dev.h"
+#include "nitrox_req.h"
+
+int nitrox_crypto_register(void);
+void nitrox_crypto_unregister(void);
+void *crypto_alloc_context(struct nitrox_device *ndev);
+void crypto_free_context(void *ctx);
+struct nitrox_device *nitrox_get_first_device(void);
+void nitrox_put_device(struct nitrox_device *ndev);
+
+void nitrox_pf_cleanup_isr(struct nitrox_device *ndev);
+int nitrox_pf_init_isr(struct nitrox_device *ndev);
+
+int nitrox_common_sw_init(struct nitrox_device *ndev);
+void nitrox_common_sw_cleanup(struct nitrox_device *ndev);
+
+void pkt_slc_resp_handler(unsigned long data);
+int nitrox_process_se_request(struct nitrox_device *ndev,
+			      struct se_crypto_request *req,
+			      completion_t cb,
+			      struct skcipher_request *skreq);
+void backlog_qflush_work(struct work_struct *work);
+
+void nitrox_config_emu_unit(struct nitrox_device *ndev);
+void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
+void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
+void nitrox_config_vfmode(struct nitrox_device *ndev, int mode);
+void nitrox_config_nps_unit(struct nitrox_device *ndev);
+void nitrox_config_pom_unit(struct nitrox_device *ndev);
+void nitrox_config_rand_unit(struct nitrox_device *ndev);
+void nitrox_config_efl_unit(struct nitrox_device *ndev);
+void nitrox_config_bmi_unit(struct nitrox_device *ndev);
+void nitrox_config_bmo_unit(struct nitrox_device *ndev);
+void nitrox_config_lbc_unit(struct nitrox_device *ndev);
+void invalidate_lbc(struct nitrox_device *ndev);
+void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
+void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
+
+#endif /* __NITROX_COMMON_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h
new file mode 100644
index 0000000..9dcb7fd
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h
@@ -0,0 +1,1085 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_CSR_H
+#define __NITROX_CSR_H
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+
+/* EMU clusters */
+#define NR_CLUSTERS		4
+#define AE_CORES_PER_CLUSTER	20
+#define SE_CORES_PER_CLUSTER	16
+
+/* BIST registers */
+#define EMU_BIST_STATUSX(_i)	(0x1402700 + ((_i) * 0x40000))
+#define UCD_BIST_STATUS		0x12C0070
+#define NPS_CORE_BIST_REG	0x10000E8
+#define NPS_CORE_NPC_BIST_REG	0x1000128
+#define NPS_PKT_SLC_BIST_REG	0x1040088
+#define NPS_PKT_IN_BIST_REG	0x1040100
+#define POM_BIST_REG		0x11C0100
+#define BMI_BIST_REG		0x1140080
+#define EFL_CORE_BIST_REGX(_i)	(0x1240100 + ((_i) * 0x400))
+#define EFL_TOP_BIST_STAT	0x1241090
+#define BMO_BIST_REG		0x1180080
+#define LBC_BIST_STATUS		0x1200020
+#define PEM_BIST_STATUSX(_i)	(0x1080468 | ((_i) << 18))
+
+/* EMU registers */
+#define EMU_SE_ENABLEX(_i)	(0x1400000 + ((_i) * 0x40000))
+#define EMU_AE_ENABLEX(_i)	(0x1400008 + ((_i) * 0x40000))
+#define EMU_WD_INT_ENA_W1SX(_i)	(0x1402318 + ((_i) * 0x40000))
+#define EMU_GE_INT_ENA_W1SX(_i)	(0x1402518 + ((_i) * 0x40000))
+#define EMU_FUSE_MAPX(_i)	(0x1402708 + ((_i) * 0x40000))
+
+/* UCD registers */
+#define UCD_UCODE_LOAD_BLOCK_NUM	0x12C0010
+#define UCD_UCODE_LOAD_IDX_DATAX(_i)	(0x12C0018 + ((_i) * 0x20))
+#define UCD_SE_EID_UCODE_BLOCK_NUMX(_i)	(0x12C0000 + ((_i) * 0x1000))
+
+/* NPS core registers */
+#define NPS_CORE_GBL_VFCFG	0x1000000
+#define NPS_CORE_CONTROL	0x1000008
+#define NPS_CORE_INT_ACTIVE	0x1000080
+#define NPS_CORE_INT		0x10000A0
+#define NPS_CORE_INT_ENA_W1S	0x10000B8
+#define NPS_STATS_PKT_DMA_RD_CNT	0x1000180
+#define NPS_STATS_PKT_DMA_WR_CNT	0x1000190
+
+/* NPS packet registers */
+#define NPS_PKT_INT				0x1040018
+#define NPS_PKT_IN_RERR_HI		0x1040108
+#define NPS_PKT_IN_RERR_HI_ENA_W1S	0x1040120
+#define NPS_PKT_IN_RERR_LO		0x1040128
+#define NPS_PKT_IN_RERR_LO_ENA_W1S	0x1040140
+#define NPS_PKT_IN_ERR_TYPE		0x1040148
+#define NPS_PKT_IN_ERR_TYPE_ENA_W1S	0x1040160
+#define NPS_PKT_IN_INSTR_CTLX(_i)	(0x10060 + ((_i) * 0x40000))
+#define NPS_PKT_IN_INSTR_BADDRX(_i)	(0x10068 + ((_i) * 0x40000))
+#define NPS_PKT_IN_INSTR_RSIZEX(_i)	(0x10070 + ((_i) * 0x40000))
+#define NPS_PKT_IN_DONE_CNTSX(_i)	(0x10080 + ((_i) * 0x40000))
+#define NPS_PKT_IN_INSTR_BAOFF_DBELLX(_i)	(0x10078 + ((_i) * 0x40000))
+#define NPS_PKT_IN_INT_LEVELSX(_i)		(0x10088 + ((_i) * 0x40000))
+
+#define NPS_PKT_SLC_RERR_HI		0x1040208
+#define NPS_PKT_SLC_RERR_HI_ENA_W1S	0x1040220
+#define NPS_PKT_SLC_RERR_LO		0x1040228
+#define NPS_PKT_SLC_RERR_LO_ENA_W1S	0x1040240
+#define NPS_PKT_SLC_ERR_TYPE		0x1040248
+#define NPS_PKT_SLC_ERR_TYPE_ENA_W1S	0x1040260
+#define NPS_PKT_SLC_CTLX(_i)		(0x10000 + ((_i) * 0x40000))
+#define NPS_PKT_SLC_CNTSX(_i)		(0x10008 + ((_i) * 0x40000))
+#define NPS_PKT_SLC_INT_LEVELSX(_i)	(0x10010 + ((_i) * 0x40000))
+
+/* POM registers */
+#define POM_INT_ENA_W1S		0x11C0018
+#define POM_GRP_EXECMASKX(_i)	(0x11C1100 | ((_i) * 8))
+#define POM_INT		0x11C0000
+#define POM_PERF_CTL	0x11CC400
+
+/* BMI registers */
+#define BMI_INT		0x1140000
+#define BMI_CTL		0x1140020
+#define BMI_INT_ENA_W1S	0x1140018
+#define BMI_NPS_PKT_CNT	0x1140070
+
+/* EFL registers */
+#define EFL_CORE_INT_ENA_W1SX(_i)		(0x1240018 + ((_i) * 0x400))
+#define EFL_CORE_VF_ERR_INT0X(_i)		(0x1240050 + ((_i) * 0x400))
+#define EFL_CORE_VF_ERR_INT0_ENA_W1SX(_i)	(0x1240068 + ((_i) * 0x400))
+#define EFL_CORE_VF_ERR_INT1X(_i)		(0x1240070 + ((_i) * 0x400))
+#define EFL_CORE_VF_ERR_INT1_ENA_W1SX(_i)	(0x1240088 + ((_i) * 0x400))
+#define EFL_CORE_SE_ERR_INTX(_i)		(0x12400A0 + ((_i) * 0x400))
+#define EFL_RNM_CTL_STATUS			0x1241800
+#define EFL_CORE_INTX(_i)			(0x1240000 + ((_i) * 0x400))
+
+/* BMO registers */
+#define BMO_CTL2		0x1180028
+#define BMO_NPS_SLC_PKT_CNT	0x1180078
+
+/* LBC registers */
+#define LBC_INT			0x1200000
+#define LBC_INVAL_CTL		0x1201010
+#define LBC_PLM_VF1_64_INT	0x1202008
+#define LBC_INVAL_STATUS	0x1202010
+#define LBC_INT_ENA_W1S		0x1203000
+#define LBC_PLM_VF1_64_INT_ENA_W1S	0x1205008
+#define LBC_PLM_VF65_128_INT		0x1206008
+#define LBC_ELM_VF1_64_INT		0x1208000
+#define LBC_PLM_VF65_128_INT_ENA_W1S	0x1209008
+#define LBC_ELM_VF1_64_INT_ENA_W1S	0x120B000
+#define LBC_ELM_VF65_128_INT		0x120C000
+#define LBC_ELM_VF65_128_INT_ENA_W1S	0x120F000
+
+/* PEM registers */
+#define PEM0_INT 0x1080428
+
+/**
+ * struct emu_fuse_map - EMU Fuse Map Registers
+ * @ae_fuse: Fuse settings for AE 19..0
+ * @se_fuse: Fuse settings for SE 15..0
+ *
+ * A set bit indicates the unit is fuse disabled.
+ */
+union emu_fuse_map {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 valid : 1;
+		u64 raz_52_62 : 11;
+		u64 ae_fuse : 20;
+		u64 raz_16_31 : 16;
+		u64 se_fuse : 16;
+#else
+		u64 se_fuse : 16;
+		u64 raz_16_31 : 16;
+		u64 ae_fuse : 20;
+		u64 raz_52_62 : 11;
+		u64 valid : 1;
+#endif
+	} s;
+};
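+
+/*
+ * Illustrative sketch, not part of the driver: since a set fuse bit means
+ * the engine is disabled, the usable SE cores of a cluster can be derived
+ * from the fuse map (nitrox_read_csr() comes from nitrox_dev.h):
+ *
+ *	union emu_fuse_map fuse;
+ *
+ *	fuse.value = nitrox_read_csr(ndev, EMU_FUSE_MAPX(cluster));
+ *	if (fuse.s.valid)
+ *		se_cores = SE_CORES_PER_CLUSTER - hweight16(fuse.s.se_fuse);
+ */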
+
+/**
+ * struct emu_se_enable - Symmetric Engine Enable Registers
+ * @enable: Individual enables for each of the cluster's
+ *   16 symmetric engines.
+ */
+union emu_se_enable {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz	: 48;
+		u64 enable : 16;
+#else
+		u64 enable : 16;
+		u64 raz	: 48;
+#endif
+	} s;
+};
+
+/**
+ * struct emu_ae_enable - EMU Asymmetric engines.
+ * @enable: Individual enables for each of the cluster's
+ *   20 Asymmetric Engines.
+ */
+union emu_ae_enable {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz	: 44;
+		u64 enable : 20;
+#else
+		u64 enable : 20;
+		u64 raz	: 44;
+#endif
+	} s;
+};
+
+/**
+ * struct emu_wd_int_ena_w1s - EMU Interrupt Enable Registers
+ * @ae_wd: Reads or sets enable for EMU(0..3)_WD_INT[AE_WD]
+ * @se_wd: Reads or sets enable for EMU(0..3)_WD_INT[SE_WD]
+ */
+union emu_wd_int_ena_w1s {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz2 : 12;
+		u64 ae_wd : 20;
+		u64 raz1 : 16;
+		u64 se_wd : 16;
+#else
+		u64 se_wd : 16;
+		u64 raz1 : 16;
+		u64 ae_wd : 20;
+		u64 raz2 : 12;
+#endif
+	} s;
+};
+
+/**
+ * struct emu_ge_int_ena_w1s - EMU Interrupt Enable set registers
+ * @ae_ge: Reads or sets enable for EMU(0..3)_GE_INT[AE_GE]
+ * @se_ge: Reads or sets enable for EMU(0..3)_GE_INT[SE_GE]
+ */
+union emu_ge_int_ena_w1s {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz_52_63 : 12;
+		u64 ae_ge : 20;
+		u64 raz_16_31: 16;
+		u64 se_ge : 16;
+#else
+		u64 se_ge : 16;
+		u64 raz_16_31: 16;
+		u64 ae_ge : 20;
+		u64 raz_52_63 : 12;
+#endif
+	} s;
+};
+
+/**
+ * struct nps_pkt_slc_ctl - Solicited Packet Out Control Registers
+ * @rh: Indicates whether to remove or include the response header
+ *   1 = Include, 0 = Remove
+ * @z: If set, 8 trailing 0x00 bytes will be added to the end of the
+ *   outgoing packet.
+ * @enb: Enable for this port.
+ */
+union nps_pkt_slc_ctl {
+	u64 value;
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 raz : 61;
+		u64 rh : 1;
+		u64 z : 1;
+		u64 enb : 1;
+#else
+		u64 enb : 1;
+		u64 z : 1;
+		u64 rh : 1;
+		u64 raz : 61;
+#endif
+	} s;
+};
+
+/**
+ * struct nps_pkt_slc_cnts - Solicited Packet Out Count Registers
+ * @slc_int: Returns a 1 when:
+ *   NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or
+ *   NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET].
+ *   To clear the bit, the CNTS register must be written to clear.
+ * @in_int: Returns a 1 when:
+ *   NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT].
+ *   To clear the bit, the DONE_CNTS register must be written to clear.
+ * @mbox_int: Returns a 1 when:
+ *   NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set. To clear the bit,
+ *   write NPS_PKT_MBOX_PF_VF(i)_INT[INTR] with 1.
+ * @timer: Timer, incremented every 2048 coprocessor clock cycles
+ *   when [CNT] is not zero. The hardware clears both [TIMER] and
+ *   [INT] when [CNT] goes to 0.
+ * @cnt: Packet counter. Hardware adds to [CNT] as it sends packets out.
+ *   On a write to this CSR, hardware subtracts the amount written to the
+ *   [CNT] field from [CNT].
+ */
+union nps_pkt_slc_cnts {
+	u64 value;
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 slc_int : 1;
+		u64 uns_int : 1;
+		u64 in_int : 1;
+		u64 mbox_int : 1;
+		u64 resend : 1;
+		u64 raz : 5;
+		u64 timer : 22;
+		u64 cnt : 32;
+#else
+		u64 cnt	: 32;
+		u64 timer : 22;
+		u64 raz	: 5;
+		u64 resend : 1;
+		u64 mbox_int : 1;
+		u64 in_int : 1;
+		u64 uns_int : 1;
+		u64 slc_int : 1;
+#endif
+	} s;
+};
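+
+/*
+ * Illustrative sketch, not part of the driver: because hardware subtracts
+ * whatever is written to [CNT], a response handler acknowledges the
+ * completions it has drained by writing that number back, which also lets
+ * [SLC_INT] clear once the count drops below the interrupt level:
+ *
+ *	union nps_pkt_slc_cnts ack;
+ *
+ *	ack.value = 0;
+ *	ack.s.cnt = processed;	// completions handled for this port
+ *	nitrox_write_csr(ndev, NPS_PKT_SLC_CNTSX(port), ack.value);
+ */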
+
+/**
+ * struct nps_pkt_slc_int_levels - Solicited Packet Out Interrupt Levels
+ *   Registers.
+ * @bmode: Determines whether NPS_PKT_SLC_CNTS[CNT] is a byte or
+ *   packet counter.
+ * @timet: Output port counter time interrupt threshold.
+ * @cnt: Output port counter interrupt threshold.
+ */
+union nps_pkt_slc_int_levels {
+	u64 value;
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 bmode : 1;
+		u64 raz	: 9;
+		u64 timet : 22;
+		u64 cnt	: 32;
+#else
+		u64 cnt : 32;
+		u64 timet : 22;
+		u64 raz : 9;
+		u64 bmode : 1;
+#endif
+	} s;
+};
+
+/**
+ * struct nps_pkt_int - NPS Packet Interrupt Register
+ * @in_err: Set when any NPS_PKT_IN_RERR_HI/LO bit and
+ *    corresponding NPS_PKT_IN_RERR_*_ENA_* bit are both set.
+ * @uns_err: Set when any NPS_PKT_UNS_RERR_HI/LO bit and
+ *    corresponding NPS_PKT_UNS_RERR_*_ENA_* bit are both set.
+ * @slc_err: Set when any NPS_PKT_SLC_RERR_HI/LO bit and
+ *    corresponding NPS_PKT_SLC_RERR_*_ENA_* bit are both set.
+ */
+union nps_pkt_int {
+	u64 value;
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 raz	: 54;
+		u64 uns_wto : 1;
+		u64 in_err : 1;
+		u64 uns_err : 1;
+		u64 slc_err : 1;
+		u64 in_dbe : 1;
+		u64 in_sbe : 1;
+		u64 uns_dbe : 1;
+		u64 uns_sbe : 1;
+		u64 slc_dbe : 1;
+		u64 slc_sbe : 1;
+#else
+		u64 slc_sbe : 1;
+		u64 slc_dbe : 1;
+		u64 uns_sbe : 1;
+		u64 uns_dbe : 1;
+		u64 in_sbe : 1;
+		u64 in_dbe : 1;
+		u64 slc_err : 1;
+		u64 uns_err : 1;
+		u64 in_err : 1;
+		u64 uns_wto : 1;
+		u64 raz	: 54;
+#endif
+	} s;
+};
+
+/**
+ * struct nps_pkt_in_done_cnts - Input instruction ring counts registers
+ * @slc_int: Returns a 1 when:
+ *    NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or
+ *    NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET]
+ *    To clear the bit, the CNTS register must be
+ *    written to clear the underlying condition
+ * @uns_int: Returns a 1 when:
+ *    NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT], or
+ *    NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET]
+ *    To clear the bit, the CNTS register must be
+ *    written to clear the underlying condition
+ * @in_int: Returns a 1 when:
+ *    NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT]
+ *    To clear the bit, the DONE_CNTS register
+ *    must be written to clear the underlying condition
+ * @mbox_int: Returns a 1 when:
+ *    NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set.
+ *    To clear the bit, write NPS_PKT_MBOX_PF_VF(i)_INT[INTR]
+ *    with 1.
+ * @resend: A write of 1 will resend an MSI-X interrupt message if any
+ *    of the following conditions are true for this ring "i".
+ *    NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT]
+ *    NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET]
+ *    NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT]
+ *    NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET]
+ *    NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT]
+ *    NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set
+ * @cnt: Packet counter. Hardware adds to [CNT] as it reads
+ *    packets. On a write to this CSR, hardware subtracts the
+ *    amount written to the [CNT] field from [CNT], which will
+ *    clear PKT_IN(i)_INT_STATUS[INTR] if [CNT] becomes <=
+ *    NPS_PKT_IN(i)_INT_LEVELS[CNT]. This register should be
+ *    cleared before enabling a ring by reading the current
+ *    value and writing it back.
+ */
+union nps_pkt_in_done_cnts {
+	u64 value;
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 slc_int : 1;
+		u64 uns_int : 1;
+		u64 in_int : 1;
+		u64 mbox_int : 1;
+		u64 resend : 1;
+		u64 raz : 27;
+		u64 cnt	: 32;
+#else
+		u64 cnt	: 32;
+		u64 raz	: 27;
+		u64 resend : 1;
+		u64 mbox_int : 1;
+		u64 in_int : 1;
+		u64 uns_int : 1;
+		u64 slc_int : 1;
+#endif
+	} s;
+};
+
+/**
+ * struct nps_pkt_in_instr_ctl - Input Instruction Ring Control Registers.
+ * @is64b: If 1, the ring uses 64-byte instructions. If 0, the
+ *   ring uses 32-byte instructions.
+ * @enb: Enable for the input ring.
+ */
+union nps_pkt_in_instr_ctl {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz	: 62;
+		u64 is64b : 1;
+		u64 enb	: 1;
+#else
+		u64 enb	: 1;
+		u64 is64b : 1;
+		u64 raz : 62;
+#endif
+	} s;
+};
+
+/**
+ * struct nps_pkt_in_instr_rsize - Input instruction ring size registers
+ * @rsize: Ring size (number of instructions)
+ */
+union nps_pkt_in_instr_rsize {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz	: 32;
+		u64 rsize : 32;
+#else
+		u64 rsize : 32;
+		u64 raz	: 32;
+#endif
+	} s;
+};
+
+/**
+ * struct nps_pkt_in_instr_baoff_dbell - Input instruction ring
+ *   base address offset and doorbell registers
+ * @aoff: Address offset. The offset from the NPS_PKT_IN_INSTR_BADDR
+ *   where the next pointer is read.
+ * @dbell: Pointer list doorbell count. Write operations to this field
+ *   increment the present value here. Read operations return the
+ *   present value.
+ */
+union nps_pkt_in_instr_baoff_dbell {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 aoff : 32;
+		u64 dbell : 32;
+#else
+		u64 dbell : 32;
+		u64 aoff : 32;
+#endif
+	} s;
+};
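+
+/*
+ * Illustrative sketch, not part of the driver: after instructions have been
+ * copied into a ring, the submitter rings the doorbell by writing the number
+ * of instructions just posted; hardware adds it to the current count:
+ *
+ *	union nps_pkt_in_instr_baoff_dbell dbell;
+ *
+ *	dbell.value = 0;
+ *	dbell.s.dbell = posted;
+ *	nitrox_write_csr(ndev, NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring), dbell.value);
+ */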
+
+/**
+ * struct nps_core_int_ena_w1s - NPS core interrupt enable set register
+ * @host_nps_wr_err: Reads or sets enable for
+ *   NPS_CORE_INT[HOST_NPS_WR_ERR].
+ * @npco_dma_malform: Reads or sets enable for
+ *   NPS_CORE_INT[NPCO_DMA_MALFORM].
+ * @exec_wr_timeout: Reads or sets enable for
+ *   NPS_CORE_INT[EXEC_WR_TIMEOUT].
+ * @host_wr_timeout: Reads or sets enable for
+ *   NPS_CORE_INT[HOST_WR_TIMEOUT].
+ * @host_wr_err: Reads or sets enable for
+ *   NPS_CORE_INT[HOST_WR_ERR]
+ */
+union nps_core_int_ena_w1s {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz4 : 55;
+		u64 host_nps_wr_err : 1;
+		u64 npco_dma_malform : 1;
+		u64 exec_wr_timeout : 1;
+		u64 host_wr_timeout : 1;
+		u64 host_wr_err : 1;
+		u64 raz3 : 1;
+		u64 raz2 : 1;
+		u64 raz1 : 1;
+		u64 raz0 : 1;
+#else
+		u64 raz0 : 1;
+		u64 raz1 : 1;
+		u64 raz2 : 1;
+		u64 raz3 : 1;
+		u64 host_wr_err	: 1;
+		u64 host_wr_timeout : 1;
+		u64 exec_wr_timeout : 1;
+		u64 npco_dma_malform : 1;
+		u64 host_nps_wr_err : 1;
+		u64 raz4 : 55;
+#endif
+	} s;
+};
+
+/**
+ * struct nps_core_gbl_vfcfg - Global VF Configuration Register.
+ * @ilk_disable: When set, this bit indicates that the ILK interface has
+ *    been disabled.
+ * @obaf: BMO allocation control
+ *    0 = allocate per queue
+ *    1 = allocate per VF
+ * @ibaf: BMI allocation control
+ *    0 = allocate per queue
+ *    1 = allocate per VF
+ * @zaf: ZIP allocation control
+ *    0 = allocate per queue
+ *    1 = allocate per VF
+ * @aeaf: AE allocation control
+ *    0 = allocate per queue
+ *    1 = allocate per VF
+ * @seaf: SE allocation control
+ *    0 = allocate per queue
+ *    1 = allocate per VF
+ * @cfg: VF/PF mode.
+ */
+union nps_core_gbl_vfcfg {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64  raz :55;
+		u64  ilk_disable :1;
+		u64  obaf :1;
+		u64  ibaf :1;
+		u64  zaf :1;
+		u64  aeaf :1;
+		u64  seaf :1;
+		u64  cfg :3;
+#else
+		u64  cfg :3;
+		u64  seaf :1;
+		u64  aeaf :1;
+		u64  zaf :1;
+		u64  ibaf :1;
+		u64  obaf :1;
+		u64  ilk_disable :1;
+		u64  raz :55;
+#endif
+	} s;
+};
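+
+/*
+ * Illustrative sketch, not part of the driver: selecting the PF/VF mode is
+ * a read-modify-write of [CFG], e.g. for PF-only operation (PF_MODE is
+ * defined in nitrox_dev.h):
+ *
+ *	union nps_core_gbl_vfcfg vfcfg;
+ *
+ *	vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
+ *	vfcfg.s.cfg = PF_MODE;
+ *	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
+ */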
+
+/**
+ * struct nps_core_int_active - NPS Core Interrupt Active Register
+ * @resend: Resend MSI-X interrupt if there are interrupts left to handle.
+ *    Software can set this bit and then exit the ISR.
+ * @ocla: Set when any OCLA(0)_INT and corresponding OCLA(0)_INT_ENA_W1C
+ *    bits are set
+ * @mbox: Set when any NPS_PKT_MBOX_INT_LO/HI and corresponding
+ *    NPS_PKT_MBOX_INT_LO_ENA_W1C/HI_ENA_W1C bits are set
+ * @emu: bit i is set in [EMU] when any EMU(i)_INT bit is set
+ * @bmo: Set when any BMO_INT bit is set
+ * @bmi: Set when any BMI_INT bit is set or when any non-RO
+ *    BMI_INT and corresponding BMI_INT_ENA_W1C bits are both set
+ * @aqm: Set when any AQM_INT bit is set
+ * @zqm: Set when any ZQM_INT bit is set
+ * @efl: Set when any EFL_INT RO bit is set or when any non-RO EFL_INT
+ *    and corresponding EFL_INT_ENA_W1C bits are both set
+ * @ilk: Set when any ILK_INT bit is set
+ * @lbc: Set when any LBC_INT RO bit is set or when any non-RO LBC_INT
+ *    and corresponding LBC_INT_ENA_W1C bits are both set
+ * @pem: Set when any PEM(0)_INT RO bit is set or when any non-RO
+ *    PEM(0)_INT and corresponding PEM(0)_INT_ENA_W1C bit are both set
+ * @ucd: Set when any UCD_INT bit is set
+ * @zctl: Set when any ZIP_INT RO bit is set or when any non-RO ZIP_INT
+ *    and corresponding ZIP_INT_ENA_W1C bits are both set
+ * @lbm: Set when any LBM_INT bit is set
+ * @nps_pkt: Set when any NPS_PKT_INT bit is set
+ * @nps_core: Set when any NPS_CORE_INT RO bit is set or when non-RO
+ *    NPS_CORE_INT and corresponding NPS_CORE_INT_ENA_W1C bits are both set
+ */
+union nps_core_int_active {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 resend : 1;
+		u64 raz	: 43;
+		u64 ocla : 1;
+		u64 mbox : 1;
+		u64 emu	: 4;
+		u64 bmo	: 1;
+		u64 bmi	: 1;
+		u64 aqm	: 1;
+		u64 zqm	: 1;
+		u64 efl	: 1;
+		u64 ilk	: 1;
+		u64 lbc	: 1;
+		u64 pem	: 1;
+		u64 pom	: 1;
+		u64 ucd	: 1;
+		u64 zctl : 1;
+		u64 lbm	: 1;
+		u64 nps_pkt : 1;
+		u64 nps_core : 1;
+#else
+		u64 nps_core : 1;
+		u64 nps_pkt : 1;
+		u64 lbm	: 1;
+		u64 zctl: 1;
+		u64 ucd	: 1;
+		u64 pom	: 1;
+		u64 pem	: 1;
+		u64 lbc	: 1;
+		u64 ilk	: 1;
+		u64 efl	: 1;
+		u64 zqm	: 1;
+		u64 aqm	: 1;
+		u64 bmi	: 1;
+		u64 bmo	: 1;
+		u64 emu	: 4;
+		u64 mbox : 1;
+		u64 ocla : 1;
+		u64 raz	: 43;
+		u64 resend : 1;
+#endif
+	} s;
+};
+
+/**
+ * struct efl_core_int - EFL Interrupt Registers
+ * @epci_decode_err: EPCI decoded an unknown transaction.
+ *    This error should only occur when there is a microcode/SE error
+ *    and should be considered fatal.
+ * @ae_err: An AE uncorrectable error occurred.
+ *    See EFL_CORE(0..3)_AE_ERR_INT
+ * @se_err: An SE uncorrectable error occurred.
+ *    See EFL_CORE(0..3)_SE_ERR_INT
+ * @dbe: Double-bit error occurred in EFL
+ * @sbe: Single-bit error occurred in EFL
+ * @d_left: Asserted when new POM-Header-BMI-data is
+ *    being sent to an Exec, and that Exec has not read all BMI
+ *    data associated with the previous POM header
+ * @len_ovr: Asserted when an Exec-Read is issued that is more than
+ *    14 greater in length than the BMI data left to be read
+ */
+union efl_core_int {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz	: 57;
+		u64 epci_decode_err : 1;
+		u64 ae_err : 1;
+		u64 se_err : 1;
+		u64 dbe	: 1;
+		u64 sbe	: 1;
+		u64 d_left : 1;
+		u64 len_ovr : 1;
+#else
+		u64 len_ovr : 1;
+		u64 d_left : 1;
+		u64 sbe	: 1;
+		u64 dbe	: 1;
+		u64 se_err : 1;
+		u64 ae_err : 1;
+		u64 epci_decode_err  : 1;
+		u64 raz	: 57;
+#endif
+	} s;
+};
+
+/**
+ * struct efl_core_int_ena_w1s - EFL core interrupt enable set register
+ * @epci_decode_err: Reads or sets enable for
+ *   EFL_CORE(0..3)_INT[EPCI_DECODE_ERR].
+ * @d_left: Reads or sets enable for
+ *   EFL_CORE(0..3)_INT[D_LEFT].
+ * @len_ovr: Reads or sets enable for
+ *   EFL_CORE(0..3)_INT[LEN_OVR].
+ */
+union efl_core_int_ena_w1s {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz_7_63 : 57;
+		u64 epci_decode_err : 1;
+		u64 raz_2_5 : 4;
+		u64 d_left : 1;
+		u64 len_ovr : 1;
+#else
+		u64 len_ovr : 1;
+		u64 d_left : 1;
+		u64 raz_2_5 : 4;
+		u64 epci_decode_err : 1;
+		u64 raz_7_63 : 57;
+#endif
+	} s;
+};
+
+/**
+ * struct efl_rnm_ctl_status - RNM Control and Status Register
+ * @ent_sel: Select input to RNM FIFO
+ * @exp_ent: Exported entropy enable for random number generator
+ * @rng_rst: Reset to RNG. Setting this bit to 1 cancels the generation
+ *    of the current random number.
+ * @rnm_rst: Reset the RNM. Setting this bit to 1 clears all stored numbers
+ *    in the random number memory.
+ * @rng_en: Enables the output of the RNG.
+ * @ent_en: Entropy enable for random number generator.
+ */
+union efl_rnm_ctl_status {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz_9_63 : 55;
+		u64 ent_sel : 4;
+		u64 exp_ent : 1;
+		u64 rng_rst : 1;
+		u64 rnm_rst : 1;
+		u64 rng_en : 1;
+		u64 ent_en : 1;
+#else
+		u64 ent_en : 1;
+		u64 rng_en : 1;
+		u64 rnm_rst : 1;
+		u64 rng_rst : 1;
+		u64 exp_ent : 1;
+		u64 ent_sel : 4;
+		u64 raz_9_63 : 55;
+#endif
+	} s;
+};
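+
+/*
+ * Illustrative sketch, not part of the driver: bringing up the random
+ * number unit amounts to setting the entropy and RNG enables here:
+ *
+ *	union efl_rnm_ctl_status rnm_ctl;
+ *
+ *	rnm_ctl.value = nitrox_read_csr(ndev, EFL_RNM_CTL_STATUS);
+ *	rnm_ctl.s.ent_en = 1;
+ *	rnm_ctl.s.rng_en = 1;
+ *	nitrox_write_csr(ndev, EFL_RNM_CTL_STATUS, rnm_ctl.value);
+ */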
+
+/**
+ * struct bmi_ctl - BMI control register
+ * @ilk_hdrq_thrsh: Maximum number of header queue locations
+ *   that ILK packets may consume. When the threshold is
+ *   exceeded ILK_XOFF is sent to the BMI_X2P_ARB.
+ * @nps_hdrq_thrsh: Maximum number of header queue locations
+ *   that NPS packets may consume. When the threshold is
+ *   exceeded NPS_XOFF is sent to the BMI_X2P_ARB.
+ * @totl_hdrq_thrsh: Maximum number of header queue locations
+ *   that the sum of ILK and NPS packets may consume.
+ * @ilk_free_thrsh: Maximum number of buffers that ILK packet
+ *   flows may consume before ILK_XOFF is sent to the BMI_X2P_ARB.
+ * @nps_free_thrsh: Maximum number of buffers that NPS packet
+ *   flows may consume before NPS_XOFF is sent to the BMI_X2P_ARB.
+ * @totl_free_thrsh: Maximum number of buffers that both ILK and NPS
+ *   packet flows may consume before both NPS_XOFF and ILK_XOFF
+ *   are asserted to the BMI_X2P_ARB.
+ * @max_pkt_len: Maximum packet length, integral number of 256B
+ *   buffers.
+ */
+union bmi_ctl {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz_56_63 : 8;
+		u64 ilk_hdrq_thrsh : 8;
+		u64 nps_hdrq_thrsh : 8;
+		u64 totl_hdrq_thrsh : 8;
+		u64 ilk_free_thrsh : 8;
+		u64 nps_free_thrsh : 8;
+		u64 totl_free_thrsh : 8;
+		u64 max_pkt_len : 8;
+#else
+		u64 max_pkt_len : 8;
+		u64 totl_free_thrsh : 8;
+		u64 nps_free_thrsh : 8;
+		u64 ilk_free_thrsh : 8;
+		u64 totl_hdrq_thrsh : 8;
+		u64 nps_hdrq_thrsh : 8;
+		u64 ilk_hdrq_thrsh : 8;
+		u64 raz_56_63 : 8;
+#endif
+	} s;
+};
+
+/**
+ * struct bmi_int_ena_w1s - BMI interrupt enable set register
+ * @ilk_req_oflw: Reads or sets enable for
+ *   BMI_INT[ILK_REQ_OFLW].
+ * @nps_req_oflw: Reads or sets enable for
+ *   BMI_INT[NPS_REQ_OFLW].
+ * @fpf_undrrn: Reads or sets enable for
+ *   BMI_INT[FPF_UNDRRN].
+ * @eop_err_ilk: Reads or sets enable for
+ *   BMI_INT[EOP_ERR_ILK].
+ * @eop_err_nps: Reads or sets enable for
+ *   BMI_INT[EOP_ERR_NPS].
+ * @sop_err_ilk: Reads or sets enable for
+ *   BMI_INT[SOP_ERR_ILK].
+ * @sop_err_nps: Reads or sets enable for
+ *   BMI_INT[SOP_ERR_NPS].
+ * @pkt_rcv_err_ilk: Reads or sets enable for
+ *   BMI_INT[PKT_RCV_ERR_ILK].
+ * @pkt_rcv_err_nps: Reads or sets enable for
+ *   BMI_INT[PKT_RCV_ERR_NPS].
+ * @max_len_err_ilk: Reads or sets enable for
+ *   BMI_INT[MAX_LEN_ERR_ILK].
+ * @max_len_err_nps: Reads or sets enable for
+ *   BMI_INT[MAX_LEN_ERR_NPS].
+ */
+union bmi_int_ena_w1s {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz_13_63	: 51;
+		u64 ilk_req_oflw : 1;
+		u64 nps_req_oflw : 1;
+		u64 raz_10 : 1;
+		u64 raz_9 : 1;
+		u64 fpf_undrrn	: 1;
+		u64 eop_err_ilk	: 1;
+		u64 eop_err_nps	: 1;
+		u64 sop_err_ilk	: 1;
+		u64 sop_err_nps	: 1;
+		u64 pkt_rcv_err_ilk : 1;
+		u64 pkt_rcv_err_nps : 1;
+		u64 max_len_err_ilk : 1;
+		u64 max_len_err_nps : 1;
+#else
+		u64 max_len_err_nps : 1;
+		u64 max_len_err_ilk : 1;
+		u64 pkt_rcv_err_nps : 1;
+		u64 pkt_rcv_err_ilk : 1;
+		u64 sop_err_nps	: 1;
+		u64 sop_err_ilk	: 1;
+		u64 eop_err_nps	: 1;
+		u64 eop_err_ilk	: 1;
+		u64 fpf_undrrn	: 1;
+		u64 raz_9 : 1;
+		u64 raz_10 : 1;
+		u64 nps_req_oflw : 1;
+		u64 ilk_req_oflw : 1;
+		u64 raz_13_63 : 51;
+#endif
+	} s;
+};
+
+/**
+ * struct bmo_ctl2 - BMO Control2 Register
+ * @arb_sel: Determines P2X Arbitration
+ * @ilk_buf_thrsh: Maximum number of buffers that the
+ *    ILK packet flows may consume before ILK XOFF is
+ *    asserted to the POM.
+ * @nps_slc_buf_thrsh: Maximum number of buffers that the
+ *    NPS_SLC packet flow may consume before NPS_SLC XOFF is
+ *    asserted to the POM.
+ * @nps_uns_buf_thrsh: Maximum number of buffers that the
+ *    NPS_UNS packet flow may consume before NPS_UNS XOFF is
+ *    asserted to the POM.
+ * @totl_buf_thrsh: Maximum number of buffers that ILK, NPS_UNS and
+ *    NPS_SLC packet flows may consume before NPS_UNS XOFF, NPS_SLC XOFF and
+ *    ILK_XOFF are all asserted to the POM.
+ */
+union bmo_ctl2 {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 arb_sel : 1;
+		u64 raz_32_62 : 31;
+		u64 ilk_buf_thrsh : 8;
+		u64 nps_slc_buf_thrsh : 8;
+		u64 nps_uns_buf_thrsh : 8;
+		u64 totl_buf_thrsh : 8;
+#else
+		u64 totl_buf_thrsh : 8;
+		u64 nps_uns_buf_thrsh : 8;
+		u64 nps_slc_buf_thrsh : 8;
+		u64 ilk_buf_thrsh : 8;
+		u64 raz_32_62 : 31;
+		u64 arb_sel : 1;
+#endif
+	} s;
+};
+
+/**
+ * struct pom_int_ena_w1s - POM interrupt enable set register
+ * @illegal_intf: Reads or sets enable for POM_INT[ILLEGAL_INTF].
+ * @illegal_dport: Reads or sets enable for POM_INT[ILLEGAL_DPORT].
+ */
+union pom_int_ena_w1s {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz2 : 60;
+		u64 illegal_intf : 1;
+		u64 illegal_dport : 1;
+		u64 raz1 : 1;
+		u64 raz0 : 1;
+#else
+		u64 raz0 : 1;
+		u64 raz1 : 1;
+		u64 illegal_dport : 1;
+		u64 illegal_intf : 1;
+		u64 raz2 : 60;
+#endif
+	} s;
+};
+
+/**
+ * struct lbc_inval_ctl - LBC invalidation control register
+ * @wait_timer: Wait timer for wait state. [WAIT_TIMER] must
+ *   always be written with its reset value.
+ * @cam_inval_start: Software should write [CAM_INVAL_START]=1
+ *   to initiate an LBC cache invalidation. After this, software
+ *   should read LBC_INVAL_STATUS until LBC_INVAL_STATUS[DONE] is set.
+ *   LBC hardware clears [CAM_INVAL_START] before software can
+ *   observe LBC_INVAL_STATUS[DONE] being set.
+ */
+union lbc_inval_ctl {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz2 : 48;
+		u64 wait_timer : 8;
+		u64 raz1 : 6;
+		u64 cam_inval_start : 1;
+		u64 raz0 : 1;
+#else
+		u64 raz0 : 1;
+		u64 cam_inval_start : 1;
+		u64 raz1 : 6;
+		u64 wait_timer : 8;
+		u64 raz2 : 48;
+#endif
+	} s;
+};
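+
+/*
+ * Illustrative sketch, not part of the driver: the invalidation sequence
+ * described above writes [CAM_INVAL_START] and then polls LBC_INVAL_STATUS
+ * until [DONE] is set (lbc_inval_status is defined later in this file):
+ *
+ *	union lbc_inval_ctl ctl;
+ *	union lbc_inval_status sts;
+ *
+ *	ctl.value = nitrox_read_csr(ndev, LBC_INVAL_CTL);
+ *	ctl.s.cam_inval_start = 1;
+ *	nitrox_write_csr(ndev, LBC_INVAL_CTL, ctl.value);
+ *	do {
+ *		sts.value = nitrox_read_csr(ndev, LBC_INVAL_STATUS);
+ *	} while (!sts.s.done);
+ */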
+
+/**
+ * struct lbc_int_ena_w1s - LBC interrupt enable set register
+ * @cam_hard_err: Reads or sets enable for LBC_INT[CAM_HARD_ERR].
+ * @cam_inval_abort: Reads or sets enable for LBC_INT[CAM_INVAL_ABORT].
+ * @over_fetch_err: Reads or sets enable for LBC_INT[OVER_FETCH_ERR].
+ * @cache_line_to_err: Reads or sets enable for
+ *   LBC_INT[CACHE_LINE_TO_ERR].
+ * @cam_soft_err: Reads or sets enable for
+ *   LBC_INT[CAM_SOFT_ERR].
+ * @dma_rd_err: Reads or sets enable for
+ *   LBC_INT[DMA_RD_ERR].
+ */
+union lbc_int_ena_w1s {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz_10_63 : 54;
+		u64 cam_hard_err : 1;
+		u64 cam_inval_abort : 1;
+		u64 over_fetch_err : 1;
+		u64 cache_line_to_err : 1;
+		u64 raz_2_5 : 4;
+		u64 cam_soft_err : 1;
+		u64 dma_rd_err : 1;
+#else
+		u64 dma_rd_err : 1;
+		u64 cam_soft_err : 1;
+		u64 raz_2_5 : 4;
+		u64 cache_line_to_err : 1;
+		u64 over_fetch_err : 1;
+		u64 cam_inval_abort : 1;
+		u64 cam_hard_err : 1;
+		u64 raz_10_63 : 54;
+#endif
+	} s;
+};
+
+/**
+ * struct lbc_int - LBC interrupt summary register
+ * @cam_hard_err: indicates a fatal hardware error.
+ *   It requires system reset.
+ *   When [CAM_HARD_ERR] is set, LBC stops logging any new information in
+ *   LBC_POM_MISS_INFO_LOG,
+ *   LBC_POM_MISS_ADDR_LOG,
+ *   LBC_EFL_MISS_INFO_LOG, and
+ *   LBC_EFL_MISS_ADDR_LOG.
+ *   Software should sample them.
+ * @cam_inval_abort: indicates a fatal hardware error.
+ *   System reset is required.
+ * @over_fetch_err: indicates a fatal hardware error
+ *   System reset is required
+ * @cache_line_to_err: is a debug feature.
+ *   This timeout interrupt bit tells the software that
+ *   a cacheline in LBC has non-zero usage and the context
+ *   has not been used for greater than the
+ *   LBC_TO_CNT[TO_CNT] time interval.
+ * @sbe: Memory SBE error. This is recoverable via ECC.
+ *   See LBC_ECC_INT for more details.
+ * @dbe: Memory DBE error. This is a fatal and requires a
+ *   system reset.
+ * @pref_dat_len_mismatch_err: Summary bit for context length
+ *   mismatch errors.
+ * @rd_dat_len_mismatch_err: Summary bit for SE read data length
+ *   greater than data prefetch length errors.
+ * @cam_soft_err: is recoverable. Software must complete a
+ *   LBC_INVAL_CTL[CAM_INVAL_START] invalidation sequence and
+ *   then clear [CAM_SOFT_ERR].
+ * @dma_rd_err: A context prefetch read of host memory returned with
+ *   a read error.
+ */
+union lbc_int {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz_10_63 : 54;
+		u64 cam_hard_err : 1;
+		u64 cam_inval_abort : 1;
+		u64 over_fetch_err : 1;
+		u64 cache_line_to_err : 1;
+		u64 sbe : 1;
+		u64 dbe	: 1;
+		u64 pref_dat_len_mismatch_err : 1;
+		u64 rd_dat_len_mismatch_err : 1;
+		u64 cam_soft_err : 1;
+		u64 dma_rd_err : 1;
+#else
+		u64 dma_rd_err : 1;
+		u64 cam_soft_err : 1;
+		u64 rd_dat_len_mismatch_err : 1;
+		u64 pref_dat_len_mismatch_err : 1;
+		u64 dbe	: 1;
+		u64 sbe	: 1;
+		u64 cache_line_to_err : 1;
+		u64 over_fetch_err : 1;
+		u64 cam_inval_abort : 1;
+		u64 cam_hard_err : 1;
+		u64 raz_10_63 : 54;
+#endif
+	} s;
+};
+
+/**
+ * struct lbc_inval_status: LBC Invalidation status register
+ * @cam_clean_entry_complete_cnt: The number of entries that are
+ *   cleaned up successfully.
+ * @cam_clean_entry_cnt: The number of entries that have the CAM
+ *   inval command issued.
+ * @cam_inval_state: cam invalidation FSM state
+ * @cam_inval_abort: cam invalidation abort
+ * @cam_rst_rdy: lbc_cam reset ready
+ * @done: LBC clears [DONE] when
+ *   LBC_INVAL_CTL[CAM_INVAL_START] is written with a one,
+ *   and sets [DONE] when it completes the invalidation
+ *   sequence.
+ */
+union lbc_inval_status {
+	u64 value;
+	struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+		u64 raz3 : 23;
+		u64 cam_clean_entry_complete_cnt : 9;
+		u64 raz2 : 7;
+		u64 cam_clean_entry_cnt : 9;
+		u64 raz1 : 5;
+		u64 cam_inval_state : 3;
+		u64 raz0 : 5;
+		u64 cam_inval_abort : 1;
+		u64 cam_rst_rdy	: 1;
+		u64 done : 1;
+#else
+		u64 done : 1;
+		u64 cam_rst_rdy : 1;
+		u64 cam_inval_abort : 1;
+		u64 raz0 : 5;
+		u64 cam_inval_state : 3;
+		u64 raz1 : 5;
+		u64 cam_clean_entry_cnt : 9;
+		u64 raz2 : 7;
+		u64 cam_clean_entry_complete_cnt : 9;
+		u64 raz3 : 23;
+#endif
+	} s;
+};
+
+#endif /* __NITROX_CSR_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
new file mode 100644
index 0000000..af59645
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_DEV_H
+#define __NITROX_DEV_H
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#define VERSION_LEN 32
+
+struct nitrox_cmdq {
+	/* command queue lock */
+	spinlock_t cmdq_lock;
+	/* response list lock */
+	spinlock_t response_lock;
+	/* backlog list lock */
+	spinlock_t backlog_lock;
+
+	/* request submitted to chip, in progress */
+	struct list_head response_head;
+	/* hw queue full, hold in backlog list */
+	struct list_head backlog_head;
+
+	/* doorbell address */
+	u8 __iomem *dbell_csr_addr;
+	/* base address of the queue */
+	u8 *head;
+
+	struct nitrox_device *ndev;
+	/* flush pending backlog commands */
+	struct work_struct backlog_qflush;
+
+	/* requests posted waiting for completion */
+	atomic_t pending_count;
+	/* requests in backlog queues */
+	atomic_t backlog_count;
+
+	int write_idx;
+	/* command size 32B/64B */
+	u8 instr_size;
+	u8 qno;
+	u32 qsize;
+
+	/* unaligned addresses */
+	u8 *head_unaligned;
+	dma_addr_t dma_unaligned;
+	/* dma address of the base */
+	dma_addr_t dma;
+};
+
+struct nitrox_hw {
+	/* firmware version */
+	char fw_name[VERSION_LEN];
+
+	u16 vendor_id;
+	u16 device_id;
+	u8 revision_id;
+
+	/* CNN55XX cores */
+	u8 se_cores;
+	u8 ae_cores;
+	u8 zip_cores;
+};
+
+#define MAX_MSIX_VECTOR_NAME	20
+/*
+ * vectors for queues (64 AE, 64 SE and 64 ZIP) and
+ * error condition/mailbox.
+ */
+#define MAX_MSIX_VECTORS	192
+
+struct nitrox_msix {
+	struct msix_entry *entries;
+	char **names;
+	DECLARE_BITMAP(irqs, MAX_MSIX_VECTORS);
+	u32 nr_entries;
+};
+
+struct bh_data {
+	/* slc port completion count address */
+	u8 __iomem *completion_cnt_csr_addr;
+
+	struct nitrox_cmdq *cmdq;
+	struct tasklet_struct resp_handler;
+};
+
+struct nitrox_bh {
+	struct bh_data *slc;
+};
+
+/* NITROX-V driver state */
+#define NITROX_UCODE_LOADED	0
+#define NITROX_READY		1
+
+/* command queue size */
+#define DEFAULT_CMD_QLEN 2048
+/* command timeout in milliseconds */
+#define CMD_TIMEOUT 2000
+
+#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev))
+#define PF_MODE 0
+
+#define NITROX_CSR_ADDR(ndev, offset) \
+	((ndev)->bar_addr + (offset))
+
+/**
+ * struct nitrox_device - NITROX Device Information.
+ * @list: pointer to linked list of devices
+ * @bar_addr: iomap address
+ * @pdev: PCI device information
+ * @status: NITROX status
+ * @timeout: Request timeout in jiffies
+ * @refcnt: Device usage count
+ * @idx: device index (0..N)
+ * @node: NUMA node id attached
+ * @qlen: Command queue length
+ * @nr_queues: Number of command queues
+ * @ctx_pool: DMA pool for crypto context
+ * @pkt_cmdqs: SE Command queues
+ * @msix: MSI-X information
+ * @bh: post processing work
+ * @hw: hardware information
+ * @debugfs_dir: debugfs directory
+ */
+struct nitrox_device {
+	struct list_head list;
+
+	u8 __iomem *bar_addr;
+	struct pci_dev *pdev;
+
+	unsigned long status;
+	unsigned long timeout;
+	refcount_t refcnt;
+
+	u8 idx;
+	int node;
+	u16 qlen;
+	u16 nr_queues;
+
+	struct dma_pool *ctx_pool;
+	struct nitrox_cmdq *pkt_cmdqs;
+
+	struct nitrox_msix msix;
+	struct nitrox_bh bh;
+
+	struct nitrox_hw hw;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	struct dentry *debugfs_dir;
+#endif
+};
+
+/**
+ * nitrox_read_csr - Read from device register
+ * @ndev: NITROX device
+ * @offset: offset of the register to read
+ *
+ * Returns: value read
+ */
+static inline u64 nitrox_read_csr(struct nitrox_device *ndev, u64 offset)
+{
+	return readq(ndev->bar_addr + offset);
+}
+
+/**
+ * nitrox_write_csr - Write to device register
+ * @ndev: NITROX device
+ * @offset: offset of the register to write
+ * @value: value to write
+ */
+static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset,
+				    u64 value)
+{
+	writeq(value, (ndev->bar_addr + offset));
+}
+
+static inline int nitrox_ready(struct nitrox_device *ndev)
+{
+	return test_bit(NITROX_READY, &ndev->status);
+}
+
+#endif /* __NITROX_DEV_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
new file mode 100644
index 0000000..ab4ccf2
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/delay.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_csr.h"
+
+/**
+ * emu_enable_cores - Enable EMU cluster cores.
+ * @ndev: N5 device
+ */
+static void emu_enable_cores(struct nitrox_device *ndev)
+{
+	union emu_se_enable emu_se;
+	union emu_ae_enable emu_ae;
+	int i;
+
+	/* AE cores 20 per cluster */
+	emu_ae.value = 0;
+	emu_ae.s.enable = 0xfffff;
+
+	/* SE cores 16 per cluster */
+	emu_se.value = 0;
+	emu_se.s.enable = 0xffff;
+
+	/* enable per cluster cores */
+	for (i = 0; i < NR_CLUSTERS; i++) {
+		nitrox_write_csr(ndev, EMU_AE_ENABLEX(i), emu_ae.value);
+		nitrox_write_csr(ndev, EMU_SE_ENABLEX(i), emu_se.value);
+	}
+}
+
+/**
+ * nitrox_config_emu_unit - configure EMU unit.
+ * @ndev: N5 device
+ */
+void nitrox_config_emu_unit(struct nitrox_device *ndev)
+{
+	union emu_wd_int_ena_w1s emu_wd_int;
+	union emu_ge_int_ena_w1s emu_ge_int;
+	u64 offset;
+	int i;
+
+	/* enable cores */
+	emu_enable_cores(ndev);
+
+	/* enable general error and watchdog interrupts */
+	emu_ge_int.value = 0;
+	emu_ge_int.s.se_ge = 0xffff;
+	emu_ge_int.s.ae_ge = 0xfffff;
+	emu_wd_int.value = 0;
+	emu_wd_int.s.se_wd = 1;
+
+	for (i = 0; i < NR_CLUSTERS; i++) {
+		offset = EMU_WD_INT_ENA_W1SX(i);
+		nitrox_write_csr(ndev, offset, emu_wd_int.value);
+		offset = EMU_GE_INT_ENA_W1SX(i);
+		nitrox_write_csr(ndev, offset, emu_ge_int.value);
+	}
+}
+
+static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
+{
+	union nps_pkt_in_instr_ctl pkt_in_ctl;
+	union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
+	union nps_pkt_in_done_cnts pkt_in_cnts;
+	u64 offset;
+
+	offset = NPS_PKT_IN_INSTR_CTLX(ring);
+	/* disable the ring */
+	pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
+	pkt_in_ctl.s.enb = 0;
+	nitrox_write_csr(ndev, offset, pkt_in_ctl.value);
+	usleep_range(100, 150);
+
+	/* wait for [ENB] to clear */
+	do {
+		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
+	} while (pkt_in_ctl.s.enb);
+
+	/* clear off doorbell counts */
+	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring);
+	pkt_in_dbell.value = 0;
+	pkt_in_dbell.s.dbell = 0xffffffff;
+	nitrox_write_csr(ndev, offset, pkt_in_dbell.value);
+
+	/* clear done counts */
+	offset = NPS_PKT_IN_DONE_CNTSX(ring);
+	pkt_in_cnts.value = nitrox_read_csr(ndev, offset);
+	nitrox_write_csr(ndev, offset, pkt_in_cnts.value);
+	usleep_range(50, 100);
+}
+
+void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
+{
+	union nps_pkt_in_instr_ctl pkt_in_ctl;
+	u64 offset;
+
+	/* 64-byte instruction size */
+	offset = NPS_PKT_IN_INSTR_CTLX(ring);
+	pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
+	pkt_in_ctl.s.is64b = 1;
+	pkt_in_ctl.s.enb = 1;
+	nitrox_write_csr(ndev, offset, pkt_in_ctl.value);
+
+	/* wait for [ENB] to be set */
+	do {
+		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
+	} while (!pkt_in_ctl.s.enb);
+}
+
+/**
+ * nitrox_config_pkt_input_rings - configure Packet Input Rings
+ * @ndev: N5 device
+ */
+void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < ndev->nr_queues; i++) {
+		struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
+		union nps_pkt_in_instr_rsize pkt_in_rsize;
+		u64 offset;
+
+		reset_pkt_input_ring(ndev, i);
+
+		/* configure ring base address 16-byte aligned,
+		 * size and interrupt threshold.
+		 */
+		offset = NPS_PKT_IN_INSTR_BADDRX(i);
+		nitrox_write_csr(ndev, offset, cmdq->dma);
+
+		/* configure ring size */
+		offset = NPS_PKT_IN_INSTR_RSIZEX(i);
+		pkt_in_rsize.value = 0;
+		pkt_in_rsize.s.rsize = ndev->qlen;
+		nitrox_write_csr(ndev, offset, pkt_in_rsize.value);
+
+		/* set high threshold for pkt input ring interrupts */
+		offset = NPS_PKT_IN_INT_LEVELSX(i);
+		nitrox_write_csr(ndev, offset, 0xffffffff);
+
+		enable_pkt_input_ring(ndev, i);
+	}
+}
+
+static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port)
+{
+	union nps_pkt_slc_ctl pkt_slc_ctl;
+	union nps_pkt_slc_cnts pkt_slc_cnts;
+	u64 offset;
+
+	/* disable slc port */
+	offset = NPS_PKT_SLC_CTLX(port);
+	pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
+	pkt_slc_ctl.s.enb = 0;
+	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);
+	usleep_range(100, 150);
+
+	/* wait for [ENB] to clear */
+	do {
+		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
+	} while (pkt_slc_ctl.s.enb);
+
+	/* clear slc counters */
+	offset = NPS_PKT_SLC_CNTSX(port);
+	pkt_slc_cnts.value = nitrox_read_csr(ndev, offset);
+	nitrox_write_csr(ndev, offset, pkt_slc_cnts.value);
+	usleep_range(50, 100);
+}
+
+void enable_pkt_solicit_port(struct nitrox_device *ndev, int port)
+{
+	union nps_pkt_slc_ctl pkt_slc_ctl;
+	u64 offset;
+
+	offset = NPS_PKT_SLC_CTLX(port);
+	pkt_slc_ctl.value = 0;
+	pkt_slc_ctl.s.enb = 1;
+
+	/*
+	 * 8 trailing 0x00 bytes will be added
+	 * to the end of the outgoing packet.
+	 */
+	pkt_slc_ctl.s.z = 1;
+	/* enable response header */
+	pkt_slc_ctl.s.rh = 1;
+	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);
+
+	/* wait for [ENB] to be set */
+	do {
+		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
+	} while (!pkt_slc_ctl.s.enb);
+}
+
+static void config_single_pkt_solicit_port(struct nitrox_device *ndev,
+					   int port)
+{
+	union nps_pkt_slc_int_levels pkt_slc_int;
+	u64 offset;
+
+	reset_pkt_solicit_port(ndev, port);
+
+	offset = NPS_PKT_SLC_INT_LEVELSX(port);
+	pkt_slc_int.value = 0;
+	/* time interrupt threshold */
+	pkt_slc_int.s.timet = 0x3fffff;
+	nitrox_write_csr(ndev, offset, pkt_slc_int.value);
+
+	enable_pkt_solicit_port(ndev, port);
+}
+
+void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < ndev->nr_queues; i++)
+		config_single_pkt_solicit_port(ndev, i);
+}
+
+/**
+ * enable_nps_interrupts - enable NPS interrupts
+ * @ndev: N5 device.
+ *
+ * This includes NPS core, packet in and slc interrupts.
+ */
+static void enable_nps_interrupts(struct nitrox_device *ndev)
+{
+	union nps_core_int_ena_w1s core_int;
+
+	/* NPS core interrupts */
+	core_int.value = 0;
+	core_int.s.host_wr_err = 1;
+	core_int.s.host_wr_timeout = 1;
+	core_int.s.exec_wr_timeout = 1;
+	core_int.s.npco_dma_malform = 1;
+	core_int.s.host_nps_wr_err = 1;
+	nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value);
+
+	/* NPS packet in ring interrupts */
+	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL));
+	/* NPS packet slc port interrupts */
+	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL));
+	nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0ULL));
+}
+
+void nitrox_config_nps_unit(struct nitrox_device *ndev)
+{
+	union nps_core_gbl_vfcfg core_gbl_vfcfg;
+
+	/* endian control information */
+	nitrox_write_csr(ndev, NPS_CORE_CONTROL, 1ULL);
+
+	/* disable ILK interface */
+	core_gbl_vfcfg.value = 0;
+	core_gbl_vfcfg.s.ilk_disable = 1;
+	core_gbl_vfcfg.s.cfg = PF_MODE;
+	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
+	/* config input and solicit ports */
+	nitrox_config_pkt_input_rings(ndev);
+	nitrox_config_pkt_solicit_ports(ndev);
+
+	/* enable interrupts */
+	enable_nps_interrupts(ndev);
+}
+
+void nitrox_config_pom_unit(struct nitrox_device *ndev)
+{
+	union pom_int_ena_w1s pom_int;
+	int i;
+
+	/* enable pom interrupts */
+	pom_int.value = 0;
+	pom_int.s.illegal_dport = 1;
+	nitrox_write_csr(ndev, POM_INT_ENA_W1S, pom_int.value);
+
+	/* enable perf counters */
+	for (i = 0; i < ndev->hw.se_cores; i++)
+		nitrox_write_csr(ndev, POM_PERF_CTL, BIT_ULL(i));
+}
+
+/**
+ * nitrox_config_rand_unit - enable N5 random number unit
+ * @ndev: N5 device
+ */
+void nitrox_config_rand_unit(struct nitrox_device *ndev)
+{
+	union efl_rnm_ctl_status efl_rnm_ctl;
+	u64 offset;
+
+	offset = EFL_RNM_CTL_STATUS;
+	efl_rnm_ctl.value = nitrox_read_csr(ndev, offset);
+	efl_rnm_ctl.s.ent_en = 1;
+	efl_rnm_ctl.s.rng_en = 1;
+	nitrox_write_csr(ndev, offset, efl_rnm_ctl.value);
+}
+
+void nitrox_config_efl_unit(struct nitrox_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < NR_CLUSTERS; i++) {
+		union efl_core_int_ena_w1s efl_core_int;
+		u64 offset;
+
+		/* EFL core interrupts */
+		offset = EFL_CORE_INT_ENA_W1SX(i);
+		efl_core_int.value = 0;
+		efl_core_int.s.len_ovr = 1;
+		efl_core_int.s.d_left = 1;
+		efl_core_int.s.epci_decode_err = 1;
+		nitrox_write_csr(ndev, offset, efl_core_int.value);
+
+		offset = EFL_CORE_VF_ERR_INT0_ENA_W1SX(i);
+		nitrox_write_csr(ndev, offset, (~0ULL));
+		offset = EFL_CORE_VF_ERR_INT1_ENA_W1SX(i);
+		nitrox_write_csr(ndev, offset, (~0ULL));
+	}
+}
+
+void nitrox_config_bmi_unit(struct nitrox_device *ndev)
+{
+	union bmi_ctl bmi_ctl;
+	union bmi_int_ena_w1s bmi_int_ena;
+	u64 offset;
+
+	/* no threshold limits for PCIe */
+	offset = BMI_CTL;
+	bmi_ctl.value = nitrox_read_csr(ndev, offset);
+	bmi_ctl.s.max_pkt_len = 0xff;
+	bmi_ctl.s.nps_free_thrsh = 0xff;
+	bmi_ctl.s.nps_hdrq_thrsh = 0x7a;
+	nitrox_write_csr(ndev, offset, bmi_ctl.value);
+
+	/* enable interrupts */
+	offset = BMI_INT_ENA_W1S;
+	bmi_int_ena.value = 0;
+	bmi_int_ena.s.max_len_err_nps = 1;
+	bmi_int_ena.s.pkt_rcv_err_nps = 1;
+	bmi_int_ena.s.fpf_undrrn = 1;
+	nitrox_write_csr(ndev, offset, bmi_int_ena.value);
+}
+
+void nitrox_config_bmo_unit(struct nitrox_device *ndev)
+{
+	union bmo_ctl2 bmo_ctl2;
+	u64 offset;
+
+	/* no threshold limits for PCIe */
+	offset = BMO_CTL2;
+	bmo_ctl2.value = nitrox_read_csr(ndev, offset);
+	bmo_ctl2.s.nps_slc_buf_thrsh = 0xff;
+	nitrox_write_csr(ndev, offset, bmo_ctl2.value);
+}
+
+void invalidate_lbc(struct nitrox_device *ndev)
+{
+	union lbc_inval_ctl lbc_ctl;
+	union lbc_inval_status lbc_stat;
+	u64 offset;
+
+	/* invalidate LBC */
+	offset = LBC_INVAL_CTL;
+	lbc_ctl.value = nitrox_read_csr(ndev, offset);
+	lbc_ctl.s.cam_inval_start = 1;
+	nitrox_write_csr(ndev, offset, lbc_ctl.value);
+
+	offset = LBC_INVAL_STATUS;
+
+	do {
+		lbc_stat.value = nitrox_read_csr(ndev, offset);
+	} while (!lbc_stat.s.done);
+}
+
+void nitrox_config_lbc_unit(struct nitrox_device *ndev)
+{
+	union lbc_int_ena_w1s lbc_int_ena;
+	u64 offset;
+
+	invalidate_lbc(ndev);
+
+	/* enable interrupts */
+	offset = LBC_INT_ENA_W1S;
+	lbc_int_ena.value = 0;
+	lbc_int_ena.s.dma_rd_err = 1;
+	lbc_int_ena.s.over_fetch_err = 1;
+	lbc_int_ena.s.cam_inval_abort = 1;
+	lbc_int_ena.s.cam_hard_err = 1;
+	nitrox_write_csr(ndev, offset, lbc_int_ena.value);
+
+	offset = LBC_PLM_VF1_64_INT_ENA_W1S;
+	nitrox_write_csr(ndev, offset, (~0ULL));
+	offset = LBC_PLM_VF65_128_INT_ENA_W1S;
+	nitrox_write_csr(ndev, offset, (~0ULL));
+
+	offset = LBC_ELM_VF1_64_INT_ENA_W1S;
+	nitrox_write_csr(ndev, offset, (~0ULL));
+	offset = LBC_ELM_VF65_128_INT_ENA_W1S;
+	nitrox_write_csr(ndev, offset, (~0ULL));
+}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
new file mode 100644
index 0000000..ee0d70b
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/pci.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_csr.h"
+#include "nitrox_common.h"
+
+#define NR_RING_VECTORS 3
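+/*
+ * 64 rings x 3 vectors per ring = 192, so the error/mailbox
+ * vector sits at entry 192
+ */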
+#define NPS_CORE_INT_ACTIVE_ENTRY 192
+
+/**
+ * nps_pkt_slc_isr - IRQ handler for NPS solicit port
+ * @irq: irq number
+ * @data: argument
+ */
+static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
+{
+	struct bh_data *slc = data;
+	union nps_pkt_slc_cnts pkt_slc_cnts;
+
+	pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr);
+	/* New packet on SLC output port */
+	if (pkt_slc_cnts.s.slc_int)
+		tasklet_hi_schedule(&slc->resp_handler);
+
+	return IRQ_HANDLED;
+}
+
+static void clear_nps_core_err_intr(struct nitrox_device *ndev)
+{
+	u64 value;
+
+	/* Write 1 to clear */
+	value = nitrox_read_csr(ndev, NPS_CORE_INT);
+	nitrox_write_csr(ndev, NPS_CORE_INT, value);
+
+	dev_err_ratelimited(DEV(ndev), "NSP_CORE_INT  0x%016llx\n", value);
+}
+
+static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
+{
+	union nps_pkt_int pkt_int;
+	unsigned long value, offset;
+	int i;
+
+	pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
+	dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT  0x%016llx\n",
+			    pkt_int.value);
+
+	if (pkt_int.s.slc_err) {
+		offset = NPS_PKT_SLC_ERR_TYPE;
+		value = nitrox_read_csr(ndev, offset);
+		nitrox_write_csr(ndev, offset, value);
+		dev_err_ratelimited(DEV(ndev),
+				    "NPS_PKT_SLC_ERR_TYPE  0x%016lx\n", value);
+
+		offset = NPS_PKT_SLC_RERR_LO;
+		value = nitrox_read_csr(ndev, offset);
+		nitrox_write_csr(ndev, offset, value);
+		/* re-enable the solicit ports that reported errors */
+		for_each_set_bit(i, &value, BITS_PER_LONG)
+			enable_pkt_solicit_port(ndev, i);
+
+		dev_err_ratelimited(DEV(ndev),
+				    "NPS_PKT_SLC_RERR_LO  0x%016lx\n", value);
+
+		offset = NPS_PKT_SLC_RERR_HI;
+		value = nitrox_read_csr(ndev, offset);
+		nitrox_write_csr(ndev, offset, value);
+		dev_err_ratelimited(DEV(ndev),
+				    "NPS_PKT_SLC_RERR_HI  0x%016lx\n", value);
+	}
+
+	if (pkt_int.s.in_err) {
+		offset = NPS_PKT_IN_ERR_TYPE;
+		value = nitrox_read_csr(ndev, offset);
+		nitrox_write_csr(ndev, offset, value);
+		dev_err_ratelimited(DEV(ndev),
+				    "NPS_PKT_IN_ERR_TYPE  0x%016lx\n", value);
+		offset = NPS_PKT_IN_RERR_LO;
+		value = nitrox_read_csr(ndev, offset);
+		nitrox_write_csr(ndev, offset, value);
+		/* re-enable the input rings that reported errors */
+		for_each_set_bit(i, &value, BITS_PER_LONG)
+			enable_pkt_input_ring(ndev, i);
+
+		dev_err_ratelimited(DEV(ndev),
+				    "NPS_PKT_IN_RERR_LO  0x%016lx\n", value);
+
+		offset = NPS_PKT_IN_RERR_HI;
+		value = nitrox_read_csr(ndev, offset);
+		nitrox_write_csr(ndev, offset, value);
+		dev_err_ratelimited(DEV(ndev),
+				    "NPS_PKT_IN_RERR_HI  0x%016lx\n", value);
+	}
+}
+
+static void clear_pom_err_intr(struct nitrox_device *ndev)
+{
+	u64 value;
+
+	value = nitrox_read_csr(ndev, POM_INT);
+	nitrox_write_csr(ndev, POM_INT, value);
+	dev_err_ratelimited(DEV(ndev), "POM_INT  0x%016llx\n", value);
+}
+
+static void clear_pem_err_intr(struct nitrox_device *ndev)
+{
+	u64 value;
+
+	value = nitrox_read_csr(ndev, PEM0_INT);
+	nitrox_write_csr(ndev, PEM0_INT, value);
+	dev_err_ratelimited(DEV(ndev), "PEM(0)_INT  0x%016llx\n", value);
+}
+
+static void clear_lbc_err_intr(struct nitrox_device *ndev)
+{
+	union lbc_int lbc_int;
+	u64 value, offset;
+	int i;
+
+	lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
+	dev_err_ratelimited(DEV(ndev), "LBC_INT  0x%016llx\n", lbc_int.value);
+
+	if (lbc_int.s.dma_rd_err) {
+		for (i = 0; i < NR_CLUSTERS; i++) {
+			offset = EFL_CORE_VF_ERR_INT0X(i);
+			value = nitrox_read_csr(ndev, offset);
+			nitrox_write_csr(ndev, offset, value);
+			offset = EFL_CORE_VF_ERR_INT1X(i);
+			value = nitrox_read_csr(ndev, offset);
+			nitrox_write_csr(ndev, offset, value);
+		}
+	}
+
+	if (lbc_int.s.cam_soft_err) {
+		dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
+		invalidate_lbc(ndev);
+	}
+
+	if (lbc_int.s.pref_dat_len_mismatch_err) {
+		offset = LBC_PLM_VF1_64_INT;
+		value = nitrox_read_csr(ndev, offset);
+		nitrox_write_csr(ndev, offset, value);
+		offset = LBC_PLM_VF65_128_INT;
+		value = nitrox_read_csr(ndev, offset);
+		nitrox_write_csr(ndev, offset, value);
+	}
+
+	if (lbc_int.s.rd_dat_len_mismatch_err) {
+		offset = LBC_ELM_VF1_64_INT;
+		value = nitrox_read_csr(ndev, offset);
+		nitrox_write_csr(ndev, offset, value);
+		offset = LBC_ELM_VF65_128_INT;
+		value = nitrox_read_csr(ndev, offset);
+		nitrox_write_csr(ndev, offset, value);
+	}
+	nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
+}
+
+static void clear_efl_err_intr(struct nitrox_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < NR_CLUSTERS; i++) {
+		union efl_core_int core_int;
+		u64 value, offset;
+
+		offset = EFL_CORE_INTX(i);
+		core_int.value = nitrox_read_csr(ndev, offset);
+		nitrox_write_csr(ndev, offset, core_int.value);
+		dev_err_ratelimited(DEV(ndev), "ELF_CORE(%d)_INT  0x%016llx\n",
+				    i, core_int.value);
+		if (core_int.s.se_err) {
+			offset = EFL_CORE_SE_ERR_INTX(i);
+			value = nitrox_read_csr(ndev, offset);
+			nitrox_write_csr(ndev, offset, value);
+		}
+	}
+}
+
+static void clear_bmi_err_intr(struct nitrox_device *ndev)
+{
+	u64 value;
+
+	value = nitrox_read_csr(ndev, BMI_INT);
+	nitrox_write_csr(ndev, BMI_INT, value);
+	dev_err_ratelimited(DEV(ndev), "BMI_INT  0x%016llx\n", value);
+}
+
+/**
+ * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts
+ * @ndev: NITROX device
+ */
+static void clear_nps_core_int_active(struct nitrox_device *ndev)
+{
+	union nps_core_int_active core_int_active;
+
+	core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
+
+	if (core_int_active.s.nps_core)
+		clear_nps_core_err_intr(ndev);
+
+	if (core_int_active.s.nps_pkt)
+		clear_nps_pkt_err_intr(ndev);
+
+	if (core_int_active.s.pom)
+		clear_pom_err_intr(ndev);
+
+	if (core_int_active.s.pem)
+		clear_pem_err_intr(ndev);
+
+	if (core_int_active.s.lbc)
+		clear_lbc_err_intr(ndev);
+
+	if (core_int_active.s.efl)
+		clear_efl_err_intr(ndev);
+
+	if (core_int_active.s.bmi)
+		clear_bmi_err_intr(ndev);
+
+	/* set resend so the ISR is called back if more work is pending */
+	core_int_active.s.resend = 1;
+	nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value);
+}
+
+static irqreturn_t nps_core_int_isr(int irq, void *data)
+{
+	struct nitrox_device *ndev = data;
+
+	clear_nps_core_int_active(ndev);
+
+	return IRQ_HANDLED;
+}
+
+static int nitrox_enable_msix(struct nitrox_device *ndev)
+{
+	struct msix_entry *entries;
+	char **names;
+	int i, nr_entries, ret;
+
+	/*
+	 * PF MSI-X vectors
+	 *
+	 * Entry 0: NPS PKT ring 0
+	 * Entry 1: AQMQ ring 0
+	 * Entry 2: ZQM ring 0
+	 * Entry 3: NPS PKT ring 1
+	 * Entry 4: AQMQ ring 1
+	 * Entry 5: ZQM ring 1
+	 * ....
+	 * Entry 192: NPS_CORE_INT_ACTIVE
+	 */
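+	/*
+	 * with NR_RING_VECTORS == 3, the NPS PKT vector of
+	 * ring r is MSI-X entry 3 * r
+	 */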
+	nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
+	entries = kcalloc_node(nr_entries, sizeof(struct msix_entry),
+			       GFP_KERNEL, ndev->node);
+	if (!entries)
+		return -ENOMEM;
+
+	names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL);
+	if (!names) {
+		kfree(entries);
+		return -ENOMEM;
+	}
+
+	/* fill entries */
+	for (i = 0; i < (nr_entries - 1); i++)
+		entries[i].entry = i;
+
+	entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY;
+
+	for (i = 0; i < nr_entries; i++) {
+		*(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+		if (!(*(names + i))) {
+			ret = -ENOMEM;
+			goto msix_fail;
+		}
+	}
+	ndev->msix.entries = entries;
+	ndev->msix.names = names;
+	ndev->msix.nr_entries = nr_entries;
+
+	ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,
+				    ndev->msix.nr_entries);
+	if (ret) {
+		dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n",
+			ret);
+		goto msix_fail;
+	}
+	return 0;
+
+msix_fail:
+	for (i = 0; i < nr_entries; i++)
+		kfree(*(names + i));
+
+	kfree(entries);
+	kfree(names);
+	return ret;
+}
+
+static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev)
+{
+	int i;
+
+	if (!ndev->bh.slc)
+		return;
+
+	for (i = 0; i < ndev->nr_queues; i++) {
+		struct bh_data *bh = &ndev->bh.slc[i];
+
+		tasklet_disable(&bh->resp_handler);
+		tasklet_kill(&bh->resp_handler);
+	}
+	kfree(ndev->bh.slc);
+	ndev->bh.slc = NULL;
+}
+
+static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev)
+{
+	u32 size;
+	int i;
+
+	size = ndev->nr_queues * sizeof(struct bh_data);
+	ndev->bh.slc = kzalloc(size, GFP_KERNEL);
+	if (!ndev->bh.slc)
+		return -ENOMEM;
+
+	for (i = 0; i < ndev->nr_queues; i++) {
+		struct bh_data *bh = &ndev->bh.slc[i];
+		u64 offset;
+
+		offset = NPS_PKT_SLC_CNTSX(i);
+		/* precalculate the completion count CSR address */
+		bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
+		bh->cmdq = &ndev->pkt_cmdqs[i];
+
+		tasklet_init(&bh->resp_handler, pkt_slc_resp_handler,
+			     (unsigned long)bh);
+	}
+
+	return 0;
+}
+
+static int nitrox_request_irqs(struct nitrox_device *ndev)
+{
+	struct pci_dev *pdev = ndev->pdev;
+	struct msix_entry *msix_ent = ndev->msix.entries;
+	int nr_ring_vectors, i = 0, ring, cpu, ret;
+	char *name;
+
+	/*
+	 * PF MSI-X vectors
+	 *
+	 * Entry 0: NPS PKT ring 0
+	 * Entry 1: AQMQ ring 0
+	 * Entry 2: ZQM ring 0
+	 * Entry 3: NPS PKT ring 1
+	 * ....
+	 * Entry 192: NPS_CORE_INT_ACTIVE
+	 */
+	nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;
+
+	/* request irq for pkt ring/ports only */
+	while (i < nr_ring_vectors) {
+		name = *(ndev->msix.names + i);
+		ring = (i / NR_RING_VECTORS);
+		snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
+			 ndev->idx, ring);
+
+		ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
+				  name, &ndev->bh.slc[ring]);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to get irq %d for %s\n",
+				msix_ent[i].vector, name);
+			return ret;
+		}
+		cpu = ring % num_online_cpus();
+		irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));
+
+		set_bit(i, ndev->msix.irqs);
+		i += NR_RING_VECTORS;
+	}
+
+	/* Request IRQ for NPS_CORE_INT_ACTIVE */
+	name = *(ndev->msix.names + i);
+	snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
+	ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to get irq %d for %s\n",
+			msix_ent[i].vector, name);
+		return ret;
+	}
+	set_bit(i, ndev->msix.irqs);
+
+	return 0;
+}
+
+static void nitrox_disable_msix(struct nitrox_device *ndev)
+{
+	struct msix_entry *msix_ent = ndev->msix.entries;
+	char **names = ndev->msix.names;
+	int i = 0, ring, nr_ring_vectors;
+
+	nr_ring_vectors = ndev->msix.nr_entries - 1;
+
+	/* clear pkt ring irqs */
+	while (i < nr_ring_vectors) {
+		if (test_and_clear_bit(i, ndev->msix.irqs)) {
+			ring = (i / NR_RING_VECTORS);
+			irq_set_affinity_hint(msix_ent[i].vector, NULL);
+			free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
+		}
+		i += NR_RING_VECTORS;
+	}
+	irq_set_affinity_hint(msix_ent[i].vector, NULL);
+	free_irq(msix_ent[i].vector, ndev);
+	clear_bit(i, ndev->msix.irqs);
+
+	kfree(ndev->msix.entries);
+	for (i = 0; i < ndev->msix.nr_entries; i++)
+		kfree(*(names + i));
+
+	kfree(names);
+	pci_disable_msix(ndev->pdev);
+}
+
+/**
+ * nitrox_pf_cleanup_isr - Cleanup PF MSI-X vectors and IRQs
+ * @ndev: NITROX device
+ */
+void nitrox_pf_cleanup_isr(struct nitrox_device *ndev)
+{
+	nitrox_disable_msix(ndev);
+	nitrox_cleanup_pkt_slc_bh(ndev);
+}
+
+/**
+ * nitrox_pf_init_isr - Initialize PF MSI-X vectors and IRQs
+ * @ndev: NITROX device
+ *
+ * Return: 0 on success, a negative value on failure.
+ */
+int nitrox_pf_init_isr(struct nitrox_device *ndev)
+{
+	int err;
+
+	err = nitrox_setup_pkt_slc_bh(ndev);
+	if (err)
+		return err;
+
+	err = nitrox_enable_msix(ndev);
+	if (err)
+		goto msix_fail;
+
+	err = nitrox_request_irqs(ndev);
+	if (err)
+		goto irq_fail;
+
+	return 0;
+
+irq_fail:
+	nitrox_disable_msix(ndev);
+msix_fail:
+	nitrox_cleanup_pkt_slc_bh(ndev);
+	return err;
+}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
new file mode 100644
index 0000000..4d31df0
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/cpumask.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci_regs.h>
+#include <linux/vmalloc.h>
+#include <linux/pci.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_common.h"
+#include "nitrox_req.h"
+#include "nitrox_csr.h"
+
+#define CRYPTO_CTX_SIZE	256
+
+/* command queue alignments */
+#define PKT_IN_ALIGN	16
+
+static int cmdq_common_init(struct nitrox_cmdq *cmdq)
+{
+	struct nitrox_device *ndev = cmdq->ndev;
+	u32 qsize;
+
+	qsize = (ndev->qlen) * cmdq->instr_size;
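+	/*
+	 * e.g. the default qlen of 2048 with 64-byte instructions
+	 * gives a 128KB ring
+	 */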
+	cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev),
+						   (qsize + PKT_IN_ALIGN),
+						   &cmdq->dma_unaligned,
+						   GFP_KERNEL);
+	if (!cmdq->head_unaligned)
+		return -ENOMEM;
+
+	cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
+	cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
+	cmdq->qsize = (qsize + PKT_IN_ALIGN);
+	cmdq->write_idx = 0;
+
+	spin_lock_init(&cmdq->response_lock);
+	spin_lock_init(&cmdq->cmdq_lock);
+	spin_lock_init(&cmdq->backlog_lock);
+
+	INIT_LIST_HEAD(&cmdq->response_head);
+	INIT_LIST_HEAD(&cmdq->backlog_head);
+	INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);
+
+	atomic_set(&cmdq->pending_count, 0);
+	atomic_set(&cmdq->backlog_count, 0);
+	return 0;
+}
+
+static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq)
+{
+	struct nitrox_device *ndev = cmdq->ndev;
+
+	cancel_work_sync(&cmdq->backlog_qflush);
+
+	dma_free_coherent(DEV(ndev), cmdq->qsize,
+			  cmdq->head_unaligned, cmdq->dma_unaligned);
+
+	atomic_set(&cmdq->pending_count, 0);
+	atomic_set(&cmdq->backlog_count, 0);
+
+	cmdq->dbell_csr_addr = NULL;
+	cmdq->head = NULL;
+	cmdq->dma = 0;
+	cmdq->qsize = 0;
+	cmdq->instr_size = 0;
+}
+
+static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < ndev->nr_queues; i++) {
+		struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
+
+		cmdq_common_cleanup(cmdq);
+	}
+	kfree(ndev->pkt_cmdqs);
+	ndev->pkt_cmdqs = NULL;
+}
+
+static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev)
+{
+	int i, err, size;
+
+	size = ndev->nr_queues * sizeof(struct nitrox_cmdq);
+	ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL);
+	if (!ndev->pkt_cmdqs)
+		return -ENOMEM;
+
+	for (i = 0; i < ndev->nr_queues; i++) {
+		struct nitrox_cmdq *cmdq;
+		u64 offset;
+
+		cmdq = &ndev->pkt_cmdqs[i];
+		cmdq->ndev = ndev;
+		cmdq->qno = i;
+		cmdq->instr_size = sizeof(struct nps_pkt_instr);
+
+		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
+		/* SE ring doorbell address for this queue */
+		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
+
+		err = cmdq_common_init(cmdq);
+		if (err)
+			goto pkt_cmdq_fail;
+	}
+	return 0;
+
+pkt_cmdq_fail:
+	nitrox_cleanup_pkt_cmdqs(ndev);
+	return err;
+}
+
+static int create_crypto_dma_pool(struct nitrox_device *ndev)
+{
+	size_t size;
+
+	/* Crypto context pool, 16 byte aligned */
+	size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
+	ndev->ctx_pool = dma_pool_create("crypto-context",
+					 DEV(ndev), size, 16, 0);
+	if (!ndev->ctx_pool)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
+{
+	if (!ndev->ctx_pool)
+		return;
+
+	dma_pool_destroy(ndev->ctx_pool);
+	ndev->ctx_pool = NULL;
+}
+
+/**
+ * crypto_alloc_context - Allocate crypto context from pool
+ * @ndev: NITROX Device
+ */
+void *crypto_alloc_context(struct nitrox_device *ndev)
+{
+	struct ctx_hdr *ctx;
+	void *vaddr;
+	dma_addr_t dma;
+
+	vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
+	if (!vaddr)
+		return NULL;
+
+	/* fill metadata */
+	ctx = vaddr;
+	ctx->pool = ndev->ctx_pool;
+	ctx->dma = dma;
+	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);
+
+	return ((u8 *)vaddr + sizeof(struct ctx_hdr));
+}
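+
+/*
+ * Each pool object is laid out as [struct ctx_hdr][crypto context]. Callers
+ * get a pointer just past the header; crypto_free_context() steps back over
+ * it to recover the owning pool and DMA address.
+ */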
+
+/**
+ * crypto_free_context - Free crypto context to pool
+ * @ctx: context to free
+ */
+void crypto_free_context(void *ctx)
+{
+	struct ctx_hdr *ctxp;
+
+	if (!ctx)
+		return;
+
+	ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
+	dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
+}
+
+/**
+ * nitrox_common_sw_init - allocate software resources.
+ * @ndev: NITROX device
+ *
+ * Allocates the crypto context pool, command queues, etc.
+ *
+ * Return: 0 on success, or a negative error code on error.
+ */
+int nitrox_common_sw_init(struct nitrox_device *ndev)
+{
+	int err = 0;
+
+	/* per device crypto context pool */
+	err = create_crypto_dma_pool(ndev);
+	if (err)
+		return err;
+
+	err = nitrox_init_pkt_cmdqs(ndev);
+	if (err)
+		destroy_crypto_dma_pool(ndev);
+
+	return err;
+}
+
+/**
+ * nitrox_common_sw_cleanup - free software resources.
+ * @ndev: NITROX device
+ */
+void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
+{
+	nitrox_cleanup_pkt_cmdqs(ndev);
+	destroy_crypto_dma_pool(ndev);
+}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
new file mode 100644
index 0000000..fee7cb2
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -0,0 +1,643 @@
+#include <linux/aer.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/firmware.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_common.h"
+#include "nitrox_csr.h"
+
+#define CNN55XX_DEV_ID	0x12
+#define MAX_PF_QUEUES	64
+#define UCODE_HLEN 48
+#define SE_GROUP 0
+
+#define DRIVER_VERSION "1.0"
+#define FW_DIR "cavium/"
+/* SE microcode */
+#define SE_FW	FW_DIR "cnn55xx_se.fw"
+
+static const char nitrox_driver_name[] = "CNN55XX";
+
+static LIST_HEAD(ndevlist);
+static DEFINE_MUTEX(devlist_lock);
+static unsigned int num_devices;
+
+/**
+ * nitrox_pci_tbl - PCI Device ID Table
+ */
+static const struct pci_device_id nitrox_pci_tbl[] = {
+	{PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0},
+	/* required last entry */
+	{0, }
+};
+MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl);
+
+static unsigned int qlen = DEFAULT_CMD_QLEN;
+module_param(qlen, uint, 0644);
+MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
+
+/**
+ * struct ucode - Firmware Header
+ * @id: microcode ID
+ * @version: firmware version
+ * @code_size: code section size
+ * @raz: alignment
+ * @code: code section
+ */
+struct ucode {
+	u8 id;
+	char version[VERSION_LEN - 1];
+	__be32 code_size;
+	u8 raz[12];
+	u64 code[0];
+};
+
+/**
+ * write_to_ucd_unit - Write firmware to the NITROX UCD unit
+ * @ndev: NITROX device
+ * @ucode: microcode image to load
+ */
+static void write_to_ucd_unit(struct nitrox_device *ndev,
+			      struct ucode *ucode)
+{
+	u32 code_size = be32_to_cpu(ucode->code_size) * 2;
+	u64 offset, data;
+	int i = 0;
+
+	/*
+	 * UCD structure
+	 *
+	 *  -------------
+	 *  |    BLK 7  |
+	 *  -------------
+	 *  |    BLK 6  |
+	 *  -------------
+	 *  |    ...    |
+	 *  -------------
+	 *  |    BLK 0  |
+	 *  -------------
+	 *  Total of 8 blocks, each size 32KB
+	 */
+
+	/* set the block number */
+	offset = UCD_UCODE_LOAD_BLOCK_NUM;
+	nitrox_write_csr(ndev, offset, 0);
+
+	code_size = roundup(code_size, 8);
+	while (code_size) {
+		data = ucode->code[i];
+		/* write 8 bytes at a time */
+		offset = UCD_UCODE_LOAD_IDX_DATAX(i);
+		nitrox_write_csr(ndev, offset, data);
+		code_size -= 8;
+		i++;
+	}
+
+	/* put all SE cores in group 0 */
+	offset = POM_GRP_EXECMASKX(SE_GROUP);
+	nitrox_write_csr(ndev, offset, (~0ULL));
+
+	for (i = 0; i < ndev->hw.se_cores; i++) {
+		/*
+		 * write the block number and firmware length
+		 * bits <2:0>: block number
+		 * bit 3 set: SE uses 32KB microcode
+		 * bit 3 clear: SE uses 64KB microcode
+		 */
+		offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i);
+		nitrox_write_csr(ndev, offset, 0x8);
+	}
+	usleep_range(300, 400);
+}
+
+static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name)
+{
+	const struct firmware *fw;
+	struct ucode *ucode;
+	int ret;
+
+	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);
+
+	ret = request_firmware(&fw, fw_name, DEV(ndev));
+	if (ret < 0) {
+		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
+		return ret;
+	}
+
+	ucode = (struct ucode *)fw->data;
+	/* copy the firmware version */
+	memcpy(ndev->hw.fw_name, ucode->version, (VERSION_LEN - 2));
+	ndev->hw.fw_name[VERSION_LEN - 1] = '\0';
+
+	write_to_ucd_unit(ndev, ucode);
+	release_firmware(fw);
+
+	set_bit(NITROX_UCODE_LOADED, &ndev->status);
+	/* barrier to sync with other cpus */
+	smp_mb__after_atomic();
+	return 0;
+}
+
+/**
+ * nitrox_add_to_devlist - add NITROX device to global device list
+ * @ndev: NITROX device
+ */
+static int nitrox_add_to_devlist(struct nitrox_device *ndev)
+{
+	struct nitrox_device *dev;
+	int ret = 0;
+
+	INIT_LIST_HEAD(&ndev->list);
+	refcount_set(&ndev->refcnt, 1);
+
+	mutex_lock(&devlist_lock);
+	list_for_each_entry(dev, &ndevlist, list) {
+		if (dev == ndev) {
+			ret = -EEXIST;
+			goto unlock;
+		}
+	}
+	ndev->idx = num_devices++;
+	list_add_tail(&ndev->list, &ndevlist);
+unlock:
+	mutex_unlock(&devlist_lock);
+	return ret;
+}
+
+/**
+ * nitrox_remove_from_devlist - remove NITROX device from
+ *   global device list
+ * @ndev: NITROX device
+ */
+static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
+{
+	mutex_lock(&devlist_lock);
+	list_del(&ndev->list);
+	num_devices--;
+	mutex_unlock(&devlist_lock);
+}
+
+struct nitrox_device *nitrox_get_first_device(void)
+{
+	struct nitrox_device *ndev = NULL;
+
+	mutex_lock(&devlist_lock);
+	list_for_each_entry(ndev, &ndevlist, list) {
+		if (nitrox_ready(ndev))
+			break;
+	}
+	mutex_unlock(&devlist_lock);
+	if (!ndev)
+		return NULL;
+
+	refcount_inc(&ndev->refcnt);
+	/* barrier to sync with other cpus */
+	smp_mb__after_atomic();
+	return ndev;
+}
+
+void nitrox_put_device(struct nitrox_device *ndev)
+{
+	if (!ndev)
+		return;
+
+	refcount_dec(&ndev->refcnt);
+	/* barrier to sync with other cpus */
+	smp_mb__after_atomic();
+}
+
+static int nitrox_reset_device(struct pci_dev *pdev)
+{
+	int pos = 0;
+
+	pos = pci_save_state(pdev);
+	if (pos) {
+		dev_err(&pdev->dev, "Failed to save pci state\n");
+		return -ENOMEM;
+	}
+
+	pos = pci_pcie_cap(pdev);
+	if (!pos)
+		return -ENOTTY;
+
+	if (!pci_wait_for_pending_transaction(pdev))
+		dev_err(&pdev->dev, "waiting for pending transaction\n");
+
+	pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+	msleep(100);
+	pci_restore_state(pdev);
+
+	return 0;
+}
+
+static int nitrox_pf_sw_init(struct nitrox_device *ndev)
+{
+	int err;
+
+	err = nitrox_common_sw_init(ndev);
+	if (err)
+		return err;
+
+	err = nitrox_pf_init_isr(ndev);
+	if (err)
+		nitrox_common_sw_cleanup(ndev);
+
+	return err;
+}
+
+static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
+{
+	nitrox_pf_cleanup_isr(ndev);
+	nitrox_common_sw_cleanup(ndev);
+}
+
+/**
+ * nitrox_bist_check - Check NITROX BIST register status
+ * @ndev: NITROX device
+ */
+static int nitrox_bist_check(struct nitrox_device *ndev)
+{
+	u64 value = 0;
+	int i;
+
+	for (i = 0; i < NR_CLUSTERS; i++) {
+		value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i));
+		value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i));
+	}
+	value += nitrox_read_csr(ndev, UCD_BIST_STATUS);
+	value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG);
+	value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG);
+	value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG);
+	value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG);
+	value += nitrox_read_csr(ndev, POM_BIST_REG);
+	value += nitrox_read_csr(ndev, BMI_BIST_REG);
+	value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT);
+	value += nitrox_read_csr(ndev, BMO_BIST_REG);
+	value += nitrox_read_csr(ndev, LBC_BIST_STATUS);
+	value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0));
+	if (value)
+		return -EIO;
+	return 0;
+}
+
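+/*
+ * nitrox_get_hwinfo - derive usable AE/SE core counts from the EMU fuse maps.
+ * Each set fuse bit marks a dead core, so a cluster with no blown fuses
+ * contributes its full 20 AE and 16 SE cores.
+ */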
+static void nitrox_get_hwinfo(struct nitrox_device *ndev)
+{
+	union emu_fuse_map emu_fuse;
+	u64 offset;
+	int i;
+
+	for (i = 0; i < NR_CLUSTERS; i++) {
+		u8 dead_cores;
+
+		offset = EMU_FUSE_MAPX(i);
+		emu_fuse.value = nitrox_read_csr(ndev, offset);
+		if (emu_fuse.s.valid) {
+			dead_cores = hweight32(emu_fuse.s.ae_fuse);
+			ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
+			dead_cores = hweight16(emu_fuse.s.se_fuse);
+			ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
+		}
+	}
+}
+
+static int nitrox_pf_hw_init(struct nitrox_device *ndev)
+{
+	int err;
+
+	err = nitrox_bist_check(ndev);
+	if (err) {
+		dev_err(&ndev->pdev->dev, "BIST check failed\n");
+		return err;
+	}
+	/* get cores information */
+	nitrox_get_hwinfo(ndev);
+
+	nitrox_config_nps_unit(ndev);
+	nitrox_config_pom_unit(ndev);
+	nitrox_config_efl_unit(ndev);
+	/* configure IO units */
+	nitrox_config_bmi_unit(ndev);
+	nitrox_config_bmo_unit(ndev);
+	/* configure Local Buffer Cache */
+	nitrox_config_lbc_unit(ndev);
+	nitrox_config_rand_unit(ndev);
+
+	/* load firmware on SE cores */
+	err = nitrox_load_fw(ndev, SE_FW);
+	if (err)
+		return err;
+
+	nitrox_config_emu_unit(ndev);
+
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int registers_show(struct seq_file *s, void *v)
+{
+	struct nitrox_device *ndev = s->private;
+	u64 offset;
+
+	/* NPS DMA stats */
+	offset = NPS_STATS_PKT_DMA_RD_CNT;
+	seq_printf(s, "NPS_STATS_PKT_DMA_RD_CNT  0x%016llx\n",
+		   nitrox_read_csr(ndev, offset));
+	offset = NPS_STATS_PKT_DMA_WR_CNT;
+	seq_printf(s, "NPS_STATS_PKT_DMA_WR_CNT  0x%016llx\n",
+		   nitrox_read_csr(ndev, offset));
+
+	/* BMI/BMO stats */
+	offset = BMI_NPS_PKT_CNT;
+	seq_printf(s, "BMI_NPS_PKT_CNT  0x%016llx\n",
+		   nitrox_read_csr(ndev, offset));
+	offset = BMO_NPS_SLC_PKT_CNT;
+	seq_printf(s, "BMO_NPS_PKT_CNT  0x%016llx\n",
+		   nitrox_read_csr(ndev, offset));
+
+	return 0;
+}
+
+static int registers_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, registers_show, inode->i_private);
+}
+
+static const struct file_operations register_fops = {
+	.owner = THIS_MODULE,
+	.open = registers_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int firmware_show(struct seq_file *s, void *v)
+{
+	struct nitrox_device *ndev = s->private;
+
+	seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
+	return 0;
+}
+
+static int firmware_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, firmware_show, inode->i_private);
+}
+
+static const struct file_operations firmware_fops = {
+	.owner = THIS_MODULE,
+	.open = firmware_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int nitrox_show(struct seq_file *s, void *v)
+{
+	struct nitrox_device *ndev = s->private;
+
+	seq_printf(s, "NITROX-5 [idx: %d]\n", ndev->idx);
+	seq_printf(s, "  Revision ID: 0x%0x\n", ndev->hw.revision_id);
+	seq_printf(s, "  Cores [AE: %u  SE: %u]\n",
+		   ndev->hw.ae_cores, ndev->hw.se_cores);
+	seq_printf(s, "  Number of Queues: %u\n", ndev->nr_queues);
+	seq_printf(s, "  Queue length: %u\n", ndev->qlen);
+	seq_printf(s, "  Node: %u\n", ndev->node);
+
+	return 0;
+}
+
+static int nitrox_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nitrox_show, inode->i_private);
+}
+
+static const struct file_operations nitrox_fops = {
+	.owner = THIS_MODULE,
+	.open = nitrox_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static void nitrox_debugfs_exit(struct nitrox_device *ndev)
+{
+	debugfs_remove_recursive(ndev->debugfs_dir);
+	ndev->debugfs_dir = NULL;
+}
+
+static int nitrox_debugfs_init(struct nitrox_device *ndev)
+{
+	struct dentry *dir, *f;
+
+	dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (!dir)
+		return -ENOMEM;
+
+	ndev->debugfs_dir = dir;
+	f = debugfs_create_file("counters", 0400, dir, ndev, &register_fops);
+	if (!f)
+		goto err;
+	f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
+	if (!f)
+		goto err;
+	f = debugfs_create_file("nitrox", 0400, dir, ndev, &nitrox_fops);
+	if (!f)
+		goto err;
+
+	return 0;
+
+err:
+	nitrox_debugfs_exit(ndev);
+	return -ENODEV;
+}
+#else
+static int nitrox_debugfs_init(struct nitrox_device *ndev)
+{
+	return 0;
+}
+
+static void nitrox_debugfs_exit(struct nitrox_device *ndev)
+{
+}
+#endif
+
+/**
+ * nitrox_probe - NITROX Initialization function.
+ * @pdev: PCI device information struct
+ * @id: entry in nitrox_pci_tbl
+ *
+ * Return: 0 if the driver is bound to the device, or
+ *         a negative error code on failure.
+ */
+static int nitrox_probe(struct pci_dev *pdev,
+			const struct pci_device_id *id)
+{
+	struct nitrox_device *ndev;
+	int err;
+
+	dev_info_once(&pdev->dev, "%s driver version %s\n",
+		      nitrox_driver_name, DRIVER_VERSION);
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
+	/* do FLR */
+	err = nitrox_reset_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "FLR failed\n");
+		pci_disable_device(pdev);
+		return err;
+	}
+
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
+		dev_dbg(&pdev->dev, "DMA to 64-BIT address\n");
+	} else {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "DMA configuration failed\n");
+			pci_disable_device(pdev);
+			return err;
+		}
+	}
+
+	err = pci_request_mem_regions(pdev, nitrox_driver_name);
+	if (err) {
+		pci_disable_device(pdev);
+		return err;
+	}
+	pci_set_master(pdev);
+
+	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
+	if (!ndev) {
+		err = -ENOMEM;
+		goto ndev_fail;
+	}
+
+	pci_set_drvdata(pdev, ndev);
+	ndev->pdev = pdev;
+
+	/* add to device list */
+	nitrox_add_to_devlist(ndev);
+
+	ndev->hw.vendor_id = pdev->vendor;
+	ndev->hw.device_id = pdev->device;
+	ndev->hw.revision_id = pdev->revision;
+	/* command timeout in jiffies */
+	ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT);
+	ndev->node = dev_to_node(&pdev->dev);
+	if (ndev->node == NUMA_NO_NODE)
+		ndev->node = 0;
+
+	ndev->bar_addr = ioremap(pci_resource_start(pdev, 0),
+				 pci_resource_len(pdev, 0));
+	if (!ndev->bar_addr) {
+		err = -EIO;
+		goto ioremap_err;
+	}
+	/* allocate command queues based on CPUs, maximum of 64 queues */
+	ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus());
+	ndev->qlen = qlen;
+
+	err = nitrox_pf_sw_init(ndev);
+	if (err)
+		goto ioremap_err;
+
+	err = nitrox_pf_hw_init(ndev);
+	if (err)
+		goto pf_hw_fail;
+
+	err = nitrox_debugfs_init(ndev);
+	if (err)
+		goto pf_hw_fail;
+
+	set_bit(NITROX_READY, &ndev->status);
+	/* barrier to sync with other cpus */
+	smp_mb__after_atomic();
+
+	err = nitrox_crypto_register();
+	if (err)
+		goto crypto_fail;
+
+	return 0;
+
+crypto_fail:
+	nitrox_debugfs_exit(ndev);
+	clear_bit(NITROX_READY, &ndev->status);
+	/* barrier to sync with other cpus */
+	smp_mb__after_atomic();
+pf_hw_fail:
+	nitrox_pf_sw_cleanup(ndev);
+ioremap_err:
+	nitrox_remove_from_devlist(ndev);
+	kfree(ndev);
+	pci_set_drvdata(pdev, NULL);
+ndev_fail:
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * nitrox_remove - Unbind the driver from the device.
+ * @pdev: PCI device information struct
+ */
+static void nitrox_remove(struct pci_dev *pdev)
+{
+	struct nitrox_device *ndev = pci_get_drvdata(pdev);
+
+	if (!ndev)
+		return;
+
+	if (!refcount_dec_and_test(&ndev->refcnt)) {
+		dev_err(DEV(ndev), "Device refcnt not zero (%d)\n",
+			refcount_read(&ndev->refcnt));
+		return;
+	}
+
+	dev_info(DEV(ndev), "Removing Device %x:%x\n",
+		 ndev->hw.vendor_id, ndev->hw.device_id);
+
+	clear_bit(NITROX_READY, &ndev->status);
+	/* barrier to sync with other cpus */
+	smp_mb__after_atomic();
+
+	nitrox_remove_from_devlist(ndev);
+	nitrox_crypto_unregister();
+	nitrox_debugfs_exit(ndev);
+	nitrox_pf_sw_cleanup(ndev);
+
+	iounmap(ndev->bar_addr);
+	kfree(ndev);
+
+	pci_set_drvdata(pdev, NULL);
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static void nitrox_shutdown(struct pci_dev *pdev)
+{
+	pci_set_drvdata(pdev, NULL);
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver nitrox_driver = {
+	.name = nitrox_driver_name,
+	.id_table = nitrox_pci_tbl,
+	.probe = nitrox_probe,
+	.remove	= nitrox_remove,
+	.shutdown = nitrox_shutdown,
+};
+
+module_pci_driver(nitrox_driver);
+
+MODULE_AUTHOR("Srikanth Jampala <Jampala.Srikanth@cavium.com>");
+MODULE_DESCRIPTION("Cavium CNN55XX PF Driver " DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_FIRMWARE(SE_FW);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
new file mode 100644
index 0000000..d091b6f
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -0,0 +1,446 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_REQ_H
+#define __NITROX_REQ_H
+
+#include <linux/dma-mapping.h>
+#include <crypto/aes.h>
+
+#include "nitrox_dev.h"
+
+/**
+ * struct gphdr - General purpose Header
+ * @param0: first parameter.
+ * @param1: second parameter.
+ * @param2: third parameter.
+ * @param3: fourth parameter.
+ *
+ * The params carry the IV and the encrypt/decrypt data offsets.
+ */
+struct gphdr {
+	__be16 param0;
+	__be16 param1;
+	__be16 param2;
+	__be16 param3;
+};
+
+/**
+ * struct se_req_ctrl - SE request information.
+ * @arg: Minor number of the opcode
+ * @ctxc: Context control.
+ * @unca: Uncertainty enabled.
+ * @info: Additional information for SE cores.
+ * @unc: Uncertainty count for the input packet.
+ * @ctxl: Context length in bytes.
+ * @uddl: User defined data length
+ */
+union se_req_ctrl {
+	u64 value;
+	struct {
+		u64 raz	: 22;
+		u64 arg	: 8;
+		u64 ctxc : 2;
+		u64 unca : 1;
+		u64 info : 3;
+		u64 unc : 8;
+		u64 ctxl : 12;
+		u64 uddl : 8;
+	} s;
+};
+
+struct nitrox_sglist {
+	u16 len;
+	u16 raz0;
+	u32 raz1;
+	dma_addr_t dma;
+};
+
+#define MAX_IV_LEN 16
+
+/**
+ * struct se_crypto_request - SE crypto request structure.
+ * @opcode: Request opcode (enc/dec)
+ * @gfp: GFP flags for allocations made on behalf of this request
+ * @flags: flags from crypto subsystem
+ * @ctx_handle: Crypto context handle.
+ * @gph: GP Header
+ * @ctrl: Request Information.
+ * @iv: Initialization vector
+ * @ivsize: IV length in bytes
+ * @src: Input scatterlist
+ * @dst: Output scatterlist
+ */
+struct se_crypto_request {
+	u8 opcode;
+	gfp_t gfp;
+	u32 flags;
+	u64 ctx_handle;
+
+	struct gphdr gph;
+	union se_req_ctrl ctrl;
+
+	u8 iv[MAX_IV_LEN];
+	u16 ivsize;
+
+	struct scatterlist *src;
+	struct scatterlist *dst;
+};
+
+/* Crypto opcodes */
+#define FLEXI_CRYPTO_ENCRYPT_HMAC	0x33
+#define ENCRYPT	0
+#define DECRYPT 1
+
+/* IV from context */
+#define IV_FROM_CTX	0
+/* IV from Input data */
+#define IV_FROM_DPTR	1
+
+/**
+ * enum flexi_cipher - cipher opcodes for the SE firmware
+ */
+enum flexi_cipher {
+	CIPHER_NULL = 0,
+	CIPHER_3DES_CBC,
+	CIPHER_3DES_ECB,
+	CIPHER_AES_CBC,
+	CIPHER_AES_ECB,
+	CIPHER_AES_CFB,
+	CIPHER_AES_CTR,
+	CIPHER_AES_GCM,
+	CIPHER_AES_XTS,
+	CIPHER_AES_CCM,
+	CIPHER_AES_CBC_CTS,
+	CIPHER_AES_ECB_CTS,
+	CIPHER_INVALID
+};
+
+/**
+ * struct crypto_keys - Crypto keys
+ * @key: Encryption key or KEY1 for AES-XTS
+ * @iv: Encryption IV or Tweak for AES-XTS
+ */
+struct crypto_keys {
+	union {
+		u8 key[AES_MAX_KEY_SIZE];
+		u8 key1[AES_MAX_KEY_SIZE];
+	} u;
+	u8 iv[AES_BLOCK_SIZE];
+};
+
+/**
+ * struct auth_keys - Authentication keys
+ * @ipad: IPAD or KEY2 for AES-XTS
+ * @opad: OPAD or AUTH KEY if auth_input_type = 1
+ */
+struct auth_keys {
+	union {
+		u8 ipad[64];
+		u8 key2[64];
+	} u;
+	u8 opad[64];
+};
+
+/**
+ * struct flexi_crypto_context - Crypto context
+ * @cipher_type: Encryption cipher type
+ * @aes_keylen: AES key length
+ * @iv_source: Encryption IV source
+ * @hash_type: Authentication type
+ * @auth_input_type: Authentication input type
+ *   1 - Authentication IV and KEY, microcode calculates OPAD/IPAD
+ *   0 - Authentication OPAD/IPAD
+ * @mac_len: mac length
+ * @crypto: Crypto keys
+ * @auth: Authentication keys
+ */
+struct flexi_crypto_context {
+	union {
+		__be64 flags;
+		struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+			u64 cipher_type	: 4;
+			u64 reserved_59	: 1;
+			u64 aes_keylen : 2;
+			u64 iv_source : 1;
+			u64 hash_type : 4;
+			u64 reserved_49_51 : 3;
+			u64 auth_input_type: 1;
+			u64 mac_len : 8;
+			u64 reserved_0_39 : 40;
+#else
+			u64 reserved_0_39 : 40;
+			u64 mac_len : 8;
+			u64 auth_input_type: 1;
+			u64 reserved_49_51 : 3;
+			u64 hash_type : 4;
+			u64 iv_source : 1;
+			u64 aes_keylen : 2;
+			u64 reserved_59	: 1;
+			u64 cipher_type	: 4;
+#endif
+		} w0;
+	};
+
+	struct crypto_keys crypto;
+	struct auth_keys auth;
+};
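+
+/*
+ * Size check: the flags word (8 bytes), cipher key and IV (48 bytes) and
+ * IPAD/OPAD material (128 bytes) add up to 184 bytes, which fits in the
+ * 256-byte crypto context objects carved from the DMA pool.
+ */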
+
+struct nitrox_crypto_ctx {
+	struct nitrox_device *ndev;
+	union {
+		u64 ctx_handle;
+		struct flexi_crypto_context *fctx;
+	} u;
+};
+
+struct nitrox_kcrypt_request {
+	struct se_crypto_request creq;
+	struct nitrox_crypto_ctx *nctx;
+	struct skcipher_request *skreq;
+};
+
+/**
+ * struct pkt_instr_hdr - Packet Instruction Header
+ * @g: Gather used
+ *   When [G] is set and [GSZ] != 0, the instruction is an
+ *   indirect gather instruction.
+ *   When [G] is set and [GSZ] = 0, the instruction is a
+ *   direct gather instruction.
+ * @gsz: Number of pointers in the indirect gather list
+ * @ihi: When set hardware duplicates the 1st 8 bytes of pkt_instr_hdr
+ *   and adds them to the packet after the pkt_instr_hdr but before any UDD
+ * @ssz: Not used by the input hardware. But can become slc_store_int[SSZ]
+ *   when [IHI] is set.
+ * @fsz: The number of front data bytes directly included in the
+ *   PCIe instruction.
+ * @tlen: The length of the input packet in bytes, including:
+ *   - 16B pkt_hdr
+ *   - Inline context bytes if any,
+ *   - UDD if any,
+ *   - packet payload bytes
+ */
+union pkt_instr_hdr {
+	u64 value;
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 raz_48_63 : 16;
+		u64 g : 1;
+		u64 gsz	: 7;
+		u64 ihi	: 1;
+		u64 ssz	: 7;
+		u64 raz_30_31 : 2;
+		u64 fsz	: 6;
+		u64 raz_16_23 : 8;
+		u64 tlen : 16;
+#else
+		u64 tlen : 16;
+		u64 raz_16_23 : 8;
+		u64 fsz	: 6;
+		u64 raz_30_31 : 2;
+		u64 ssz	: 7;
+		u64 ihi	: 1;
+		u64 gsz	: 7;
+		u64 g : 1;
+		u64 raz_48_63 : 16;
+#endif
+	} s;
+};
+
+/**
+ * struct pkt_hdr - Packet Input Header
+ * @opcode: Request opcode (Major)
+ * @arg: Request opcode (Minor)
+ * @ctxc: Context control.
+ * @unca: When set [UNC] is the uncertainty count for an input packet.
+ *        The hardware uses uncertainty counts to predict
+ *        output buffer use and avoid deadlock.
+ * @info: Not used by input hardware. Available for use
+ *        during SE processing.
+ * @destport: The expected destination port/ring/channel for the packet.
+ * @unc: Uncertainty count for an input packet.
+ * @grp: SE group that will process the input packet.
+ * @ctxl: Context Length in 64-bit words.
+ * @uddl: User-defined data (UDD) length in bytes.
+ * @ctxp: Context pointer. CTXP<63,2:0> must be zero in all cases.
+ */
+union pkt_hdr {
+	u64 value[2];
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 opcode : 8;
+		u64 arg	: 8;
+		u64 ctxc : 2;
+		u64 unca : 1;
+		u64 raz_44 : 1;
+		u64 info : 3;
+		u64 destport : 9;
+		u64 unc	: 8;
+		u64 raz_19_23 : 5;
+		u64 grp	: 3;
+		u64 raz_15 : 1;
+		u64 ctxl : 7;
+		u64 uddl : 8;
+#else
+		u64 uddl : 8;
+		u64 ctxl : 7;
+		u64 raz_15 : 1;
+		u64 grp	: 3;
+		u64 raz_19_23 : 5;
+		u64 unc	: 8;
+		u64 destport : 9;
+		u64 info : 3;
+		u64 raz_44 : 1;
+		u64 unca : 1;
+		u64 ctxc : 2;
+		u64 arg	: 8;
+		u64 opcode : 8;
+#endif
+		__be64 ctxp;
+	} s;
+};
+
+/**
+ * struct slc_store_info - Solicited Packet Output Store Information.
+ * @ssz: The number of scatterlist pointers for the solicited output port
+ *       packet.
+ * @rptr: The result pointer for the solicited output port packet.
+ *        If [SSZ]=0, [RPTR] must point directly to a buffer on the remote
+ *        host that is large enough to hold the entire output packet.
+ *        If [SSZ]!=0, [RPTR] must point to an array of ([SSZ]+3)/4
+ *        sglist components at [RPTR] on the remote host.
+ */
+union slc_store_info {
+	u64 value[2];
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 raz_39_63 : 25;
+		u64 ssz	: 7;
+		u64 raz_0_31 : 32;
+#else
+		u64 raz_0_31 : 32;
+		u64 ssz	: 7;
+		u64 raz_39_63 : 25;
+#endif
+		__be64 rptr;
+	} s;
+};
+
+/**
+ * struct nps_pkt_instr - NPS Packet Instruction of SE cores.
+ * @dptr0: Input pointer to a buffer on the remote host.
+ * @ih: Packet Instruction Header (8 bytes)
+ * @irh: Packet Input Header (16 bytes)
+ * @slc: Solicited Packet Output Store Information (16 bytes)
+ * @fdata: Front data
+ *
+ * 64-Byte Instruction Format
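+ *
+ * The device consumes requests as these 64-byte units: @dptr0 points at the
+ * input data (or gather list) on the host, @irh carries the opcode and
+ * context pointer, and @slc tells the hardware where to store the response.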
+ */
+struct nps_pkt_instr {
+	__be64 dptr0;
+	union pkt_instr_hdr ih;
+	union pkt_hdr irh;
+	union slc_store_info slc;
+	u64 fdata[2];
+};
+
+/**
+ * struct ctx_hdr - Bookkeeping data about the crypto context
+ * @pool: Pool used to allocate the crypto context
+ * @dma: Base DMA address of the crypto context
+ * @ctx_dma: DMA address of the actual usable crypto context
+ */
+struct ctx_hdr {
+	struct dma_pool *pool;
+	dma_addr_t dma;
+	dma_addr_t ctx_dma;
+};
+
+/*
+ * struct nitrox_sgcomp - SG list component format
+ * @len: Number of bytes at each of the four pointers on the remote host.
+ * @dma: Four pointers to buffers on the remote host.
+ */
+struct nitrox_sgcomp {
+	__be16 len[4];
+	__be64 dma[4];
+};
+
+/*
+ * struct nitrox_sgtable - SG list information
+ * @map_bufs_cnt: Number of buffers mapped
+ * @nr_sgcomp: Number of sglist components
+ * @total_bytes: Total bytes in sglist.
+ * @len: Total sglist components length.
+ * @dma: DMA address of sglist component.
+ * @dir: DMA direction.
+ * @buf: crypto request buffer.
+ * @sglist: SG list of input/output buffers.
+ * @sgcomp: sglist component for NITROX.
+ */
+struct nitrox_sgtable {
+	u8 map_bufs_cnt;
+	u8 nr_sgcomp;
+	u16 total_bytes;
+	u32 len;
+	dma_addr_t dma;
+	enum dma_data_direction dir;
+
+	struct scatterlist *buf;
+	struct nitrox_sglist *sglist;
+	struct nitrox_sgcomp *sgcomp;
+};
+
+/* Response Header Length */
+#define ORH_HLEN	8
+/* Completion bytes Length */
+#define COMP_HLEN	8
+
+struct resp_hdr {
+	u64 orh;
+	dma_addr_t orh_dma;
+	u64 completion;
+	dma_addr_t completion_dma;
+};
+
+typedef void (*completion_t)(struct skcipher_request *skreq, int err);
+
+/**
+ * struct nitrox_softreq - Represents the NITROX Request.
+ * @response: response list entry
+ * @backlog: Backlog list entry
+ * @ndev: Device used to submit the request
+ * @cmdq: Command queue for submission
+ * @resp: Response headers
+ * @instr: 64B instruction
+ * @in: SG table for input
+ * @out: SG table for output
+ * @tstamp: Request submitted time in jiffies
+ * @callback: callback after request completion/timeout
+ * @cb_arg: callback argument
+ */
+struct nitrox_softreq {
+	struct list_head response;
+	struct list_head backlog;
+
+	u32 flags;
+	gfp_t gfp;
+	atomic_t status;
+	bool inplace;
+
+	struct nitrox_device *ndev;
+	struct nitrox_cmdq *cmdq;
+
+	struct nps_pkt_instr instr;
+	struct resp_hdr resp;
+	struct nitrox_sgtable in;
+	struct nitrox_sgtable out;
+
+	unsigned long tstamp;
+
+	completion_t callback;
+	struct skcipher_request *skreq;
+};
+
+#endif /* __NITROX_REQ_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
new file mode 100644
index 0000000..4a362fc
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -0,0 +1,742 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/gfp.h>
+#include <linux/workqueue.h>
+#include <crypto/internal/skcipher.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_req.h"
+#include "nitrox_csr.h"
+
+/* SLC_STORE_INFO */
+#define MIN_UDD_LEN 16
+/* PKT_IN_HDR + SLC_STORE_INFO */
+#define FDATA_SIZE 32
+/* Base destination port for the solicited requests */
+#define SOLICIT_BASE_DPORT 256
+#define PENDING_SIG	0xFFFFFFFFFFFFFFFFUL
+
+#define REQ_NOT_POSTED 1
+#define REQ_BACKLOG    2
+#define REQ_POSTED     3
+
+/**
+ * Response codes from SE microcode
+ * 0x00 - Success
+ *   Completion with no error
+ * 0x43 - ERR_GC_DATA_LEN_INVALID
+ *   Invalid Data length if Encryption Data length is
+ *   less than 16 bytes for AES-XTS and AES-CTS.
+ * 0x45 - ERR_GC_CTX_LEN_INVALID
+ *   Invalid context length: CTXL != 23 words.
+ * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
+ *   DOCSIS support is enabled with other than
+ *   AES/DES-CBC mode encryption.
+ * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
+ *   Authentication offset is other than 0 with
+ *   Encryption IV source = 0.
+ *   Authentication offset is other than 8 (DES)/16 (AES)
+ *   with Encryption IV source = 1
+ * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
+ *   CRC32 is enabled for other than DOCSIS encryption.
+ * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
+ *   Invalid flag options in AES-CCM IV.
+ */
+
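+/*
+ * incr_index - advance a ring index by @count entries, wrapping at @max.
+ * For example, with max = 2048, incr_index(2046, 4, 2048) wraps around to 2.
+ */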
+static inline int incr_index(int index, int count, int max)
+{
+	if ((index + count) >= max)
+		index = index + count - max;
+	else
+		index += count;
+
+	return index;
+}
+
+/**
+ * softreq_unmap_sgbufs - unmap and free the sg lists.
+ * @sr: soft request whose sg lists are to be unmapped
+ */
+static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
+{
+	struct nitrox_device *ndev = sr->ndev;
+	struct device *dev = DEV(ndev);
+	struct nitrox_sglist *sglist;
+
+	/* unmap in sgbuf */
+	sglist = sr->in.sglist;
+	if (!sglist)
+		goto out_unmap;
+
+	/* unmap iv */
+	dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
+	/* unmap src sglist */
+	dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
+	/* unmap gather component */
+	dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
+	kfree(sr->in.sglist);
+	kfree(sr->in.sgcomp);
+	sr->in.sglist = NULL;
+	sr->in.buf = NULL;
+	sr->in.map_bufs_cnt = 0;
+
+out_unmap:
+	/* unmap out sgbuf */
+	sglist = sr->out.sglist;
+	if (!sglist)
+		return;
+
+	/* unmap orh */
+	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
+
+	/* unmap dst sglist */
+	if (!sr->inplace) {
+		dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
+			     sr->out.dir);
+	}
+	/* unmap completion */
+	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
+
+	/* unmap scatter component */
+	dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
+	kfree(sr->out.sglist);
+	kfree(sr->out.sgcomp);
+	sr->out.sglist = NULL;
+	sr->out.buf = NULL;
+	sr->out.map_bufs_cnt = 0;
+}
+
+static void softreq_destroy(struct nitrox_softreq *sr)
+{
+	softreq_unmap_sgbufs(sr);
+	kfree(sr);
+}
+
+/**
+ * create_sg_component - create SG components for the N5 device.
+ * @sr: Request structure
+ * @sgtbl: SG table
+ * @map_nents: number of mapped SG entries
+ *
+ * Component structure
+ *
+ *   63     48 47     32 31    16 15      0
+ *   --------------------------------------
+ *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
+ *   |-------------------------------------
+ *   |               PTR0                 |
+ *   --------------------------------------
+ *   |               PTR1                 |
+ *   --------------------------------------
+ *   |               PTR2                 |
+ *   --------------------------------------
+ *   |               PTR3                 |
+ *   --------------------------------------
+ *
+ *   Returns 0 on success or a negative errno code on error.
+ */
+static int create_sg_component(struct nitrox_softreq *sr,
+			       struct nitrox_sgtable *sgtbl, int map_nents)
+{
+	struct nitrox_device *ndev = sr->ndev;
+	struct nitrox_sgcomp *sgcomp;
+	struct nitrox_sglist *sglist;
+	dma_addr_t dma;
+	size_t sz_comp;
+	int i, j, nr_sgcomp;
+
+	nr_sgcomp = roundup(map_nents, 4) / 4;
+
+	/* each component holds 4 dma pointers */
+	sz_comp = nr_sgcomp * sizeof(*sgcomp);
+	sgcomp = kzalloc(sz_comp, sr->gfp);
+	if (!sgcomp)
+		return -ENOMEM;
+
+	sgtbl->sgcomp = sgcomp;
+	sgtbl->nr_sgcomp = nr_sgcomp;
+
+	sglist = sgtbl->sglist;
+	/* populate device sg component */
+	for (i = 0; i < nr_sgcomp; i++) {
+		for (j = 0; j < 4; j++) {
+			sgcomp->len[j] = cpu_to_be16(sglist->len);
+			sgcomp->dma[j] = cpu_to_be64(sglist->dma);
+			sglist++;
+		}
+		sgcomp++;
+	}
+	/* map the device sg component */
+	dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
+	if (dma_mapping_error(DEV(ndev), dma)) {
+		kfree(sgtbl->sgcomp);
+		sgtbl->sgcomp = NULL;
+		return -ENOMEM;
+	}
+
+	sgtbl->dma = dma;
+	sgtbl->len = sz_comp;
+
+	return 0;
+}
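+
+/*
+ * Editor's note (worked example, not part of the original driver): each
+ * component packs four {LEN, PTR} pairs (four 16-bit lengths followed by
+ * four 64-bit pointers).  With map_nents = 6 mapped entries:
+ *
+ *	nr_sgcomp = roundup(6, 4) / 4 = 2
+ *
+ * so two components are allocated; entries 0-3 fill the first and entries
+ * 4-5 fill the second, whose unused slots stay zero thanks to kzalloc().
+ */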
+
+/**
+ * dma_map_inbufs - DMA map the input sglist and create the sglist
+ *                  component for the N5 device.
+ * @sr: Request structure
+ * @req: Crypto request structure
+ *
+ * Returns 0 if successful or a negative errno code on error.
+ */
+static int dma_map_inbufs(struct nitrox_softreq *sr,
+			  struct se_crypto_request *req)
+{
+	struct device *dev = DEV(sr->ndev);
+	struct scatterlist *sg = req->src;
+	struct nitrox_sglist *glist;
+	int i, nents, ret = 0;
+	dma_addr_t dma;
+	size_t sz;
+
+	nents = sg_nents(req->src);
+
+	/* create gather list for IV and src entries */
+	sz = roundup((1 + nents), 4) * sizeof(*glist);
+	glist = kzalloc(sz, sr->gfp);
+	if (!glist)
+		return -ENOMEM;
+
+	sr->in.sglist = glist;
+	/* map IV */
+	dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, dma)) {
+		ret = -EINVAL;
+		goto iv_map_err;
+	}
+
+	sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+	/* map src entries */
+	nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
+	if (!nents) {
+		ret = -EINVAL;
+		goto src_map_err;
+	}
+	sr->in.buf = req->src;
+
+	/* store the mappings */
+	glist->len = req->ivsize;
+	glist->dma = dma;
+	glist++;
+	sr->in.total_bytes += req->ivsize;
+
+	for_each_sg(req->src, sg, nents, i) {
+		glist->len = sg_dma_len(sg);
+		glist->dma = sg_dma_address(sg);
+		sr->in.total_bytes += glist->len;
+		glist++;
+	}
+	/* roundup map count to align with entries in sg component */
+	sr->in.map_bufs_cnt = (1 + nents);
+
+	/* create NITROX gather component */
+	ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
+	if (ret)
+		goto incomp_err;
+
+	return 0;
+
+incomp_err:
+	dma_unmap_sg(dev, req->src, nents, sr->in.dir);
+	sr->in.map_bufs_cnt = 0;
+src_map_err:
+	dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
+iv_map_err:
+	kfree(sr->in.sglist);
+	sr->in.sglist = NULL;
+	return ret;
+}
+
+static int dma_map_outbufs(struct nitrox_softreq *sr,
+			   struct se_crypto_request *req)
+{
+	struct device *dev = DEV(sr->ndev);
+	struct nitrox_sglist *glist = sr->in.sglist;
+	struct nitrox_sglist *slist;
+	struct scatterlist *sg;
+	int i, nents, map_bufs_cnt, ret = 0;
+	size_t sz;
+
+	nents = sg_nents(req->dst);
+
+	/* create scatter list ORH, IV, dst entries and Completion header */
+	sz = roundup((3 + nents), 4) * sizeof(*slist);
+	slist = kzalloc(sz, sr->gfp);
+	if (!slist)
+		return -ENOMEM;
+
+	sr->out.sglist = slist;
+	sr->out.dir = DMA_BIDIRECTIONAL;
+	/* map ORH */
+	sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
+					  sr->out.dir);
+	if (dma_mapping_error(dev, sr->resp.orh_dma)) {
+		ret = -EINVAL;
+		goto orh_map_err;
+	}
+
+	/* map completion */
+	sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
+						 COMP_HLEN, sr->out.dir);
+	if (dma_mapping_error(dev, sr->resp.completion_dma)) {
+		ret = -EINVAL;
+		goto compl_map_err;
+	}
+
+	sr->inplace = (req->src == req->dst);
+	/* out-of-place request: map the destination scatterlist */
+	if (!sr->inplace) {
+		nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
+		if (!nents) {
+			ret = -EINVAL;
+			goto dst_map_err;
+		}
+	}
+	sr->out.buf = req->dst;
+
+	/* store the mappings */
+	/* orh */
+	slist->len = ORH_HLEN;
+	slist->dma = sr->resp.orh_dma;
+	slist++;
+
+	/* copy the glist mappings */
+	if (sr->inplace) {
+		nents = sr->in.map_bufs_cnt - 1;
+		map_bufs_cnt = sr->in.map_bufs_cnt;
+		while (map_bufs_cnt--) {
+			slist->len = glist->len;
+			slist->dma = glist->dma;
+			slist++;
+			glist++;
+		}
+	} else {
+		/* copy iv mapping */
+		slist->len = glist->len;
+		slist->dma = glist->dma;
+		slist++;
+		/* copy remaining maps */
+		for_each_sg(req->dst, sg, nents, i) {
+			slist->len = sg_dma_len(sg);
+			slist->dma = sg_dma_address(sg);
+			slist++;
+		}
+	}
+
+	/* completion */
+	slist->len = COMP_HLEN;
+	slist->dma = sr->resp.completion_dma;
+
+	sr->out.map_bufs_cnt = (3 + nents);
+
+	ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
+	if (ret)
+		goto outcomp_map_err;
+
+	return 0;
+
+outcomp_map_err:
+	if (!sr->inplace)
+		dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
+	sr->out.map_bufs_cnt = 0;
+	sr->out.buf = NULL;
+dst_map_err:
+	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
+	sr->resp.completion_dma = 0;
+compl_map_err:
+	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
+	sr->resp.orh_dma = 0;
+orh_map_err:
+	kfree(sr->out.sglist);
+	sr->out.sglist = NULL;
+	return ret;
+}
+
+static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
+				    struct se_crypto_request *creq)
+{
+	int ret;
+
+	ret = dma_map_inbufs(sr, creq);
+	if (ret)
+		return ret;
+
+	ret = dma_map_outbufs(sr, creq);
+	if (ret)
+		softreq_unmap_sgbufs(sr);
+
+	return ret;
+}
+
+static inline void backlog_list_add(struct nitrox_softreq *sr,
+				    struct nitrox_cmdq *cmdq)
+{
+	INIT_LIST_HEAD(&sr->backlog);
+
+	spin_lock_bh(&cmdq->backlog_lock);
+	list_add_tail(&sr->backlog, &cmdq->backlog_head);
+	atomic_inc(&cmdq->backlog_count);
+	atomic_set(&sr->status, REQ_BACKLOG);
+	spin_unlock_bh(&cmdq->backlog_lock);
+}
+
+static inline void response_list_add(struct nitrox_softreq *sr,
+				     struct nitrox_cmdq *cmdq)
+{
+	INIT_LIST_HEAD(&sr->response);
+
+	spin_lock_bh(&cmdq->response_lock);
+	list_add_tail(&sr->response, &cmdq->response_head);
+	spin_unlock_bh(&cmdq->response_lock);
+}
+
+static inline void response_list_del(struct nitrox_softreq *sr,
+				     struct nitrox_cmdq *cmdq)
+{
+	spin_lock_bh(&cmdq->response_lock);
+	list_del(&sr->response);
+	spin_unlock_bh(&cmdq->response_lock);
+}
+
+static struct nitrox_softreq *
+get_first_response_entry(struct nitrox_cmdq *cmdq)
+{
+	return list_first_entry_or_null(&cmdq->response_head,
+					struct nitrox_softreq, response);
+}
+
+static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
+{
+	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
+		atomic_dec(&cmdq->pending_count);
+		/* sync with other cpus */
+		smp_mb__after_atomic();
+		return true;
+	}
+	return false;
+}
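+
+/*
+ * Editor's note: cmdq_full() reserves a slot speculatively with
+ * atomic_inc_return() and rolls the count back if the ring already holds
+ * qlen entries, so callers either keep the reservation (and must post) or
+ * see the queue as full and backlog/reject the request.
+ */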
+
+/**
+ * post_se_instr - Post SE instruction to Packet Input ring
+ * @sr: Request structure
+ * @cmdq: Command queue to post the instruction to
+ */
+static void post_se_instr(struct nitrox_softreq *sr,
+			  struct nitrox_cmdq *cmdq)
+{
+	struct nitrox_device *ndev = sr->ndev;
+	int idx;
+	u8 *ent;
+
+	spin_lock_bh(&cmdq->cmdq_lock);
+
+	idx = cmdq->write_idx;
+	/* copy the instruction */
+	ent = cmdq->head + (idx * cmdq->instr_size);
+	memcpy(ent, &sr->instr, cmdq->instr_size);
+
+	atomic_set(&sr->status, REQ_POSTED);
+	response_list_add(sr, cmdq);
+	sr->tstamp = jiffies;
+	/* flush the command queue updates */
+	dma_wmb();
+
+	/* Ring doorbell with count 1 */
+	writeq(1, cmdq->dbell_csr_addr);
+	/* orders the doorbell rings */
+	mmiowb();
+
+	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
+
+	spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
+{
+	struct nitrox_device *ndev = cmdq->ndev;
+	struct nitrox_softreq *sr, *tmp;
+	int ret = 0;
+
+	if (!atomic_read(&cmdq->backlog_count))
+		return 0;
+
+	spin_lock_bh(&cmdq->backlog_lock);
+
+	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
+		struct skcipher_request *skreq;
+
+		/* stop posting once the ring is full */
+		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
+			ret = -ENOSPC;
+			break;
+		}
+		/* delete from backlog list */
+		list_del(&sr->backlog);
+		atomic_dec(&cmdq->backlog_count);
+		/* sync with other cpus */
+		smp_mb__after_atomic();
+
+		skreq = sr->skreq;
+		/* post the command */
+		post_se_instr(sr, cmdq);
+
+		/* backlog requests are posted, wakeup with -EINPROGRESS */
+		skcipher_request_complete(skreq, -EINPROGRESS);
+	}
+	spin_unlock_bh(&cmdq->backlog_lock);
+
+	return ret;
+}
+
+static int nitrox_enqueue_request(struct nitrox_softreq *sr)
+{
+	struct nitrox_cmdq *cmdq = sr->cmdq;
+	struct nitrox_device *ndev = sr->ndev;
+
+	/* try to post backlog requests */
+	post_backlog_cmds(cmdq);
+
+	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
+		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return -ENOSPC;
+		/* add to backlog list */
+		backlog_list_add(sr, cmdq);
+		return -EBUSY;
+	}
+	post_se_instr(sr, cmdq);
+
+	return -EINPROGRESS;
+}
+
+/**
+ * nitrox_process_se_request - Send a request to the SE core
+ * @ndev: NITROX device
+ * @req: Crypto request
+ * @callback: Completion callback
+ * @skreq: skcipher request passed back to @callback
+ *
+ * Returns -EINPROGRESS if the request was posted, -EBUSY if it was
+ * backlogged, or a negative error code on failure.
+ */
+int nitrox_process_se_request(struct nitrox_device *ndev,
+			      struct se_crypto_request *req,
+			      completion_t callback,
+			      struct skcipher_request *skreq)
+{
+	struct nitrox_softreq *sr;
+	dma_addr_t ctx_handle = 0;
+	int qno, ret = 0;
+
+	if (!nitrox_ready(ndev))
+		return -ENODEV;
+
+	sr = kzalloc(sizeof(*sr), req->gfp);
+	if (!sr)
+		return -ENOMEM;
+
+	sr->ndev = ndev;
+	sr->flags = req->flags;
+	sr->gfp = req->gfp;
+	sr->callback = callback;
+	sr->skreq = skreq;
+
+	atomic_set(&sr->status, REQ_NOT_POSTED);
+
+	WRITE_ONCE(sr->resp.orh, PENDING_SIG);
+	WRITE_ONCE(sr->resp.completion, PENDING_SIG);
+
+	ret = softreq_map_iobuf(sr, req);
+	if (ret) {
+		kfree(sr);
+		return ret;
+	}
+
+	/* get the context handle */
+	if (req->ctx_handle) {
+		struct ctx_hdr *hdr;
+		u8 *ctx_ptr;
+
+		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
+		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
+		ctx_handle = hdr->ctx_dma;
+	}
+
+	/* select the queue */
+	qno = smp_processor_id() % ndev->nr_queues;
+
+	sr->cmdq = &ndev->pkt_cmdqs[qno];
+
+	/*
+	 * 64-Byte Instruction Format
+	 *
+	 *  ----------------------
+	 *  |      DPTR0         | 8 bytes
+	 *  ----------------------
+	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
+	 *  ----------------------
+	 *  |    PKT_IN_HDR      | 16 bytes
+	 *  ----------------------
+	 *  |    SLC_INFO        | 16 bytes
+	 *  ----------------------
+	 *  |   Front data       | 16 bytes
+	 *  ----------------------
+	 */
+
+	/* fill the packet instruction */
+	/* word 0 */
+	sr->instr.dptr0 = cpu_to_be64(sr->in.dma);
+
+	/* word 1 */
+	sr->instr.ih.value = 0;
+	sr->instr.ih.s.g = 1;
+	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
+	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
+	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
+	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
+	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);
+
+	/* word 2 */
+	sr->instr.irh.value[0] = 0;
+	sr->instr.irh.s.uddl = MIN_UDD_LEN;
+	/* context length in 64-bit words */
+	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
+	/* offset from solicit base port 256 */
+	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
+	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
+	sr->instr.irh.s.arg = req->ctrl.s.arg;
+	sr->instr.irh.s.opcode = req->opcode;
+	sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);
+
+	/* word 3 */
+	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);
+
+	/* word 4 */
+	sr->instr.slc.value[0] = 0;
+	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
+	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);
+
+	/* word 5 */
+	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);
+
+	/*
+	 * No endian conversion for the front data; it goes into the
+	 * payload as-is. Put the GP header in the front data.
+	 */
+	sr->instr.fdata[0] = *((u64 *)&req->gph);
+	sr->instr.fdata[1] = 0;
+
+	ret = nitrox_enqueue_request(sr);
+	if (ret == -ENOSPC)
+		goto send_fail;
+
+	return ret;
+
+send_fail:
+	softreq_destroy(sr);
+	return ret;
+}
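+
+/*
+ * Editor's note: a minimal, hypothetical caller sketch (not part of the
+ * original driver).  It assumes the algorithm glue has already filled in a
+ * struct se_crypto_request ("creq") and uses the example_done() callback
+ * sketched earlier; only submission and return-value handling are shown.
+ *
+ *	static int example_submit(struct nitrox_device *ndev,
+ *				  struct se_crypto_request *creq,
+ *				  struct skcipher_request *skreq)
+ *	{
+ *		// -EINPROGRESS: posted to the ring, -EBUSY: backlogged;
+ *		// any other negative value is a hard failure.
+ *		return nitrox_process_se_request(ndev, creq, example_done,
+ *						 skreq);
+ *	}
+ */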
+
+static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
+{
+	return time_after_eq(jiffies, (tstamp + timeout));
+}
+
+void backlog_qflush_work(struct work_struct *work)
+{
+	struct nitrox_cmdq *cmdq;
+
+	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
+	post_backlog_cmds(cmdq);
+}
+
+/**
+ * process_response_list - process completed requests
+ * @cmdq: Command queue to process
+ */
+static void process_response_list(struct nitrox_cmdq *cmdq)
+{
+	struct nitrox_device *ndev = cmdq->ndev;
+	struct nitrox_softreq *sr;
+	struct skcipher_request *skreq;
+	completion_t callback;
+	int req_completed = 0, err = 0, budget;
+
+	/* check all pending requests */
+	budget = atomic_read(&cmdq->pending_count);
+
+	while (req_completed < budget) {
+		sr = get_first_response_entry(cmdq);
+		if (!sr)
+			break;
+
+		if (atomic_read(&sr->status) != REQ_POSTED)
+			break;
+
+		/* check orh and completion bytes updates */
+		if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
+			/* request not completed, check for timeout */
+			if (!cmd_timeout(sr->tstamp, ndev->timeout))
+				break;
+			dev_err_ratelimited(DEV(ndev),
+					    "Request timeout, orh 0x%016llx\n",
+					    READ_ONCE(sr->resp.orh));
+		}
+		atomic_dec(&cmdq->pending_count);
+		/* sync with other cpus */
+		smp_mb__after_atomic();
+		/* remove from response list */
+		response_list_del(sr, cmdq);
+
+		callback = sr->callback;
+		skreq = sr->skreq;
+
+		/* ORH error code */
+		err = READ_ONCE(sr->resp.orh) & 0xff;
+		softreq_destroy(sr);
+
+		if (callback)
+			callback(skreq, err);
+
+		req_completed++;
+	}
+}
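+
+/*
+ * Editor's note: both the ORH and the completion word are pre-filled with
+ * PENDING_SIG before submission (see nitrox_process_se_request()), so the
+ * loop above treats a request as finished once the two values differ; while
+ * they are still equal the request is only given up on after ndev->timeout
+ * jiffies.
+ */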
+
+/**
+ * pkt_slc_resp_handler - post processing of SE responses
+ * @data: bottom half argument, a pointer to the per-ring struct bh_data
+ */
+void pkt_slc_resp_handler(unsigned long data)
+{
+	struct bh_data *bh = (void *)(uintptr_t)(data);
+	struct nitrox_cmdq *cmdq = bh->cmdq;
+	union nps_pkt_slc_cnts pkt_slc_cnts;
+
+	/* read completion count */
+	pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
+	/* resend the interrupt if more work to do */
+	pkt_slc_cnts.s.resend = 1;
+
+	process_response_list(cmdq);
+
+	/*
+	 * clear the interrupt with resend bit enabled,
+	 * MSI-X interrupt generates if Completion count > Threshold
+	 */
+	writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
+	/* order the writes */
+	mmiowb();
+
+	if (atomic_read(&cmdq->backlog_count))
+		schedule_work(&cmdq->backlog_qflush);
+}
diff --git a/drivers/crypto/cavium/zip/Makefile b/drivers/crypto/cavium/zip/Makefile
new file mode 100644
index 0000000..020d189
--- /dev/null
+++ b/drivers/crypto/cavium/zip/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Cavium's ZIP Driver.
+#
+
+obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += thunderx_zip.o
+thunderx_zip-y := zip_main.o    \
+                  zip_device.o  \
+                  zip_crypto.o  \
+                  zip_mem.o     \
+                  zip_deflate.o \
+                  zip_inflate.o
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
new file mode 100644
index 0000000..58fb3ed
--- /dev/null
+++ b/drivers/crypto/cavium/zip/common.h
@@ -0,0 +1,223 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+	 * 1. If there is space for new instructions, push the cmd at sw_head.
+	 * 2. If there is space for exactly one instruction, push the new cmd
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+/* Device specific zlib function definitions */
+#include "zip_device.h"
+
+/* ZIP device definitions */
+#include "zip_main.h"
+
+/* ZIP memory allocation/deallocation related definitions */
+#include "zip_mem.h"
+
+/* Device specific structure definitions */
+#include "zip_regs.h"
+
+#define ZIP_ERROR    -1
+
+#define ZIP_FLUSH_FINISH  4
+
+#define RAW_FORMAT		0  /* for rawpipe */
+#define ZLIB_FORMAT		1  /* for zpipe */
+#define GZIP_FORMAT		2  /* for gzpipe */
+#define LZS_FORMAT		3  /* for lzspipe */
+
+/* Max number of ZIP devices supported */
+#define MAX_ZIP_DEVICES		2
+
+/* Configures the number of zip queues to be used */
+#define ZIP_NUM_QUEUES		2
+
+#define DYNAMIC_STOP_EXCESS	1024
+
+/* Maximum buffer sizes in direct mode */
+#define MAX_INPUT_BUFFER_SIZE   (64 * 1024)
+#define MAX_OUTPUT_BUFFER_SIZE  (64 * 1024)
+
+/**
+ * struct zip_operation - common data structure for comp and decomp operations
+ * @input:               Next input byte is read from here
+ * @output:              Next output byte written here
+ * @ctx_addr:            Inflate context buffer address
+ * @history:             Pointer to the history buffer
+ * @input_len:           Number of bytes available at next_in
+ * @input_total_len:     Total number of input bytes read
+ * @output_len:          Remaining free space at next_out
+ * @output_total_len:    Total number of bytes output so far
+ * @csum:                Checksum value of the uncompressed data
+ * @flush:               Flush flag
+ * @format:              Format (depends on stream's wrap)
+ * @speed:               Speed depends on stream's level
+ * @ccode:               Compression code (stream's strategy)
+ * @lzs_flag:            Flag for LZS support
+ * @begin_file:          Beginning of file indication for inflate
+ * @history_len:         Size of the history data
+ * @end_file:            Ending of the file indication for inflate
+ * @compcode:            Completion status of the ZIP invocation
+ * @bytes_read:          Input bytes read in current instruction
+ * @bits_processed:      Total bits processed for entire file
+ * @sizeofptr:           To distinguish between ILP32 and LP64
+ * @sizeofzops:          Optional just for padding
+ *
+ * This structure is used to maintain the required meta data for the
+ * comp and decomp operations.
+ */
+struct zip_operation {
+	u8    *input;
+	u8    *output;
+	u64   ctx_addr;
+	u64   history;
+
+	u32   input_len;
+	u32   input_total_len;
+
+	u32   output_len;
+	u32   output_total_len;
+
+	u32   csum;
+	u32   flush;
+
+	u32   format;
+	u32   speed;
+	u32   ccode;
+	u32   lzs_flag;
+
+	u32   begin_file;
+	u32   history_len;
+
+	u32   end_file;
+	u32   compcode;
+	u32   bytes_read;
+	u32   bits_processed;
+
+	u32   sizeofptr;
+	u32   sizeofzops;
+};
+
+static inline int zip_poll_result(union zip_zres_s *result)
+{
+	int retries = 1000;
+
+	while (!result->s.compcode) {
+		if (!--retries) {
+			pr_err("ZIP ERR: request timed out\n");
+			return -ETIMEDOUT;
+		}
+		udelay(10);
+		/*
+		 * Force re-reading of compcode which is updated
+		 * by the ZIP coprocessor.
+		 */
+		rmb();
+	}
+	return 0;
+}
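+
+/*
+ * Editor's note: zip_poll_result() busy-waits in 10 us steps for at most
+ * 1000 iterations, so the worst case adds roughly 10 ms of polling before
+ * the request fails with -ETIMEDOUT.
+ */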
+
+/* error messages */
+#define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
+			      fmt "\n", __func__, __LINE__, ## args)
+
+#ifdef MSG_ENABLE
+/* Enable all messages */
+#define zip_msg(fmt, args...) pr_info("ZIP_MSG:" fmt "\n", ## args)
+#else
+#define zip_msg(fmt, args...)
+#endif
+
+#if defined(ZIP_DEBUG_ENABLE) && defined(MSG_ENABLE)
+
+#ifdef DEBUG_LEVEL
+
+#define FILE_NAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : \
+	strrchr(__FILE__, '\\') ? strrchr(__FILE__, '\\') + 1 : __FILE__)
+
+#if DEBUG_LEVEL >= 4
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
+			      fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
+
+#elif DEBUG_LEVEL >= 3
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
+			      fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
+
+#elif DEBUG_LEVEL >= 2
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s() : %d: " \
+			      fmt "\n", __func__, __LINE__, ## args)
+
+#else
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
+
+#endif /* DEBUG LEVEL >=4 */
+
+#else
+
+#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
+
+#endif /* DEBUG_LEVEL */
+#else
+
+#define zip_dbg(fmt, args...)
+
+#endif /* ZIP_DEBUG_ENABLE && MSG_ENABLE*/
+
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
new file mode 100644
index 0000000..b92b6e7
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -0,0 +1,319 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include "zip_crypto.h"
+
+static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
+				    int lzs_flag)
+{
+	zip_ops->flush        = ZIP_FLUSH_FINISH;
+
+	/* equivalent to level 6 of opensource zlib */
+	zip_ops->speed          = 1;
+
+	if (!lzs_flag) {
+		zip_ops->ccode		= 0; /* Auto Huffman */
+		zip_ops->lzs_flag	= 0;
+		zip_ops->format		= ZLIB_FORMAT;
+	} else {
+		zip_ops->ccode		= 3; /* LZS Encoding */
+		zip_ops->lzs_flag	= 1;
+		zip_ops->format		= LZS_FORMAT;
+	}
+	zip_ops->begin_file   = 1;
+	zip_ops->history_len  = 0;
+	zip_ops->end_file     = 1;
+	zip_ops->compcode     = 0;
+	zip_ops->csum	      = 1; /* Adler checksum desired */
+}
+
+int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
+{
+	struct zip_operation  *comp_ctx   = &zip_ctx->zip_comp;
+	struct zip_operation  *decomp_ctx = &zip_ctx->zip_decomp;
+
+	zip_static_init_zip_ops(comp_ctx, lzs_flag);
+	zip_static_init_zip_ops(decomp_ctx, lzs_flag);
+
+	comp_ctx->input  = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
+	if (!comp_ctx->input)
+		return -ENOMEM;
+
+	comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
+	if (!comp_ctx->output)
+		goto err_comp_input;
+
+	decomp_ctx->input  = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
+	if (!decomp_ctx->input)
+		goto err_comp_output;
+
+	decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
+	if (!decomp_ctx->output)
+		goto err_decomp_input;
+
+	return 0;
+
+err_decomp_input:
+	zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE);
+
+err_comp_output:
+	zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
+
+err_comp_input:
+	zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
+
+	return -ENOMEM;
+}
+
+void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
+{
+	struct zip_operation  *comp_ctx   = &zip_ctx->zip_comp;
+	struct zip_operation  *dec_ctx = &zip_ctx->zip_decomp;
+
+	zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
+	zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
+
+	zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE);
+	zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
+}
+
+int zip_compress(const u8 *src, unsigned int slen,
+		 u8 *dst, unsigned int *dlen,
+		 struct zip_kernel_ctx *zip_ctx)
+{
+	struct zip_operation  *zip_ops   = NULL;
+	struct zip_state      *zip_state;
+	struct zip_device     *zip = NULL;
+	int ret;
+
+	if (!zip_ctx || !src || !dst || !dlen)
+		return -EINVAL;
+
+	zip = zip_get_device(zip_get_node_id());
+	if (!zip)
+		return -ENODEV;
+
+	zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
+	if (!zip_state)
+		return -ENOMEM;
+
+	zip_ops = &zip_ctx->zip_comp;
+
+	zip_ops->input_len  = slen;
+	zip_ops->output_len = *dlen;
+	memcpy(zip_ops->input, src, slen);
+
+	ret = zip_deflate(zip_ops, zip_state, zip);
+
+	if (!ret) {
+		*dlen = zip_ops->output_len;
+		memcpy(dst, zip_ops->output, *dlen);
+	}
+	kfree(zip_state);
+	return ret;
+}
+
+int zip_decompress(const u8 *src, unsigned int slen,
+		   u8 *dst, unsigned int *dlen,
+		   struct zip_kernel_ctx *zip_ctx)
+{
+	struct zip_operation  *zip_ops   = NULL;
+	struct zip_state      *zip_state;
+	struct zip_device     *zip = NULL;
+	int ret;
+
+	if (!zip_ctx || !src || !dst || !dlen)
+		return -EINVAL;
+
+	zip = zip_get_device(zip_get_node_id());
+	if (!zip)
+		return -ENODEV;
+
+	zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
+	if (!zip_state)
+		return -ENOMEM;
+
+	zip_ops = &zip_ctx->zip_decomp;
+	memcpy(zip_ops->input, src, slen);
+
+	/* Work around for a bug in zlib which needs an extra byte sometimes */
+	if (zip_ops->ccode != 3) /* Not LZS Encoding */
+		zip_ops->input[slen++] = 0;
+
+	zip_ops->input_len  = slen;
+	zip_ops->output_len = *dlen;
+
+	ret = zip_inflate(zip_ops, zip_state, zip);
+
+	if (!ret) {
+		*dlen = zip_ops->output_len;
+		memcpy(dst, zip_ops->output, *dlen);
+	}
+	kfree(zip_state);
+	return ret;
+}
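+
+/*
+ * Editor's note: zip_compress() and zip_decompress() copy the caller's data
+ * into fixed bounce buffers of MAX_INPUT_BUFFER_SIZE/MAX_OUTPUT_BUFFER_SIZE
+ * (64 KiB each), and the zlib workaround above may append one extra byte, so
+ * callers are assumed to keep slen safely below MAX_INPUT_BUFFER_SIZE.
+ */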
+
+/* Legacy Compress framework start */
+int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
+{
+	int ret;
+	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+	ret = zip_ctx_init(zip_ctx, 0);
+
+	return ret;
+}
+
+int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
+{
+	int ret;
+	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+	ret = zip_ctx_init(zip_ctx, 1);
+
+	return ret;
+}
+
+void zip_free_comp_ctx(struct crypto_tfm *tfm)
+{
+	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+	zip_ctx_exit(zip_ctx);
+}
+
+int  zip_comp_compress(struct crypto_tfm *tfm,
+		       const u8 *src, unsigned int slen,
+		       u8 *dst, unsigned int *dlen)
+{
+	int ret;
+	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+	ret = zip_compress(src, slen, dst, dlen, zip_ctx);
+
+	return ret;
+}
+
+int  zip_comp_decompress(struct crypto_tfm *tfm,
+			 const u8 *src, unsigned int slen,
+			 u8 *dst, unsigned int *dlen)
+{
+	int ret;
+	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
+
+	ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
+
+	return ret;
+} /* Legacy compress framework end */
+
+/* SCOMP framework start */
+void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm)
+{
+	int ret;
+	struct zip_kernel_ctx *zip_ctx;
+
+	zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
+	if (!zip_ctx)
+		return ERR_PTR(-ENOMEM);
+
+	ret = zip_ctx_init(zip_ctx, 0);
+
+	if (ret) {
+		kzfree(zip_ctx);
+		return ERR_PTR(ret);
+	}
+
+	return zip_ctx;
+}
+
+void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm)
+{
+	int ret;
+	struct zip_kernel_ctx *zip_ctx;
+
+	zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
+	if (!zip_ctx)
+		return ERR_PTR(-ENOMEM);
+
+	ret = zip_ctx_init(zip_ctx, 1);
+
+	if (ret) {
+		kzfree(zip_ctx);
+		return ERR_PTR(ret);
+	}
+
+	return zip_ctx;
+}
+
+void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+	struct zip_kernel_ctx *zip_ctx = ctx;
+
+	zip_ctx_exit(zip_ctx);
+	kzfree(zip_ctx);
+}
+
+int zip_scomp_compress(struct crypto_scomp *tfm,
+		       const u8 *src, unsigned int slen,
+		       u8 *dst, unsigned int *dlen, void *ctx)
+{
+	int ret;
+	struct zip_kernel_ctx *zip_ctx  = ctx;
+
+	ret = zip_compress(src, slen, dst, dlen, zip_ctx);
+
+	return ret;
+}
+
+int zip_scomp_decompress(struct crypto_scomp *tfm,
+			 const u8 *src, unsigned int slen,
+			 u8 *dst, unsigned int *dlen, void *ctx)
+{
+	int ret;
+	struct zip_kernel_ctx *zip_ctx = ctx;
+
+	ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
+
+	return ret;
+} /* SCOMP framework end */
diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h
new file mode 100644
index 0000000..b59ddfc
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_crypto.h
@@ -0,0 +1,79 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_CRYPTO_H__
+#define __ZIP_CRYPTO_H__
+
+#include <linux/crypto.h>
+#include <crypto/internal/scompress.h>
+#include "common.h"
+#include "zip_deflate.h"
+#include "zip_inflate.h"
+
+struct zip_kernel_ctx {
+	struct zip_operation zip_comp;
+	struct zip_operation zip_decomp;
+};
+
+int  zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm);
+int  zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm);
+void zip_free_comp_ctx(struct crypto_tfm *tfm);
+int  zip_comp_compress(struct crypto_tfm *tfm,
+		       const u8 *src, unsigned int slen,
+		       u8 *dst, unsigned int *dlen);
+int  zip_comp_decompress(struct crypto_tfm *tfm,
+			 const u8 *src, unsigned int slen,
+			 u8 *dst, unsigned int *dlen);
+
+void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm);
+void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm);
+void  zip_free_scomp_ctx(struct crypto_scomp *tfm, void *zip_ctx);
+int   zip_scomp_compress(struct crypto_scomp *tfm,
+			 const u8 *src, unsigned int slen,
+			 u8 *dst, unsigned int *dlen, void *ctx);
+int   zip_scomp_decompress(struct crypto_scomp *tfm,
+			   const u8 *src, unsigned int slen,
+			   u8 *dst, unsigned int *dlen, void *ctx);
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
new file mode 100644
index 0000000..d7133f8
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_deflate.c
@@ -0,0 +1,200 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "common.h"
+#include "zip_deflate.h"
+
+/* Prepares the deflate zip command */
+static int prepare_zip_command(struct zip_operation *zip_ops,
+			       struct zip_state *s, union zip_inst_s *zip_cmd)
+{
+	union zip_zres_s *result_ptr = &s->result;
+
+	memset(zip_cmd, 0, sizeof(s->zip_cmd));
+	memset(result_ptr, 0, sizeof(s->result));
+
+	/* IWORD #0 */
+	/* History gather */
+	zip_cmd->s.hg = 0;
+	/* compression enable = 1 for deflate */
+	zip_cmd->s.ce = 1;
+	/* sf (sync flush) */
+	zip_cmd->s.sf = 1;
+	/* ef (end of file) */
+	if (zip_ops->flush == ZIP_FLUSH_FINISH) {
+		zip_cmd->s.ef = 1;
+		zip_cmd->s.sf = 0;
+	}
+
+	zip_cmd->s.cc = zip_ops->ccode;
+	/* ss (compression speed/storage) */
+	zip_cmd->s.ss = zip_ops->speed;
+
+	/* IWORD #1 */
+	/* adler checksum */
+	zip_cmd->s.adlercrc32 = zip_ops->csum;
+	zip_cmd->s.historylength = zip_ops->history_len;
+	zip_cmd->s.dg = 0;
+
+	/* IWORD # 6 and 7 - compression input/history pointer */
+	zip_cmd->s.inp_ptr_addr.s.addr  = __pa(zip_ops->input);
+	zip_cmd->s.inp_ptr_ctl.s.length = (zip_ops->input_len +
+					   zip_ops->history_len);
+	zip_cmd->s.ds = 0;
+
+	/* IWORD # 8 and 9 - Output pointer */
+	zip_cmd->s.out_ptr_addr.s.addr  = __pa(zip_ops->output);
+	zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
+	/* maximum number of output-stream bytes that can be written */
+	zip_cmd->s.totaloutputlength    = zip_ops->output_len;
+
+	/* IWORD # 10 and 11 - Result pointer */
+	zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
+	/* Clearing completion code */
+	result_ptr->s.compcode = 0;
+
+	return 0;
+}
+
+/**
+ * zip_deflate - API to offload deflate operation to hardware
+ * @zip_ops: Pointer to zip operation structure
+ * @s:       Pointer to the structure representing zip state
+ * @zip_dev: Pointer to zip device structure
+ *
+ * This function prepares the zip deflate command and submits it to the zip
+ * engine for processing.
+ *
+ * Return: 0 if successful or error code
+ */
+int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
+		struct zip_device *zip_dev)
+{
+	union zip_inst_s *zip_cmd = &s->zip_cmd;
+	union zip_zres_s *result_ptr = &s->result;
+	u32 queue;
+
+	/* Prepares zip command based on the input parameters */
+	prepare_zip_command(zip_ops, s, zip_cmd);
+
+	atomic64_add(zip_ops->input_len, &zip_dev->stats.comp_in_bytes);
+	/* Loads zip command into command queues and rings door bell */
+	queue = zip_load_instr(zip_cmd, zip_dev);
+
+	/* Stats update for compression requests submitted */
+	atomic64_inc(&zip_dev->stats.comp_req_submit);
+
+	/* Wait for completion or error */
+	zip_poll_result(result_ptr);
+
+	/* Stats update for compression requests completed */
+	atomic64_inc(&zip_dev->stats.comp_req_complete);
+
+	zip_ops->compcode = result_ptr->s.compcode;
+	switch (zip_ops->compcode) {
+	case ZIP_CMD_NOTDONE:
+		zip_dbg("Zip instruction not yet completed");
+		return ZIP_ERROR;
+
+	case ZIP_CMD_SUCCESS:
+		zip_dbg("Zip instruction completed successfully");
+		zip_update_cmd_bufs(zip_dev, queue);
+		break;
+
+	case ZIP_CMD_DTRUNC:
+		zip_dbg("Output Truncate error");
+		/* Returning ZIP_ERROR to avoid copy to user */
+		return ZIP_ERROR;
+
+	default:
+		zip_err("Zip instruction failed. Code:%d", zip_ops->compcode);
+		return ZIP_ERROR;
+	}
+
+	/* Update the CRC depending on the format */
+	switch (zip_ops->format) {
+	case RAW_FORMAT:
+		zip_dbg("RAW Format: %d ", zip_ops->format);
+		/* Get checksum from engine, need to feed it again */
+		zip_ops->csum = result_ptr->s.adler32;
+		break;
+
+	case ZLIB_FORMAT:
+		zip_dbg("ZLIB Format: %d ", zip_ops->format);
+		zip_ops->csum = result_ptr->s.adler32;
+		break;
+
+	case GZIP_FORMAT:
+		zip_dbg("GZIP Format: %d ", zip_ops->format);
+		zip_ops->csum = result_ptr->s.crc32;
+		break;
+
+	case LZS_FORMAT:
+		zip_dbg("LZS Format: %d ", zip_ops->format);
+		break;
+
+	default:
+		zip_err("Unknown Format:%d\n", zip_ops->format);
+	}
+
+	atomic64_add(result_ptr->s.totalbyteswritten,
+		     &zip_dev->stats.comp_out_bytes);
+
+	/* Update output_len */
+	if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
+		/* Device reports writing more bytes than the output buffer holds */
+		zip_err("output_len (%d) < total bytes written(%d)\n",
+			zip_ops->output_len, result_ptr->s.totalbyteswritten);
+		zip_ops->output_len = 0;
+
+	} else {
+		zip_ops->output_len = result_ptr->s.totalbyteswritten;
+	}
+
+	return 0;
+}
diff --git a/drivers/crypto/cavium/zip/zip_deflate.h b/drivers/crypto/cavium/zip/zip_deflate.h
new file mode 100644
index 0000000..1d32e76
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_deflate.h
@@ -0,0 +1,62 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_DEFLATE_H__
+#define __ZIP_DEFLATE_H__
+
+/**
+ * zip_deflate - API to offload deflate operation to hardware
+ * @zip_ops: Pointer to zip operation structure
+ * @s:       Pointer to the structure representing zip state
+ * @zip_dev: Pointer to the structure representing zip device
+ *
+ * This function prepares the zip deflate command and submits it to the zip
+ * engine by ringing the doorbell.
+ *
+ * Return: 0 if successful or error code
+ */
+int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
+		struct zip_device *zip_dev);
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_device.c b/drivers/crypto/cavium/zip/zip_device.c
new file mode 100644
index 0000000..f174ec2
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_device.c
@@ -0,0 +1,202 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include "common.h"
+#include "zip_deflate.h"
+
+/**
+ * zip_cmd_queue_consumed - Calculates the space consumed in the command queue.
+ *
+ * @zip_dev: Pointer to zip device structure
+ * @queue:   Queue number
+ *
+ * Return: Bytes consumed in the command queue buffer.
+ */
+static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue)
+{
+	return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *
+		sizeof(u64));
+}
+
+/**
+ * zip_load_instr - Submits the instruction into the ZIP command queue
+ * @instr:      Pointer to the instruction to be submitted
+ * @zip_dev:    Pointer to ZIP device structure to which the instruction is to
+ *              be submitted
+ *
+ * This function copies the ZIP instruction to the command queue and rings the
+ * doorbell to notify the engine of the instruction submission. The command
+ * queue is maintained in a circular fashion. When there is space for exactly
+ * one instruction in the queue, next chunk pointer of the queue is made to
+ * point to the head of the queue, thus maintaining a circular queue.
+ *
+ * Return: Queue number to which the instruction was submitted
+ */
+u32 zip_load_instr(union zip_inst_s *instr,
+		   struct zip_device *zip_dev)
+{
+	union zip_quex_doorbell dbell;
+	u32 queue = 0;
+	u32 consumed = 0;
+	u64 *ncb_ptr = NULL;
+	union zip_nptr_s ncp;
+
+	/*
+	 * Distribute the instructions between the enabled queues based on
+	 * the CPU id.
+	 */
+	if (raw_smp_processor_id() % 2 == 0)
+		queue = 0;
+	else
+		queue = 1;
+
+	zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue);
+
+	/* Take cmd buffer lock */
+	spin_lock(&zip_dev->iq[queue].lock);
+
+	/*
+	 * Command Queue implementation
+	 * 1. If there is place for new instructions, push the cmd at sw_head.
+	 * 2. If there is place for exactly one instruction, push the new cmd
+	 *    at the sw_head. Make sw_head point to the sw_tail to make it
+	 *    circular. Write sw_head's physical address to the "Next-Chunk
+	 *    Buffer Ptr" to make it cmd_hw_tail.
+	 * 3. Ring the door bell.
+	 */
+	zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
+	zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
+
+	consumed = zip_cmd_queue_consumed(zip_dev, queue);
+	/* Check if there is space to push just one cmd */
+	if ((consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)) {
+		zip_dbg("Cmd queue space available for single command");
+		/* Space for one cmd, push it and wrap the queue around */
+		memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
+		       sizeof(union zip_inst_s));
+		zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
+
+		/* Now, point the "Next-Chunk Buffer Ptr" to sw_head */
+		ncb_ptr = zip_dev->iq[queue].sw_head;
+
+		zip_dbg("ncb addr :0x%lx sw_head addr :0x%lx",
+			ncb_ptr, zip_dev->iq[queue].sw_head - 16);
+
+		/* Using Circular command queue */
+		zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail;
+		/* Mark this buffer for free */
+		zip_dev->iq[queue].free_flag = 1;
+
+		/* Write new chunk buffer address at "Next-Chunk Buffer Ptr" */
+		ncp.u_reg64 = 0ull;
+		ncp.s.addr = __pa(zip_dev->iq[queue].sw_head);
+		*ncb_ptr = ncp.u_reg64;
+		zip_dbg("*ncb_ptr :0x%lx sw_head[phys] :0x%lx",
+			*ncb_ptr, __pa(zip_dev->iq[queue].sw_head));
+
+		zip_dev->iq[queue].pend_cnt++;
+
+	} else {
+		zip_dbg("Enough space is available for commands");
+		/* Push this cmd to cmd queue buffer */
+		memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
+		       sizeof(union zip_inst_s));
+		zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
+
+		zip_dev->iq[queue].pend_cnt++;
+	}
+	zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
+		zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
+		zip_dev->iq[queue].hw_tail);
+
+	zip_dbg(" Pushed the new cmd : pend_cnt : %d",
+		zip_dev->iq[queue].pend_cnt);
+
+	/* Ring the doorbell */
+	dbell.u_reg64     = 0ull;
+	dbell.s.dbell_cnt = 1;
+	zip_reg_write(dbell.u_reg64,
+		      (zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue)));
+
+	/* Unlock cmd buffer lock */
+	spin_unlock(&zip_dev->iq[queue].lock);
+
+	return queue;
+}
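+
+/*
+ * Editor's note (worked example, not part of the original driver): each
+ * instruction occupies 128 bytes (16 x 64-bit words).  When
+ * consumed + 128 == ZIP_CMD_QBUF_SIZE - 8, only one instruction slot plus
+ * the 8-byte "Next-Chunk Buffer Ptr" remain, so the command goes into that
+ * last slot, sw_head is reset to sw_tail and the new head's physical address
+ * is written into the next-chunk pointer, which is what keeps the queue
+ * circular.
+ */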
+
+/**
+ * zip_update_cmd_bufs - Updates the queue statistics after posting the
+ *                       instruction
+ * @zip_dev: Pointer to zip device structure
+ * @queue:   Queue number
+ */
+void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue)
+{
+	/* Take cmd buffer lock */
+	spin_lock(&zip_dev->iq[queue].lock);
+
+	/* Check if the previous buffer can be freed */
+	if (zip_dev->iq[queue].free_flag == 1) {
+		zip_dbg("Free flag. Free cmd buffer, adjust sw head and tail");
+		/* Reset the free flag */
+		zip_dev->iq[queue].free_flag = 0;
+
+		/* Point the hw_tail to start of the new chunk buffer */
+		zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head;
+	} else {
+		zip_dbg("Free flag not set. increment hw tail");
+		zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */
+	}
+
+	zip_dev->iq[queue].done_cnt++;
+	zip_dev->iq[queue].pend_cnt--;
+
+	zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
+		zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
+		zip_dev->iq[queue].hw_tail);
+	zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt);
+
+	spin_unlock(&zip_dev->iq[queue].lock);
+}
diff --git a/drivers/crypto/cavium/zip/zip_device.h b/drivers/crypto/cavium/zip/zip_device.h
new file mode 100644
index 0000000..9e18b3b
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_device.h
@@ -0,0 +1,108 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_DEVICE_H__
+#define __ZIP_DEVICE_H__
+
+#include <linux/types.h>
+#include "zip_main.h"
+
+struct sg_info {
+	/*
+	 * Pointer to the input data when scatter_gather == 0 and
+	 * pointer to the input gather list buffer when scatter_gather == 1
+	 */
+	union zip_zptr_s *gather;
+
+	/*
+	 * Pointer to the output data when scatter_gather == 0 and
+	 * pointer to the output scatter list buffer when scatter_gather == 1
+	 */
+	union zip_zptr_s *scatter;
+
+	/*
+	 * Holds size of the output buffer pointed by scatter list
+	 * when scatter_gather == 1
+	 */
+	u64 scatter_buf_size;
+
+	/* for gather data */
+	u64 gather_enable;
+
+	/* for scatter data */
+	u64 scatter_enable;
+
+	/* Number of gather list pointers for gather data */
+	u32 gbuf_cnt;
+
+	/* Number of scatter list pointers for scatter data */
+	u32 sbuf_cnt;
+
+	/* Buffers allocation state */
+	u8 alloc_state;
+};
+
+/**
+ * struct zip_state - Structure representing the required information related
+ *                    to a command
+ * @zip_cmd: Pointer to zip instruction structure
+ * @result:  Pointer to zip result structure
+ * @ctx:     Context pointer for inflate
+ * @history: Decompression history pointer
+ * @sginfo:  Scatter-gather info structure
+ */
+struct zip_state {
+	union zip_inst_s zip_cmd;
+	union zip_zres_s result;
+	union zip_zptr_s *ctx;
+	union zip_zptr_s *history;
+	struct sg_info   sginfo;
+};
+
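+/*
+ * Size (in bytes) of the hardware decompression context buffer and of the
+ * inflate/deflate history buffers; 32768 matches the maximum DEFLATE window.
+ */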
+#define ZIP_CONTEXT_SIZE          2048
+#define ZIP_INFLATE_HISTORY_SIZE  32768
+#define ZIP_DEFLATE_HISTORY_SIZE  32768
+
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
new file mode 100644
index 0000000..7e0d73e
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_inflate.c
@@ -0,0 +1,223 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "common.h"
+#include "zip_inflate.h"
+
+static int prepare_inflate_zcmd(struct zip_operation *zip_ops,
+				struct zip_state *s, union zip_inst_s *zip_cmd)
+{
+	union zip_zres_s *result_ptr = &s->result;
+
+	memset(zip_cmd, 0, sizeof(s->zip_cmd));
+	memset(result_ptr, 0, sizeof(s->result));
+
+	/* IWORD#0 */
+
+	/* Decompression History Gather list - no gather list */
+	zip_cmd->s.hg = 0;
+	/* For decompression, CE must be 0x0. */
+	zip_cmd->s.ce = 0;
+	/* For decompression, SS must be 0x0. */
+	zip_cmd->s.ss = 0;
+	/* For decompression, SF should always be set. */
+	zip_cmd->s.sf = 1;
+
+	/* Begin File */
+	if (zip_ops->begin_file == 0)
+		zip_cmd->s.bf = 0;
+	else
+		zip_cmd->s.bf = 1;
+
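+	/* End of File (EF) */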
+	zip_cmd->s.ef = 1;
+	/* 0: for Deflate decompression, 3: for LZS decompression */
+	zip_cmd->s.cc = zip_ops->ccode;
+
+	/* IWORD #1*/
+
+	/* adler checksum */
+	zip_cmd->s.adlercrc32 = zip_ops->csum;
+
+	/*
+	 * HISTORYLENGTH must be 0x0 for any ZIP decompress operation.
+	 * History data is added to a decompression operation via IWORD3.
+	 */
+	zip_cmd->s.historylength = 0;
+	zip_cmd->s.ds = 0;
+
+	/* IWORD # 8 and 9 - Output pointer */
+	zip_cmd->s.out_ptr_addr.s.addr  = __pa(zip_ops->output);
+	zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
+
+	/* Maximum number of output-stream bytes that can be written */
+	zip_cmd->s.totaloutputlength    = zip_ops->output_len;
+
+	zip_dbg("Data Direct Input case ");
+
+	/* IWORD # 6 and 7 - input pointer */
+	zip_cmd->s.dg = 0;
+	zip_cmd->s.inp_ptr_addr.s.addr  = __pa((u8 *)zip_ops->input);
+	zip_cmd->s.inp_ptr_ctl.s.length = zip_ops->input_len;
+
+	/* IWORD # 10 and 11 - Result pointer */
+	zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
+
+	/* Clearing completion code */
+	result_ptr->s.compcode = 0;
+
+	/* Returning 0 for the time being */
+	return 0;
+}
+
+/**
+ * zip_inflate - API to offload inflate operation to hardware
+ * @zip_ops: Pointer to zip operation structure
+ * @s:       Pointer to the structure representing zip state
+ * @zip_dev: Pointer to zip device structure
+ *
+ * This function prepares the zip inflate command and submits it to the zip
+ * engine for processing.
+ *
+ * Return: 0 if successful or error code
+ */
+int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
+		struct zip_device *zip_dev)
+{
+	union zip_inst_s *zip_cmd    = &s->zip_cmd;
+	union zip_zres_s *result_ptr = &s->result;
+	u32 queue;
+
+	/* Prepare inflate zip command */
+	prepare_inflate_zcmd(zip_ops, s, zip_cmd);
+
+	atomic64_add(zip_ops->input_len, &zip_dev->stats.decomp_in_bytes);
+
+	/* Load inflate command to zip queue and ring the doorbell */
+	queue = zip_load_instr(zip_cmd, zip_dev);
+
+	/* Decompression requests submitted stats update */
+	atomic64_inc(&zip_dev->stats.decomp_req_submit);
+
+	/* Wait for completion or error */
+	zip_poll_result(result_ptr);
+
+	/* Decompression requests completed stats update */
+	atomic64_inc(&zip_dev->stats.decomp_req_complete);
+
+	zip_ops->compcode = result_ptr->s.compcode;
+	switch (zip_ops->compcode) {
+	case ZIP_CMD_NOTDONE:
+		zip_dbg("Zip Instruction not yet completed\n");
+		return ZIP_ERROR;
+
+	case ZIP_CMD_SUCCESS:
+		zip_dbg("Zip Instruction completed successfully\n");
+		break;
+
+	case ZIP_CMD_DYNAMIC_STOP:
+		zip_dbg(" Dynamic stop Initiated\n");
+		break;
+
+	default:
+		zip_dbg("Instruction failed. Code = %d\n", zip_ops->compcode);
+		atomic64_inc(&zip_dev->stats.decomp_bad_reqs);
+		zip_update_cmd_bufs(zip_dev, queue);
+		return ZIP_ERROR;
+	}
+
+	zip_update_cmd_bufs(zip_dev, queue);
+
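+	/*
+	 * ccode 3 selects LZS decompression (see prepare_inflate_zcmd); a
+	 * flush value of 4 is taken here to mean a finishing flush, so the
+	 * result is marked end-of-file unless the engine reported a dynamic
+	 * stop.
+	 */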
+	if ((zip_ops->ccode == 3) && (zip_ops->flush == 4) &&
+	    (zip_ops->compcode != ZIP_CMD_DYNAMIC_STOP))
+		result_ptr->s.ef = 1;
+
+	zip_ops->csum = result_ptr->s.adler32;
+
+	atomic64_add(result_ptr->s.totalbyteswritten,
+		     &zip_dev->stats.decomp_out_bytes);
+
+	if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
+		zip_err("output_len (%d) < total bytes written (%d)\n",
+			zip_ops->output_len, result_ptr->s.totalbyteswritten);
+		zip_ops->output_len = 0;
+	} else {
+		zip_ops->output_len = result_ptr->s.totalbyteswritten;
+	}
+
+	zip_ops->bytes_read = result_ptr->s.totalbytesread;
+	zip_ops->bits_processed = result_ptr->s.totalbitsprocessed;
+	zip_ops->end_file = result_ptr->s.ef;
+	if (zip_ops->end_file) {
+		switch (zip_ops->format) {
+		case RAW_FORMAT:
+			zip_dbg("RAW Format: %d ", zip_ops->format);
+			/* Get checksum from engine */
+			zip_ops->csum = result_ptr->s.adler32;
+			break;
+
+		case ZLIB_FORMAT:
+			zip_dbg("ZLIB Format: %d ", zip_ops->format);
+			zip_ops->csum = result_ptr->s.adler32;
+			break;
+
+		case GZIP_FORMAT:
+			zip_dbg("GZIP Format: %d ", zip_ops->format);
+			zip_ops->csum = result_ptr->s.crc32;
+			break;
+
+		case LZS_FORMAT:
+			zip_dbg("LZS Format: %d ", zip_ops->format);
+			break;
+
+		default:
+			zip_err("Format error:%d\n", zip_ops->format);
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/crypto/cavium/zip/zip_inflate.h b/drivers/crypto/cavium/zip/zip_inflate.h
new file mode 100644
index 0000000..6b20f17
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_inflate.h
@@ -0,0 +1,62 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_INFLATE_H__
+#define __ZIP_INFLATE_H__
+
+/**
+ * zip_inflate - API to offload inflate operation to hardware
+ * @zip_ops: Pointer to zip operation structure
+ * @s:       Pointer to the structure representing zip state
+ * @zip_dev: Pointer to the structure representing zip device
+ *
+ * This function prepares the zip inflate command and submits it to the zip
+ * engine for processing.
+ *
+ * Return: 0 if successful or error code
+ */
+int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
+		struct zip_device *zip_dev);
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
new file mode 100644
index 0000000..be055b9
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -0,0 +1,727 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include "common.h"
+#include "zip_crypto.h"
+
+#define DRV_NAME		"ThunderX-ZIP"
+
+static struct zip_device *zip_dev[MAX_ZIP_DEVICES];
+
+static const struct pci_device_id zip_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
+	{ 0, }
+};
+
+void zip_reg_write(u64 val, u64 __iomem *addr)
+{
+	writeq(val, addr);
+}
+
+u64 zip_reg_read(u64 __iomem *addr)
+{
+	return readq(addr);
+}
+
+/*
+ * Allocates new ZIP device structure
+ * Returns zip_device pointer or NULL if cannot allocate memory for zip_device
+ */
+static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
+{
+	struct zip_device *zip = NULL;
+	int idx;
+
+	for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
+		if (!zip_dev[idx])
+			break;
+	}
+
+	/* To ensure that the index is within the limit */
+	if (idx < MAX_ZIP_DEVICES)
+		zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
+
+	if (!zip)
+		return NULL;
+
+	zip_dev[idx] = zip;
+	zip->index = idx;
+	return zip;
+}
+
+/**
+ * zip_get_device - Get ZIP device based on node id of cpu
+ *
+ * @node: Node id of the current cpu
+ * Return: Pointer to Zip device structure
+ */
+struct zip_device *zip_get_device(int node)
+{
+	if ((node < MAX_ZIP_DEVICES) && (node >= 0))
+		return zip_dev[node];
+
+	zip_err("ZIP device not found for node id %d\n", node);
+	return NULL;
+}
+
+/**
+ * zip_get_node_id - Get the node id of the current cpu
+ *
+ * Return: Node id of the current cpu
+ */
+int zip_get_node_id(void)
+{
+	return cpu_to_node(raw_smp_processor_id());
+}
+
+/* Initializes the ZIP h/w sub-system */
+static int zip_init_hw(struct zip_device *zip)
+{
+	union zip_cmd_ctl    cmd_ctl;
+	union zip_constants  constants;
+	union zip_que_ena    que_ena;
+	union zip_quex_map   que_map;
+	union zip_que_pri    que_pri;
+
+	union zip_quex_sbuf_addr que_sbuf_addr;
+	union zip_quex_sbuf_ctl  que_sbuf_ctl;
+
+	int q = 0;
+
+	/* Enable the ZIP Engine(Core) Clock */
+	cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
+	cmd_ctl.s.forceclk = 1;
+	zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));
+
+	zip_msg("ZIP_CMD_CTL  : 0x%016llx",
+		zip_reg_read(zip->reg_base + ZIP_CMD_CTL));
+
+	constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
+	zip->depth    = constants.s.depth;
+	zip->onfsize  = constants.s.onfsize;
+	zip->ctxsize  = constants.s.ctxsize;
+
+	zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
+		zip->depth, zip->onfsize, zip->ctxsize);
+
+	/*
+	 * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
+	 * have the correct buffer pointer and size configured for each
+	 * instruction queue.
+	 */
+	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
+		que_sbuf_ctl.u_reg64 = 0ull;
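+		/* The SBUF size is programmed in units of 64-bit words */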
+		que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
+		que_sbuf_ctl.s.inst_be   = 0;
+		que_sbuf_ctl.s.stream_id = 0;
+		zip_reg_write(que_sbuf_ctl.u_reg64,
+			      (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
+
+		zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
+			zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
+	}
+
+	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
+		memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));
+
+		spin_lock_init(&zip->iq[q].lock);
+
+		if (zip_cmd_qbuf_alloc(zip, q)) {
+			while (q != 0) {
+				q--;
+				zip_cmd_qbuf_free(zip, q);
+			}
+			return -ENOMEM;
+		}
+
+		/* Initialize tail ptr to head */
+		zip->iq[q].sw_tail = zip->iq[q].sw_head;
+		zip->iq[q].hw_tail = zip->iq[q].sw_head;
+
+		/* Write the physical addr to register */
+		que_sbuf_addr.u_reg64   = 0ull;
+		que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
+				       ZIP_128B_ALIGN);
+
+		zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
+			(u64)que_sbuf_addr.s.ptr);
+
+		zip_reg_write(que_sbuf_addr.u_reg64,
+			      (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
+
+		zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
+			zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
+
+		zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
+			zip->iq[q].sw_head, zip->iq[q].sw_tail,
+			zip->iq[q].hw_tail);
+		zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
+	}
+
+	/*
+	 * Queue-to-ZIP core mapping
+	 * If a queue is not mapped to a particular core, it is equivalent to
+	 * the ZIP core being disabled.
+	 */
+	que_ena.u_reg64 = 0x0ull;
+	/* Enabling queues based on ZIP_NUM_QUEUES */
+	for (q = 0; q < ZIP_NUM_QUEUES; q++)
+		que_ena.s.ena |= (0x1 << q);
+	zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));
+
+	zip_msg("QUE_ENA      : 0x%016llx",
+		zip_reg_read(zip->reg_base + ZIP_QUE_ENA));
+
+	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
+		que_map.u_reg64 = 0ull;
+		/* Mapping each queue to two ZIP cores */
+		que_map.s.zce = 0x3;
+		zip_reg_write(que_map.u_reg64,
+			      (zip->reg_base + ZIP_QUEX_MAP(q)));
+
+		zip_msg("QUE_MAP(%d)   : 0x%016llx", q,
+			zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
+	}
+
+	que_pri.u_reg64 = 0ull;
+	for (q = 0; q < ZIP_NUM_QUEUES; q++)
+		que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
+	zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));
+
+	zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));
+
+	return 0;
+}
+
+static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct zip_device *zip = NULL;
+	int    err;
+
+	zip = zip_alloc_device(pdev);
+	if (!zip)
+		return -ENOMEM;
+
+	dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
+		 pdev->vendor, pdev->device, dev_to_node(dev));
+
+	pci_set_drvdata(pdev, zip);
+	zip->pdev = pdev;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(dev, "Failed to enable PCI device");
+		goto err_free_device;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(dev, "PCI request regions failed 0x%x", err);
+		goto err_disable_device;
+	}
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "Unable to get usable DMA configuration\n");
+		goto err_release_regions;
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "Unable to get 48-bit DMA for allocations\n");
+		goto err_release_regions;
+	}
+
+	/* MAP configuration registers */
+	zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
+	if (!zip->reg_base) {
+		dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+
+	/* Initialize ZIP Hardware */
+	err = zip_init_hw(zip);
+	if (err)
+		goto err_release_regions;
+
+	return 0;
+
+err_release_regions:
+	if (zip->reg_base)
+		iounmap(zip->reg_base);
+	pci_release_regions(pdev);
+
+err_disable_device:
+	pci_disable_device(pdev);
+
+err_free_device:
+	pci_set_drvdata(pdev, NULL);
+
+	/* Remove zip_dev from zip_device list, free the zip_device memory */
+	zip_dev[zip->index] = NULL;
+	devm_kfree(dev, zip);
+
+	return err;
+}
+
+static void zip_remove(struct pci_dev *pdev)
+{
+	struct zip_device *zip = pci_get_drvdata(pdev);
+	union zip_cmd_ctl cmd_ctl;
+	int q = 0;
+
+	if (!zip)
+		return;
+
+	if (zip->reg_base) {
+		cmd_ctl.u_reg64 = 0x0ull;
+		cmd_ctl.s.reset = 1;  /* Forces ZIP cores to do reset */
+		zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
+		iounmap(zip->reg_base);
+	}
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+	/*
+	 * Free Command Queue buffers. This free should be called for all
+	 * the enabled Queues.
+	 */
+	for (q = 0; q < ZIP_NUM_QUEUES; q++)
+		zip_cmd_qbuf_free(zip, q);
+
+	pci_set_drvdata(pdev, NULL);
+	/* remove zip device from zip device list */
+	zip_dev[zip->index] = NULL;
+}
+
+/* PCI Sub-System Interface */
+static struct pci_driver zip_driver = {
+	.name	    =  DRV_NAME,
+	.id_table   =  zip_id_table,
+	.probe	    =  zip_probe,
+	.remove     =  zip_remove,
+};
+
+/* Kernel Crypto Subsystem Interface */
+
+static struct crypto_alg zip_comp_deflate = {
+	.cra_name		= "deflate",
+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
+	.cra_ctxsize		= sizeof(struct zip_kernel_ctx),
+	.cra_priority           = 300,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= zip_alloc_comp_ctx_deflate,
+	.cra_exit		= zip_free_comp_ctx,
+	.cra_u			= { .compress = {
+		.coa_compress	= zip_comp_compress,
+		.coa_decompress	= zip_comp_decompress
+		 } }
+};
+
+static struct crypto_alg zip_comp_lzs = {
+	.cra_name		= "lzs",
+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
+	.cra_ctxsize		= sizeof(struct zip_kernel_ctx),
+	.cra_priority           = 300,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= zip_alloc_comp_ctx_lzs,
+	.cra_exit		= zip_free_comp_ctx,
+	.cra_u			= { .compress = {
+		.coa_compress	= zip_comp_compress,
+		.coa_decompress	= zip_comp_decompress
+		 } }
+};
+
+static struct scomp_alg zip_scomp_deflate = {
+	.alloc_ctx		= zip_alloc_scomp_ctx_deflate,
+	.free_ctx		= zip_free_scomp_ctx,
+	.compress		= zip_scomp_compress,
+	.decompress		= zip_scomp_decompress,
+	.base			= {
+		.cra_name		= "deflate",
+		.cra_driver_name	= "deflate-scomp",
+		.cra_module		= THIS_MODULE,
+		.cra_priority           = 300,
+	}
+};
+
+static struct scomp_alg zip_scomp_lzs = {
+	.alloc_ctx		= zip_alloc_scomp_ctx_lzs,
+	.free_ctx		= zip_free_scomp_ctx,
+	.compress		= zip_scomp_compress,
+	.decompress		= zip_scomp_decompress,
+	.base			= {
+		.cra_name		= "lzs",
+		.cra_driver_name	= "lzs-scomp",
+		.cra_module		= THIS_MODULE,
+		.cra_priority           = 300,
+	}
+};
+
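+/*
+ * For reference (a sketch, not part of this driver): once registered, other
+ * kernel code can typically reach this engine through the generic
+ * compression API, where src/dst/slen/dlen are caller-provided buffers and
+ * lengths:
+ *
+ *	struct crypto_comp *tfm = crypto_alloc_comp("deflate", 0, 0);
+ *
+ *	if (!IS_ERR(tfm)) {
+ *		crypto_comp_compress(tfm, src, slen, dst, &dlen);
+ *		crypto_free_comp(tfm);
+ *	}
+ */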
+static int zip_register_compression_device(void)
+{
+	int ret;
+
+	ret = crypto_register_alg(&zip_comp_deflate);
+	if (ret < 0) {
+		zip_err("Deflate algorithm registration failed\n");
+		return ret;
+	}
+
+	ret = crypto_register_alg(&zip_comp_lzs);
+	if (ret < 0) {
+		zip_err("LZS algorithm registration failed\n");
+		goto err_unregister_alg_deflate;
+	}
+
+	ret = crypto_register_scomp(&zip_scomp_deflate);
+	if (ret < 0) {
+		zip_err("Deflate scomp algorithm registration failed\n");
+		goto err_unregister_alg_lzs;
+	}
+
+	ret = crypto_register_scomp(&zip_scomp_lzs);
+	if (ret < 0) {
+		zip_err("LZS scomp algorithm registration failed\n");
+		goto err_unregister_scomp_deflate;
+	}
+
+	return ret;
+
+err_unregister_scomp_deflate:
+	crypto_unregister_scomp(&zip_scomp_deflate);
+err_unregister_alg_lzs:
+	crypto_unregister_alg(&zip_comp_lzs);
+err_unregister_alg_deflate:
+	crypto_unregister_alg(&zip_comp_deflate);
+
+	return ret;
+}
+
+static void zip_unregister_compression_device(void)
+{
+	crypto_unregister_alg(&zip_comp_deflate);
+	crypto_unregister_alg(&zip_comp_lzs);
+	crypto_unregister_scomp(&zip_scomp_deflate);
+	crypto_unregister_scomp(&zip_scomp_lzs);
+}
+
+/*
+ * debugfs functions
+ */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+/* Displays ZIP device statistics */
+static int zip_show_stats(struct seq_file *s, void *unused)
+{
+	u64 val = 0ull;
+	u64 avg_chunk = 0ull, avg_cr = 0ull;
+	u32 q = 0;
+
+	int index  = 0;
+	struct zip_device *zip;
+	struct zip_stats  *st;
+
+	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
+		u64 pending = 0;
+
+		if (zip_dev[index]) {
+			zip = zip_dev[index];
+			st  = &zip->stats;
+
+			/* Get all the pending requests */
+			for (q = 0; q < ZIP_NUM_QUEUES; q++) {
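+				/*
+				 * The pending-instruction count appears to
+				 * live in bits <55:32> of ZIP_DBG_QUEx_STA.
+				 */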
+				val = zip_reg_read((zip->reg_base +
+						    ZIP_DBG_QUEX_STA(q)));
+				pending += (val >> 32) & 0xffffff;
+			}
+
+			val = atomic64_read(&st->comp_req_complete);
+			avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
+
+			val = atomic64_read(&st->comp_out_bytes);
+			avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
+			seq_printf(s, "        ZIP Device %d Stats\n"
+				      "-----------------------------------\n"
+				      "Comp Req Submitted        : \t%lld\n"
+				      "Comp Req Completed        : \t%lld\n"
+				      "Compress In Bytes         : \t%lld\n"
+				      "Compressed Out Bytes      : \t%lld\n"
+				      "Average Chunk size        : \t%llu\n"
+				      "Average Compression ratio : \t%llu\n"
+				      "Decomp Req Submitted      : \t%lld\n"
+				      "Decomp Req Completed      : \t%lld\n"
+				      "Decompress In Bytes       : \t%lld\n"
+				      "Decompressed Out Bytes    : \t%lld\n"
+				      "Decompress Bad requests   : \t%lld\n"
+				      "Pending Req               : \t%lld\n"
+					"---------------------------------\n",
+				       index,
+				       (u64)atomic64_read(&st->comp_req_submit),
+				       (u64)atomic64_read(&st->comp_req_complete),
+				       (u64)atomic64_read(&st->comp_in_bytes),
+				       (u64)atomic64_read(&st->comp_out_bytes),
+				       avg_chunk,
+				       avg_cr,
+				       (u64)atomic64_read(&st->decomp_req_submit),
+				       (u64)atomic64_read(&st->decomp_req_complete),
+				       (u64)atomic64_read(&st->decomp_in_bytes),
+				       (u64)atomic64_read(&st->decomp_out_bytes),
+				       (u64)atomic64_read(&st->decomp_bad_reqs),
+				       pending);
+		}
+	}
+	return 0;
+}
+
+/* Clears stats data */
+static int zip_clear_stats(struct seq_file *s, void *unused)
+{
+	int index = 0;
+
+	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
+		if (zip_dev[index]) {
+			memset(&zip_dev[index]->stats, 0,
+			       sizeof(struct zip_stats));
+			seq_printf(s, "Cleared stats for zip %d\n", index);
+		}
+	}
+
+	return 0;
+}
+
+static struct zip_registers zipregs[64] = {
+	{"ZIP_CMD_CTL        ",  0x0000ull},
+	{"ZIP_THROTTLE       ",  0x0010ull},
+	{"ZIP_CONSTANTS      ",  0x00A0ull},
+	{"ZIP_QUE0_MAP       ",  0x1400ull},
+	{"ZIP_QUE1_MAP       ",  0x1408ull},
+	{"ZIP_QUE_ENA        ",  0x0500ull},
+	{"ZIP_QUE_PRI        ",  0x0508ull},
+	{"ZIP_QUE0_DONE      ",  0x2000ull},
+	{"ZIP_QUE1_DONE      ",  0x2008ull},
+	{"ZIP_QUE0_DOORBELL  ",  0x4000ull},
+	{"ZIP_QUE1_DOORBELL  ",  0x4008ull},
+	{"ZIP_QUE0_SBUF_ADDR ",  0x1000ull},
+	{"ZIP_QUE1_SBUF_ADDR ",  0x1008ull},
+	{"ZIP_QUE0_SBUF_CTL  ",  0x1200ull},
+	{"ZIP_QUE1_SBUF_CTL  ",  0x1208ull},
+	{ NULL, 0}
+};
+
+/* Prints registers' contents */
+static int zip_print_regs(struct seq_file *s, void *unused)
+{
+	u64 val = 0;
+	int i = 0, index = 0;
+
+	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
+		if (zip_dev[index]) {
+			seq_printf(s, "--------------------------------\n"
+				      "     ZIP Device %d Registers\n"
+				      "--------------------------------\n",
+				      index);
+
+			i = 0;
+
+			while (zipregs[i].reg_name) {
+				val = zip_reg_read((zip_dev[index]->reg_base +
+						    zipregs[i].reg_offset));
+				seq_printf(s, "%s: 0x%016llx\n",
+					   zipregs[i].reg_name, val);
+				i++;
+			}
+		}
+	}
+	return 0;
+}
+
+static int zip_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, zip_show_stats, NULL);
+}
+
+static const struct file_operations zip_stats_fops = {
+	.owner = THIS_MODULE,
+	.open  = zip_stats_open,
+	.read  = seq_read,
+	.release = single_release,
+};
+
+static int zip_clear_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, zip_clear_stats, NULL);
+}
+
+static const struct file_operations zip_clear_fops = {
+	.owner = THIS_MODULE,
+	.open  = zip_clear_open,
+	.read  = seq_read,
+	.release = single_release,
+};
+
+static int zip_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, zip_print_regs, NULL);
+}
+
+static const struct file_operations zip_regs_fops = {
+	.owner = THIS_MODULE,
+	.open  = zip_regs_open,
+	.read  = seq_read,
+	.release = single_release,
+};
+
+/* Root directory for thunderx_zip debugfs entry */
+static struct dentry *zip_debugfs_root;
+
+static int __init zip_debugfs_init(void)
+{
+	struct dentry *zip_stats, *zip_clear, *zip_regs;
+
+	if (!debugfs_initialized())
+		return -ENODEV;
+
+	zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
+	if (!zip_debugfs_root)
+		return -ENOMEM;
+
+	/* Creating files for entries inside thunderx_zip directory */
+	zip_stats = debugfs_create_file("zip_stats", 0444,
+					zip_debugfs_root,
+					NULL, &zip_stats_fops);
+	if (!zip_stats)
+		goto failed_to_create;
+
+	zip_clear = debugfs_create_file("zip_clear", 0444,
+					zip_debugfs_root,
+					NULL, &zip_clear_fops);
+	if (!zip_clear)
+		goto failed_to_create;
+
+	zip_regs = debugfs_create_file("zip_regs", 0444,
+				       zip_debugfs_root,
+				       NULL, &zip_regs_fops);
+	if (!zip_regs)
+		goto failed_to_create;
+
+	return 0;
+
+failed_to_create:
+	debugfs_remove_recursive(zip_debugfs_root);
+	return -ENOENT;
+}
+
+static void __exit zip_debugfs_exit(void)
+{
+	debugfs_remove_recursive(zip_debugfs_root);
+}
+
+#else
+static int __init zip_debugfs_init(void)
+{
+	return 0;
+}
+
+static void __exit zip_debugfs_exit(void) { }
+
+#endif
+/* debugfs - end */
+
+static int __init zip_init_module(void)
+{
+	int ret;
+
+	zip_msg("%s\n", DRV_NAME);
+
+	ret = pci_register_driver(&zip_driver);
+	if (ret < 0) {
+		zip_err("ZIP: pci_register_driver() failed\n");
+		return ret;
+	}
+
+	/* Register with the Kernel Crypto Interface */
+	ret = zip_register_compression_device();
+	if (ret < 0) {
+		zip_err("ZIP: Kernel Crypto Registration failed\n");
+		goto err_pci_unregister;
+	}
+
+	/* comp-decomp statistics are handled with debugfs interface */
+	ret = zip_debugfs_init();
+	if (ret < 0) {
+		zip_err("ZIP: debugfs initialization failed\n");
+		goto err_crypto_unregister;
+	}
+
+	return ret;
+
+err_crypto_unregister:
+	zip_unregister_compression_device();
+
+err_pci_unregister:
+	pci_unregister_driver(&zip_driver);
+	return ret;
+}
+
+static void __exit zip_cleanup_module(void)
+{
+	zip_debugfs_exit();
+
+	/* Unregister from the kernel crypto interface */
+	zip_unregister_compression_device();
+
+	/* Unregister this driver for pci zip devices */
+	pci_unregister_driver(&zip_driver);
+}
+
+module_init(zip_init_module);
+module_exit(zip_cleanup_module);
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, zip_id_table);
diff --git a/drivers/crypto/cavium/zip/zip_main.h b/drivers/crypto/cavium/zip/zip_main.h
new file mode 100644
index 0000000..e1e4fa9
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_main.h
@@ -0,0 +1,120 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_MAIN_H__
+#define __ZIP_MAIN_H__
+
+#include "zip_device.h"
+#include "zip_regs.h"
+
+/* PCI device IDs */
+#define PCI_DEVICE_ID_THUNDERX_ZIP   0xA01A
+
+/* ZIP device BARs */
+#define PCI_CFG_ZIP_PF_BAR0   0  /* Base addr for normal regs */
+
+/* Maximum available zip queues */
+#define ZIP_MAX_NUM_QUEUES    8
+
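+/*
+ * Instruction queue chunk buffers are 128-byte aligned; their physical
+ * addresses are programmed into ZIP_QUE(x)_SBUF_ADDR shifted right by this
+ * many bits.
+ */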
+#define ZIP_128B_ALIGN        7
+
+/* Command queue buffer size */
+#define ZIP_CMD_QBUF_SIZE     (8064 + 8)
+
+struct zip_registers {
+	char  *reg_name;
+	u64   reg_offset;
+};
+
+/* ZIP Compression - Decompression stats */
+struct zip_stats {
+	atomic64_t    comp_req_submit;
+	atomic64_t    comp_req_complete;
+	atomic64_t    decomp_req_submit;
+	atomic64_t    decomp_req_complete;
+	atomic64_t    comp_in_bytes;
+	atomic64_t    comp_out_bytes;
+	atomic64_t    decomp_in_bytes;
+	atomic64_t    decomp_out_bytes;
+	atomic64_t    decomp_bad_reqs;
+};
+
+/* ZIP Instruction Queue */
+struct zip_iq {
+	u64        *sw_head;
+	u64        *sw_tail;
+	u64        *hw_tail;
+	u64        done_cnt;
+	u64        pend_cnt;
+	u64        free_flag;
+
+	/* ZIP IQ lock */
+	spinlock_t  lock;
+};
+
+/* ZIP Device */
+struct zip_device {
+	u32               index;
+	void __iomem      *reg_base;
+	struct pci_dev    *pdev;
+
+	/* Different ZIP Constants */
+	u64               depth;
+	u64               onfsize;
+	u64               ctxsize;
+
+	struct zip_iq     iq[ZIP_MAX_NUM_QUEUES];
+	struct zip_stats  stats;
+};
+
+/* Prototypes */
+struct zip_device *zip_get_device(int node_id);
+int zip_get_node_id(void);
+void zip_reg_write(u64 val, u64 __iomem *addr);
+u64 zip_reg_read(u64 __iomem *addr);
+void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue);
+u32 zip_load_instr(union zip_inst_s *instr, struct zip_device *zip_dev);
+
+#endif /* __ZIP_MAIN_H__ */
diff --git a/drivers/crypto/cavium/zip/zip_mem.c b/drivers/crypto/cavium/zip/zip_mem.c
new file mode 100644
index 0000000..b3e0843
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_mem.c
@@ -0,0 +1,114 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#include "common.h"
+
+/**
+ * zip_cmd_qbuf_alloc - Allocates a cmd buffer for ZIP Instruction Queue
+ * @zip: Pointer to zip device structure
+ * @q:   Queue number to allocate buffer to
+ * Return: 0 if successful, -ENOMEM otherwise
+ */
+int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
+{
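+	/* The chunk buffer is handed to hardware, so use DMA-capable pages */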
+	zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
+						get_order(ZIP_CMD_QBUF_SIZE));
+
+	if (!zip->iq[q].sw_head)
+		return -ENOMEM;
+
+	memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE);
+
+	zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head);
+	return 0;
+}
+
+/**
+ * zip_cmd_qbuf_free - Frees the cmd Queue buffer
+ * @zip: Pointer to zip device structure
+ * @q:   Queue number to free buffer of
+ */
+void zip_cmd_qbuf_free(struct zip_device *zip, int q)
+{
+	zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail);
+
+	free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE));
+}
+
+/**
+ * zip_data_buf_alloc - Allocates memory for a data buffer
+ * @size:   Size of the buffer to allocate
+ * Return: Pointer to the allocated buffer
+ */
+u8 *zip_data_buf_alloc(u64 size)
+{
+	u8 *ptr;
+
+	ptr = (u8 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
+					get_order(size));
+
+	if (!ptr)
+		return NULL;
+
+	memset(ptr, 0, size);
+
+	zip_dbg("Data buffer allocation success\n");
+	return ptr;
+}
+
+/**
+ * zip_data_buf_free - Frees the memory of a data buffer
+ * @ptr:  Pointer to the buffer
+ * @size: Buffer size
+ */
+void zip_data_buf_free(u8 *ptr, u64 size)
+{
+	zip_dbg("Freeing data buffer 0x%lx\n", ptr);
+
+	free_pages((u64)ptr, get_order(size));
+}
diff --git a/drivers/crypto/cavium/zip/zip_mem.h b/drivers/crypto/cavium/zip/zip_mem.h
new file mode 100644
index 0000000..f8f2f08
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_mem.h
@@ -0,0 +1,78 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_MEM_H__
+#define __ZIP_MEM_H__
+
+/**
+ * zip_cmd_qbuf_free - Frees the cmd Queue buffer
+ * @zip: Pointer to zip device structure
+ * @q:   Queue number to free buffer of
+ */
+void zip_cmd_qbuf_free(struct zip_device *zip, int q);
+
+/**
+ * zip_cmd_qbuf_alloc - Allocates a Chunk/cmd buffer for ZIP Inst(cmd) Queue
+ * @zip: Pointer to zip device structure
+ * @q:   Queue number to allocate buffer to
+ * Return: 0 if successful, -ENOMEM otherwise
+ */
+int zip_cmd_qbuf_alloc(struct zip_device *zip, int q);
+
+/**
+ * zip_data_buf_alloc - Allocates memory for a data buffer
+ * @size:   Size of the buffer to allocate
+ * Return: Pointer to the allocated buffer
+ */
+u8 *zip_data_buf_alloc(u64 size);
+
+/**
+ * zip_data_buf_free - Frees the memory of a data buffer
+ * @ptr:  Pointer to the buffer
+ * @size: Buffer size
+ */
+void zip_data_buf_free(u8 *ptr, u64 size);
+
+#endif
diff --git a/drivers/crypto/cavium/zip/zip_regs.h b/drivers/crypto/cavium/zip/zip_regs.h
new file mode 100644
index 0000000..874e023
--- /dev/null
+++ b/drivers/crypto/cavium/zip/zip_regs.h
@@ -0,0 +1,1347 @@
+/***********************license start************************************
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ * All rights reserved.
+ *
+ * License: one of 'Cavium License' or 'GNU General Public License Version 2'
+ *
+ * This file is provided under the terms of the Cavium License (see below)
+ * or under the terms of GNU General Public License, Version 2, as
+ * published by the Free Software Foundation. When using or redistributing
+ * this file, you may do so under either license.
+ *
+ * Cavium License:  Redistribution and use in source and binary forms, with
+ * or without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *
+ *  * Neither the name of Cavium Inc. nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import
+ * regulations in other countries.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
+ * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
+ * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+ * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+ * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
+ * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
+ * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
+ * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
+ * ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
+ * WITH YOU.
+ ***********************license end**************************************/
+
+#ifndef __ZIP_REGS_H__
+#define __ZIP_REGS_H__
+
+/*
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium ZIP.
+ */
+
+#include <linux/kern_levels.h>
+
+/* ZIP invocation result completion status codes */
+#define ZIP_CMD_NOTDONE        0x0
+
+/* Successful completion. */
+#define ZIP_CMD_SUCCESS        0x1
+
+/* Output truncated */
+#define ZIP_CMD_DTRUNC         0x2
+
+/* Dynamic Stop */
+#define ZIP_CMD_DYNAMIC_STOP   0x3
+
+/* Uncompress ran out of input data when IWORD0[EF] was set */
+#define ZIP_CMD_ITRUNC         0x4
+
+/* Uncompress found the reserved block type 3 */
+#define ZIP_CMD_RBLOCK         0x5
+
+/*
+ * Uncompress found LEN != ZIP_CMD_NLEN in an uncompressed block in the input.
+ */
+#define ZIP_CMD_NLEN           0x6
+
+/* Uncompress found a bad code in the main Huffman codes. */
+#define ZIP_CMD_BADCODE        0x7
+
+/* Uncompress found a bad code in the 19 Huffman codes encoding lengths. */
+#define ZIP_CMD_BADCODE2       0x8
+
+/* Compress found a zero-length input. */
+#define ZIP_CMD_ZERO_LEN       0x9
+
+/* The compress or decompress encountered an internal parity error. */
+#define ZIP_CMD_PARITY         0xA
+
+/*
+ * Uncompress found a string identifier that precedes the uncompressed data and
+ * decompression history.
+ */
+#define ZIP_CMD_FATAL          0xB
+
+/**
+ * enum zip_int_vec_e - ZIP MSI-X Vector Enumeration, enumerates the MSI-X
+ * interrupt vectors.
+ */
+enum zip_int_vec_e {
+	ZIP_INT_VEC_E_ECCE = 0x10,
+	ZIP_INT_VEC_E_FIFE = 0x11,
+	ZIP_INT_VEC_E_QUE0_DONE = 0x0,
+	ZIP_INT_VEC_E_QUE0_ERR = 0x8,
+	ZIP_INT_VEC_E_QUE1_DONE = 0x1,
+	ZIP_INT_VEC_E_QUE1_ERR = 0x9,
+	ZIP_INT_VEC_E_QUE2_DONE = 0x2,
+	ZIP_INT_VEC_E_QUE2_ERR = 0xa,
+	ZIP_INT_VEC_E_QUE3_DONE = 0x3,
+	ZIP_INT_VEC_E_QUE3_ERR = 0xb,
+	ZIP_INT_VEC_E_QUE4_DONE = 0x4,
+	ZIP_INT_VEC_E_QUE4_ERR = 0xc,
+	ZIP_INT_VEC_E_QUE5_DONE = 0x5,
+	ZIP_INT_VEC_E_QUE5_ERR = 0xd,
+	ZIP_INT_VEC_E_QUE6_DONE = 0x6,
+	ZIP_INT_VEC_E_QUE6_ERR = 0xe,
+	ZIP_INT_VEC_E_QUE7_DONE = 0x7,
+	ZIP_INT_VEC_E_QUE7_ERR = 0xf,
+	ZIP_INT_VEC_E_ENUM_LAST = 0x12,
+};
+
+/**
+ * union zip_zptr_addr_s - ZIP Generic Pointer Structure for ADDR.
+ *
+ * It is the generic format of pointers in ZIP_INST_S.
+ */
+union zip_zptr_addr_s {
+	u64 u_reg64;
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_49_63              : 15;
+		u64 addr                        : 49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 addr                        : 49;
+		u64 reserved_49_63              : 15;
+#endif
+	} s;
+
+};
+
+/**
+ * union zip_zptr_ctl_s - ZIP Generic Pointer Structure for CTL.
+ *
+ * It is the generic format of pointers in ZIP_INST_S.
+ */
+union zip_zptr_ctl_s {
+	u64 u_reg64;
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_112_127            : 16;
+		u64 length                      : 16;
+		u64 reserved_67_95              : 29;
+		u64 fw                          : 1;
+		u64 nc                          : 1;
+		u64 data_be                     : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 data_be                     : 1;
+		u64 nc                          : 1;
+		u64 fw                          : 1;
+		u64 reserved_67_95              : 29;
+		u64 length                      : 16;
+		u64 reserved_112_127            : 16;
+#endif
+	} s;
+};
+
+/**
+ * union zip_inst_s - ZIP Instruction Structure.
+ * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15 within
+ * the structure).
+ */
+union zip_inst_s {
+	u64 u_reg64[16];
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 doneint                     : 1;
+		u64 reserved_56_62              : 7;
+		u64 totaloutputlength           : 24;
+		u64 reserved_27_31              : 5;
+		u64 exn                         : 3;
+		u64 reserved_23_23              : 1;
+		u64 exbits                      : 7;
+		u64 reserved_12_15              : 4;
+		u64 sf                          : 1;
+		u64 ss                          : 2;
+		u64 cc                          : 2;
+		u64 ef                          : 1;
+		u64 bf                          : 1;
+		u64 ce                          : 1;
+		u64 reserved_3_3                : 1;
+		u64 ds                          : 1;
+		u64 dg                          : 1;
+		u64 hg                          : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 hg                          : 1;
+		u64 dg                          : 1;
+		u64 ds                          : 1;
+		u64 reserved_3_3                : 1;
+		u64 ce                          : 1;
+		u64 bf                          : 1;
+		u64 ef                          : 1;
+		u64 cc                          : 2;
+		u64 ss                          : 2;
+		u64 sf                          : 1;
+		u64 reserved_12_15              : 4;
+		u64 exbits                      : 7;
+		u64 reserved_23_23              : 1;
+		u64 exn                         : 3;
+		u64 reserved_27_31              : 5;
+		u64 totaloutputlength           : 24;
+		u64 reserved_56_62              : 7;
+		u64 doneint                     : 1;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 historylength               : 16;
+		u64 reserved_96_111             : 16;
+		u64 adlercrc32                  : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 adlercrc32                  : 32;
+		u64 reserved_96_111             : 16;
+		u64 historylength               : 16;
+#endif
+		union zip_zptr_addr_s ctx_ptr_addr;
+		union zip_zptr_ctl_s ctx_ptr_ctl;
+		union zip_zptr_addr_s his_ptr_addr;
+		union zip_zptr_ctl_s his_ptr_ctl;
+		union zip_zptr_addr_s inp_ptr_addr;
+		union zip_zptr_ctl_s inp_ptr_ctl;
+		union zip_zptr_addr_s out_ptr_addr;
+		union zip_zptr_ctl_s out_ptr_ctl;
+		union zip_zptr_addr_s res_ptr_addr;
+		union zip_zptr_ctl_s res_ptr_ctl;
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_817_831            : 15;
+		u64 wq_ptr                      : 49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 wq_ptr                      : 49;
+		u64 reserved_817_831            : 15;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_882_895            : 14;
+		u64 tt                          : 2;
+		u64 reserved_874_879            : 6;
+		u64 grp                         : 10;
+		u64 tag                         : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 tag                         : 32;
+		u64 grp                         : 10;
+		u64 reserved_874_879            : 6;
+		u64 tt                          : 2;
+		u64 reserved_882_895            : 14;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_896_959            : 64;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 reserved_896_959            : 64;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_960_1023           : 64;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 reserved_960_1023           : 64;
+#endif
+	} s;
+};
+
+/**
+ * union zip_nptr_s - ZIP Instruction Next-Chunk-Buffer Pointer (NPTR)
+ * Structure
+ *
+ * ZIP_NPTR structure is used to chain all the zip instruction buffers
+ * together. ZIP instruction buffers are managed (allocated and released) by
+ * the software.
+ */
+union zip_nptr_s {
+	u64 u_reg64;
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_49_63              : 15;
+		u64 addr                        : 49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 addr                        : 49;
+		u64 reserved_49_63              : 15;
+#endif
+	} s;
+};
+
+/**
+ * union zip_zptr_s - ZIP Generic Pointer Structure.
+ *
+ * It is the generic format of pointers in ZIP_INST_S.
+ */
+union zip_zptr_s {
+	u64 u_reg64[2];
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_49_63              : 15;
+		u64 addr                        : 49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 addr                        : 49;
+		u64 reserved_49_63              : 15;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_112_127            : 16;
+		u64 length                      : 16;
+		u64 reserved_67_95              : 29;
+		u64 fw                          : 1;
+		u64 nc                          : 1;
+		u64 data_be                     : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 data_be                     : 1;
+		u64 nc                          : 1;
+		u64 fw                          : 1;
+		u64 reserved_67_95              : 29;
+		u64 length                      : 16;
+		u64 reserved_112_127            : 16;
+#endif
+	} s;
+};
+
+/**
+ * union zip_zres_s - ZIP Result Structure
+ *
+ * The ZIP coprocessor writes the result structure after it completes the
+ * invocation. The result structure is exactly 24 bytes, and each invocation of
+ * the ZIP coprocessor produces exactly one result structure.
+ */
+union zip_zres_s {
+	u64 u_reg64[3];
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 crc32                       : 32;
+		u64 adler32                     : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 adler32                     : 32;
+		u64 crc32                       : 32;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 totalbyteswritten           : 32;
+		u64 totalbytesread              : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 totalbytesread              : 32;
+		u64 totalbyteswritten           : 32;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 totalbitsprocessed          : 32;
+		u64 doneint                     : 1;
+		u64 reserved_155_158            : 4;
+		u64 exn                         : 3;
+		u64 reserved_151_151            : 1;
+		u64 exbits                      : 7;
+		u64 reserved_137_143            : 7;
+		u64 ef                          : 1;
+
+		volatile u64 compcode           : 8;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+
+		volatile u64 compcode           : 8;
+		u64 ef                          : 1;
+		u64 reserved_137_143            : 7;
+		u64 exbits                      : 7;
+		u64 reserved_151_151            : 1;
+		u64 exn                         : 3;
+		u64 reserved_155_158            : 4;
+		u64 doneint                     : 1;
+		u64 totalbitsprocessed          : 32;
+#endif
+	} s;
+};
+
+/**
+ * union zip_cmd_ctl - Structure representing the register that controls
+ * clock and reset.
+ */
+union zip_cmd_ctl {
+	u64 u_reg64;
+	struct zip_cmd_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_2_63               : 62;
+		u64 forceclk                    : 1;
+		u64 reset                       : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 reset                       : 1;
+		u64 forceclk                    : 1;
+		u64 reserved_2_63               : 62;
+#endif
+	} s;
+};
+
+#define ZIP_CMD_CTL 0x0ull
+
+/**
+ * union zip_constants - Data structure representing the register that contains
+ * all of the current implementation-related parameters of the zip core in this
+ * chip.
+ */
+union zip_constants {
+	u64 u_reg64;
+	struct zip_constants_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 nexec                       : 8;
+		u64 reserved_49_55              : 7;
+		u64 syncflush_capable           : 1;
+		u64 depth                       : 16;
+		u64 onfsize                     : 12;
+		u64 ctxsize                     : 12;
+		u64 reserved_1_7                : 7;
+		u64 disabled                    : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 disabled                    : 1;
+		u64 reserved_1_7                : 7;
+		u64 ctxsize                     : 12;
+		u64 onfsize                     : 12;
+		u64 depth                       : 16;
+		u64 syncflush_capable           : 1;
+		u64 reserved_49_55              : 7;
+		u64 nexec                       : 8;
+#endif
+	} s;
+};
+
+#define ZIP_CONSTANTS 0x00A0ull
+
+/**
+ * union zip_corex_bist_status - Represents registers which have the BIST
+ * status of memories in zip cores.
+ *
+ * Each bit is the BIST result of an individual memory
+ * (per bit, 0 = pass and 1 = fail).
+ */
+union zip_corex_bist_status {
+	u64 u_reg64;
+	struct zip_corex_bist_status_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_53_63              : 11;
+		u64 bstatus                     : 53;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 bstatus                     : 53;
+		u64 reserved_53_63              : 11;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_COREX_BIST_STATUS(u64 param1)
+{
+	if (param1 <= 1)
+		return 0x0520ull + (param1 & 1) * 0x8ull;
+	pr_err("ZIP_COREX_BIST_STATUS: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_ctl_bist_status - Represents register that has the BIST status of
+ * memories in ZIP_CTL (instruction buffer, G/S pointer FIFO, input data
+ * buffer, output data buffers).
+ *
+ * Each bit is the BIST result of an individual memory
+ * (per bit, 0 = pass and 1 = fail).
+ */
+union zip_ctl_bist_status {
+	u64 u_reg64;
+	struct zip_ctl_bist_status_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_9_63               : 55;
+		u64 bstatus                     : 9;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 bstatus                     : 9;
+		u64 reserved_9_63               : 55;
+#endif
+	} s;
+};
+
+#define ZIP_CTL_BIST_STATUS 0x0510ull
+
+/**
+ * union zip_ctl_cfg - Represents the register that controls the behavior of
+ * the ZIP DMA engines.
+ *
+ * It is recommended to keep default values for normal operation. Changing the
+ * values of the fields may be useful for diagnostics.
+ */
+union zip_ctl_cfg {
+	u64 u_reg64;
+	struct zip_ctl_cfg_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_52_63              : 12;
+		u64 ildf                        : 4;
+		u64 reserved_36_47              : 12;
+		u64 drtf                        : 4;
+		u64 reserved_27_31              : 5;
+		u64 stcf                        : 3;
+		u64 reserved_19_23              : 5;
+		u64 ldf                         : 3;
+		u64 reserved_2_15               : 14;
+		u64 busy                        : 1;
+		u64 reserved_0_0                : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 reserved_0_0                : 1;
+		u64 busy                        : 1;
+		u64 reserved_2_15               : 14;
+		u64 ldf                         : 3;
+		u64 reserved_19_23              : 5;
+		u64 stcf                        : 3;
+		u64 reserved_27_31              : 5;
+		u64 drtf                        : 4;
+		u64 reserved_36_47              : 12;
+		u64 ildf                        : 4;
+		u64 reserved_52_63              : 12;
+#endif
+	} s;
+};
+
+#define ZIP_CTL_CFG 0x0560ull
+
+/**
+ * union zip_dbg_corex_inst - Represents the registers that reflect the status
+ * of the current instruction that the ZIP core is executing or has executed.
+ *
+ * These registers are only for debug use.
+ */
+union zip_dbg_corex_inst {
+	u64 u_reg64;
+	struct zip_dbg_corex_inst_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 busy                        : 1;
+		u64 reserved_35_62              : 28;
+		u64 qid                         : 3;
+		u64 iid                         : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 iid                         : 32;
+		u64 qid                         : 3;
+		u64 reserved_35_62              : 28;
+		u64 busy                        : 1;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_DBG_COREX_INST(u64 param1)
+{
+	if (param1 <= 1)
+		return 0x0640ull + (param1 & 1) * 0x8ull;
+	pr_err("ZIP_DBG_COREX_INST: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_dbg_corex_sta - Represents registers that reflect the status of
+ * the zip cores.
+ *
+ * They are for debug use only.
+ */
+union zip_dbg_corex_sta {
+	u64 u_reg64;
+	struct zip_dbg_corex_sta_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 busy                        : 1;
+		u64 reserved_37_62              : 26;
+		u64 ist                         : 5;
+		u64 nie                         : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 nie                         : 32;
+		u64 ist                         : 5;
+		u64 reserved_37_62              : 26;
+		u64 busy                        : 1;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_DBG_COREX_STA(u64 param1)
+{
+	if (param1 <= 1)
+		return 0x0680ull + (param1 & 1) * 0x8ull;
+	pr_err("ZIP_DBG_COREX_STA: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_dbg_quex_sta - Represents registers that reflect the status of
+ * the zip instruction queues.
+ *
+ * They are for debug use only.
+ */
+union zip_dbg_quex_sta {
+	u64 u_reg64;
+	struct zip_dbg_quex_sta_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 busy                        : 1;
+		u64 reserved_56_62              : 7;
+		u64 rqwc                        : 24;
+		u64 nii                         : 32;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 nii                         : 32;
+		u64 rqwc                        : 24;
+		u64 reserved_56_62              : 7;
+		u64 busy                        : 1;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_DBG_QUEX_STA(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x1800ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_DBG_QUEX_STA: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_ecc_ctl - Represents the register that enables ECC for each
+ * individual internal memory that requires ECC.
+ *
+ * For debug purposes, it can also flip one or two bits in the ECC data.
+ */
+union zip_ecc_ctl {
+	u64 u_reg64;
+	struct zip_ecc_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_19_63              : 45;
+		u64 vmem_cdis                   : 1;
+		u64 vmem_fs                     : 2;
+		u64 reserved_15_15              : 1;
+		u64 idf1_cdis                   : 1;
+		u64 idf1_fs                     : 2;
+		u64 reserved_11_11              : 1;
+		u64 idf0_cdis                   : 1;
+		u64 idf0_fs                     : 2;
+		u64 reserved_7_7                : 1;
+		u64 gspf_cdis                   : 1;
+		u64 gspf_fs                     : 2;
+		u64 reserved_3_3                : 1;
+		u64 iqf_cdis                    : 1;
+		u64 iqf_fs                      : 2;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 iqf_fs                      : 2;
+		u64 iqf_cdis                    : 1;
+		u64 reserved_3_3                : 1;
+		u64 gspf_fs                     : 2;
+		u64 gspf_cdis                   : 1;
+		u64 reserved_7_7                : 1;
+		u64 idf0_fs                     : 2;
+		u64 idf0_cdis                   : 1;
+		u64 reserved_11_11              : 1;
+		u64 idf1_fs                     : 2;
+		u64 idf1_cdis                   : 1;
+		u64 reserved_15_15              : 1;
+		u64 vmem_fs                     : 2;
+		u64 vmem_cdis                   : 1;
+		u64 reserved_19_63              : 45;
+#endif
+	} s;
+};
+
+#define ZIP_ECC_CTL 0x0568ull
+
+/* NCB - zip_ecce_ena_w1c */
+union zip_ecce_ena_w1c {
+	u64 u_reg64;
+	struct zip_ecce_ena_w1c_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_37_63              : 27;
+		u64 dbe                         : 5;
+		u64 reserved_5_31               : 27;
+		u64 sbe                         : 5;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 sbe                         : 5;
+		u64 reserved_5_31               : 27;
+		u64 dbe                         : 5;
+		u64 reserved_37_63              : 27;
+#endif
+	} s;
+};
+
+#define ZIP_ECCE_ENA_W1C 0x0598ull
+
+/* NCB - zip_ecce_ena_w1s */
+union zip_ecce_ena_w1s {
+	u64 u_reg64;
+	struct zip_ecce_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_37_63              : 27;
+		u64 dbe                         : 5;
+		u64 reserved_5_31               : 27;
+		u64 sbe                         : 5;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 sbe                         : 5;
+		u64 reserved_5_31               : 27;
+		u64 dbe                         : 5;
+		u64 reserved_37_63              : 27;
+#endif
+	} s;
+};
+
+#define ZIP_ECCE_ENA_W1S 0x0590ull
+
+/**
+ * union zip_ecce_int - Represents the register that contains the status of the
+ * ECC interrupt sources.
+ */
+union zip_ecce_int {
+	u64 u_reg64;
+	struct zip_ecce_int_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_37_63              : 27;
+		u64 dbe                         : 5;
+		u64 reserved_5_31               : 27;
+		u64 sbe                         : 5;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 sbe                         : 5;
+		u64 reserved_5_31               : 27;
+		u64 dbe                         : 5;
+		u64 reserved_37_63              : 27;
+#endif
+	} s;
+};
+
+#define ZIP_ECCE_INT 0x0580ull
+
+/* NCB - zip_ecce_int_w1s */
+union zip_ecce_int_w1s {
+	u64 u_reg64;
+	struct zip_ecce_int_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_37_63              : 27;
+		u64 dbe                         : 5;
+		u64 reserved_5_31               : 27;
+		u64 sbe                         : 5;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 sbe                         : 5;
+		u64 reserved_5_31               : 27;
+		u64 dbe                         : 5;
+		u64 reserved_37_63              : 27;
+#endif
+	} s;
+};
+
+#define ZIP_ECCE_INT_W1S 0x0588ull
+
+/* NCB - zip_fife_ena_w1c */
+union zip_fife_ena_w1c {
+	u64 u_reg64;
+	struct zip_fife_ena_w1c_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_42_63              : 22;
+		u64 asserts                     : 42;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 asserts                     : 42;
+		u64 reserved_42_63              : 22;
+#endif
+	} s;
+};
+
+#define ZIP_FIFE_ENA_W1C 0x0090ull
+
+/* NCB - zip_fife_ena_w1s */
+union zip_fife_ena_w1s {
+	u64 u_reg64;
+	struct zip_fife_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_42_63              : 22;
+		u64 asserts                     : 42;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 asserts                     : 42;
+		u64 reserved_42_63              : 22;
+#endif
+	} s;
+};
+
+#define ZIP_FIFE_ENA_W1S 0x0088ull
+
+/* NCB - zip_fife_int */
+union zip_fife_int {
+	u64 u_reg64;
+	struct zip_fife_int_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_42_63              : 22;
+		u64 asserts                     : 42;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 asserts                     : 42;
+		u64 reserved_42_63              : 22;
+#endif
+	} s;
+};
+
+#define ZIP_FIFE_INT 0x0078ull
+
+/* NCB - zip_fife_int_w1s */
+union zip_fife_int_w1s {
+	u64 u_reg64;
+	struct zip_fife_int_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_42_63              : 22;
+		u64 asserts                     : 42;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 asserts                     : 42;
+		u64 reserved_42_63              : 22;
+#endif
+	} s;
+};
+
+#define ZIP_FIFE_INT_W1S 0x0080ull
+
+/**
+ * union zip_msix_pbax - Represents the register that is the MSI-X PBA table
+ *
+ * The bit number is indexed by the ZIP_INT_VEC_E enumeration.
+ */
+union zip_msix_pbax {
+	u64 u_reg64;
+	struct zip_msix_pbax_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 pend                        : 64;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 pend                        : 64;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_MSIX_PBAX(u64 param1)
+{
+	if (param1 == 0)
+		return 0x0000838000FF0000ull;
+	pr_err("ZIP_MSIX_PBAX: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_msix_vecx_addr - Represents the register that is the MSI-X vector
+ * table, indexed by the ZIP_INT_VEC_E enumeration.
+ */
+union zip_msix_vecx_addr {
+	u64 u_reg64;
+	struct zip_msix_vecx_addr_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_49_63              : 15;
+		u64 addr                        : 47;
+		u64 reserved_1_1                : 1;
+		u64 secvec                      : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 secvec                      : 1;
+		u64 reserved_1_1                : 1;
+		u64 addr                        : 47;
+		u64 reserved_49_63              : 15;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_MSIX_VECX_ADDR(u64 param1)
+{
+	if (param1 <= 17)
+		return 0x0000838000F00000ull + (param1 & 31) * 0x10ull;
+	pr_err("ZIP_MSIX_VECX_ADDR: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_msix_vecx_ctl - Represents the register that is the MSI-X vector
+ * table, indexed by the ZIP_INT_VEC_E enumeration.
+ */
+union zip_msix_vecx_ctl {
+	u64 u_reg64;
+	struct zip_msix_vecx_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_33_63              : 31;
+		u64 mask                        : 1;
+		u64 reserved_20_31              : 12;
+		u64 data                        : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 data                        : 20;
+		u64 reserved_20_31              : 12;
+		u64 mask                        : 1;
+		u64 reserved_33_63              : 31;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_MSIX_VECX_CTL(u64 param1)
+{
+	if (param1 <= 17)
+		return 0x0000838000F00008ull + (param1 & 31) * 0x10ull;
+	pr_err("ZIP_MSIX_VECX_CTL: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_quex_done - Represents the registers that contain the per-queue
+ * instruction done count.
+ */
+union zip_quex_done {
+	u64 u_reg64;
+	struct zip_quex_done_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_20_63              : 44;
+		u64 done                        : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 done                        : 20;
+		u64 reserved_20_63              : 44;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_DONE(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x2000ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_DONE: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_quex_done_ack - Represents the registers that, when written,
+ * decrement the per-queue instruction done count.
+ */
+union zip_quex_done_ack {
+	u64 u_reg64;
+	struct zip_quex_done_ack_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_20_63              : 44;
+		u64 done_ack                    : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 done_ack                    : 20;
+		u64 reserved_20_63              : 44;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_DONE_ACK(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x2200ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_DONE_ACK: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_quex_done_ena_w1c - Represents the register that, when written
+ * with 1, disables the DONEINT interrupt for the queue.
+ */
+union zip_quex_done_ena_w1c {
+	u64 u_reg64;
+	struct zip_quex_done_ena_w1c_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_1_63               : 63;
+		u64 done_ena                    : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 done_ena                    : 1;
+		u64 reserved_1_63               : 63;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_DONE_ENA_W1C(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x2600ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_DONE_ENA_W1C: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_quex_done_ena_w1s - Represents the register that, when written
+ * with 1, enables the DONEINT interrupt for the queue.
+ */
+union zip_quex_done_ena_w1s {
+	u64 u_reg64;
+	struct zip_quex_done_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_1_63               : 63;
+		u64 done_ena                    : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 done_ena                    : 1;
+		u64 reserved_1_63               : 63;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_DONE_ENA_W1S(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x2400ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_DONE_ENA_W1S: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_quex_done_wait - Represents the register that specifies the
+ * per-queue interrupt coalescing settings.
+ */
+union zip_quex_done_wait {
+	u64 u_reg64;
+	struct zip_quex_done_wait_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_48_63              : 16;
+		u64 time_wait                   : 16;
+		u64 reserved_20_31              : 12;
+		u64 num_wait                    : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 num_wait                    : 20;
+		u64 reserved_20_31              : 12;
+		u64 time_wait                   : 16;
+		u64 reserved_48_63              : 16;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_DONE_WAIT(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x2800ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_DONE_WAIT: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_quex_doorbell - Represents doorbell registers for the ZIP
+ * instruction queues.
+ */
+union zip_quex_doorbell {
+	u64 u_reg64;
+	struct zip_quex_doorbell_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_20_63              : 44;
+		u64 dbell_cnt                   : 20;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 dbell_cnt                   : 20;
+		u64 reserved_20_63              : 44;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_DOORBELL(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x4000ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_DOORBELL: %llu\n", param1);
+	return 0;
+}
+
+union zip_quex_err_ena_w1c {
+	u64 u_reg64;
+	struct zip_quex_err_ena_w1c_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_5_63               : 59;
+		u64 mdbe                        : 1;
+		u64 nwrp                        : 1;
+		u64 nrrp                        : 1;
+		u64 irde                        : 1;
+		u64 dovf                        : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 dovf                        : 1;
+		u64 irde                        : 1;
+		u64 nrrp                        : 1;
+		u64 nwrp                        : 1;
+		u64 mdbe                        : 1;
+		u64 reserved_5_63               : 59;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_ERR_ENA_W1C(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x3600ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_ERR_ENA_W1C: %llu\n", param1);
+	return 0;
+}
+
+union zip_quex_err_ena_w1s {
+	u64 u_reg64;
+	struct zip_quex_err_ena_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_5_63               : 59;
+		u64 mdbe                        : 1;
+		u64 nwrp                        : 1;
+		u64 nrrp                        : 1;
+		u64 irde                        : 1;
+		u64 dovf                        : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 dovf                        : 1;
+		u64 irde                        : 1;
+		u64 nrrp                        : 1;
+		u64 nwrp                        : 1;
+		u64 mdbe                        : 1;
+		u64 reserved_5_63               : 59;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_ERR_ENA_W1S(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x3400ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_ERR_ENA_W1S: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_quex_err_int - Represents registers that contain the per-queue
+ * error interrupts.
+ */
+union zip_quex_err_int {
+	u64 u_reg64;
+	struct zip_quex_err_int_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_5_63               : 59;
+		u64 mdbe                        : 1;
+		u64 nwrp                        : 1;
+		u64 nrrp                        : 1;
+		u64 irde                        : 1;
+		u64 dovf                        : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 dovf                        : 1;
+		u64 irde                        : 1;
+		u64 nrrp                        : 1;
+		u64 nwrp                        : 1;
+		u64 mdbe                        : 1;
+		u64 reserved_5_63               : 59;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_ERR_INT(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x3000ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_ERR_INT: %llu\n", param1);
+	return 0;
+}
+
+/* NCB - zip_que#_err_int_w1s */
+union zip_quex_err_int_w1s {
+	u64 u_reg64;
+	struct zip_quex_err_int_w1s_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_5_63               : 59;
+		u64 mdbe                        : 1;
+		u64 nwrp                        : 1;
+		u64 nrrp                        : 1;
+		u64 irde                        : 1;
+		u64 dovf                        : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 dovf                        : 1;
+		u64 irde                        : 1;
+		u64 nrrp                        : 1;
+		u64 nwrp                        : 1;
+		u64 mdbe                        : 1;
+		u64 reserved_5_63               : 59;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_ERR_INT_W1S(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x3200ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_ERR_INT_W1S: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_quex_gcfg - Represents the registers that reflect the status of
+ * the zip instruction queues; debug use only.
+ */
+union zip_quex_gcfg {
+	u64 u_reg64;
+	struct zip_quex_gcfg_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_4_63               : 60;
+		u64 iqb_ldwb                    : 1;
+		u64 cbw_sty                     : 1;
+		u64 l2ld_cmd                    : 2;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 l2ld_cmd                    : 2;
+		u64 cbw_sty                     : 1;
+		u64 iqb_ldwb                    : 1;
+		u64 reserved_4_63               : 60;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_GCFG(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x1A00ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_GCFG: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_quex_map - Represents the registers that control how each
+ * instruction queue maps to zip cores.
+ */
+union zip_quex_map {
+	u64 u_reg64;
+	struct zip_quex_map_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_2_63               : 62;
+		u64 zce                         : 2;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 zce                         : 2;
+		u64 reserved_2_63               : 62;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_MAP(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x1400ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_MAP: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_quex_sbuf_addr - Represents the registers that set the buffer
+ * parameters for the instruction queues.
+ *
+ * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
+ * this register to effectively reset the command buffer state machine.
+ * These registers must be programmed after SW programs the corresponding
+ * ZIP_QUE(0..7)_SBUF_CTL.
+ */
+union zip_quex_sbuf_addr {
+	u64 u_reg64;
+	struct zip_quex_sbuf_addr_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_49_63              : 15;
+		u64 ptr                         : 42;
+		u64 off                         : 7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 off                         : 7;
+		u64 ptr                         : 42;
+		u64 reserved_49_63              : 15;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_SBUF_ADDR(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x1000ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_SBUF_ADDR: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_quex_sbuf_ctl - Represents the registers that set the buffer
+ * parameters for the instruction queues.
+ *
+ * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
+ * this register to effectively reset the command buffer state machine.
+ * These registers must be programmed before SW programs the corresponding
+ * ZIP_QUE(0..7)_SBUF_ADDR.
+ */
+union zip_quex_sbuf_ctl {
+	u64 u_reg64;
+	struct zip_quex_sbuf_ctl_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_45_63              : 19;
+		u64 size                        : 13;
+		u64 inst_be                     : 1;
+		u64 reserved_24_30              : 7;
+		u64 stream_id                   : 8;
+		u64 reserved_12_15              : 4;
+		u64 aura                        : 12;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 aura                        : 12;
+		u64 reserved_12_15              : 4;
+		u64 stream_id                   : 8;
+		u64 reserved_24_30              : 7;
+		u64 inst_be                     : 1;
+		u64 size                        : 13;
+		u64 reserved_45_63              : 19;
+#endif
+	} s;
+};
+
+static inline u64 ZIP_QUEX_SBUF_CTL(u64 param1)
+{
+	if (param1 <= 7)
+		return 0x1200ull + (param1 & 7) * 0x8ull;
+	pr_err("ZIP_QUEX_SBUF_CTL: %llu\n", param1);
+	return 0;
+}
+
+/**
+ * union zip_que_ena - Represents queue enable register
+ *
+ * If a queue is disabled, ZIP_CTL stops fetching instructions from the queue.
+ */
+union zip_que_ena {
+	u64 u_reg64;
+	struct zip_que_ena_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_8_63               : 56;
+		u64 ena                         : 8;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 ena                         : 8;
+		u64 reserved_8_63               : 56;
+#endif
+	} s;
+};
+
+#define ZIP_QUE_ENA 0x0500ull
+
+/**
+ * union zip_que_pri - Represents the register that defines the priority
+ * between instruction queues.
+ */
+union zip_que_pri {
+	u64 u_reg64;
+	struct zip_que_pri_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_8_63               : 56;
+		u64 pri                         : 8;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 pri                         : 8;
+		u64 reserved_8_63               : 56;
+#endif
+	} s;
+};
+
+#define ZIP_QUE_PRI 0x0508ull
+
+/**
+ * union zip_throttle - Represents the register that controls the maximum
+ * number of in-flight X2I data fetch transactions.
+ *
+ * Writing 0 to this register causes the ZIP module to temporarily suspend NCB
+ * accesses; it is not recommended for normal operation, but may be useful for
+ * diagnostics.
+ */
+union zip_throttle {
+	u64 u_reg64;
+	struct zip_throttle_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u64 reserved_6_63               : 58;
+		u64 ld_infl                     : 6;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+		u64 ld_infl                     : 6;
+		u64 reserved_6_63               : 58;
+#endif
+	} s;
+};
+
+#define ZIP_THROTTLE 0x0010ull
+
+#endif /* _CSRS_ZIP__ */
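A minimal bring-up sketch for one instruction queue, assuming the accessors in the header above plus readq()/writeq() MMIO helpers; base, q, cmd_buf_phys and nwords (command-buffer size in 8-byte words) are illustrative assumptions, not taken from the driver. It only shows the ordering implied by the SBUF_CTL/SBUF_ADDR comments.

#include <linux/io.h>
#include <linux/bits.h>
/* plus the CSR definitions above (header name assumed) */

static void example_zip_queue_bringup(void __iomem *base, int q,
				      u64 cmd_buf_phys, unsigned int nwords)
{
	union zip_quex_sbuf_ctl sbuf_ctl = { 0 };
	union zip_quex_sbuf_addr sbuf_addr = { 0 };
	union zip_que_ena que_ena;
	union zip_quex_doorbell db = { 0 };

	/* SBUF_CTL must be written before SBUF_ADDR (see comments above). */
	sbuf_ctl.s.size = nwords;
	writeq(sbuf_ctl.u_reg64, base + ZIP_QUEX_SBUF_CTL(q));

	/* PTR carries address bits <48:7> of the 128-byte-aligned buffer. */
	sbuf_addr.s.ptr = cmd_buf_phys >> 7;
	writeq(sbuf_addr.u_reg64, base + ZIP_QUEX_SBUF_ADDR(q));

	/* Enable this queue, then ring its doorbell for one instruction. */
	que_ena.u_reg64 = readq(base + ZIP_QUE_ENA);
	que_ena.s.ena |= BIT(q);
	writeq(que_ena.u_reg64, base + ZIP_QUE_ENA);

	db.s.dbell_cnt = 1;
	writeq(db.u_reg64, base + ZIP_QUEX_DOORBELL(q));
}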
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
new file mode 100644
index 0000000..b9dfae4
--- /dev/null
+++ b/drivers/crypto/ccp/Kconfig
@@ -0,0 +1,46 @@
+config CRYPTO_DEV_CCP_DD
+	tristate "Secure Processor device driver"
+	depends on CPU_SUP_AMD || ARM64
+	default m
+	help
+	  Provides AMD Secure Processor device driver.
+	  If you choose 'M' here, this module will be called ccp.
+
+config CRYPTO_DEV_SP_CCP
+	bool "Cryptographic Coprocessor device"
+	default y
+	depends on CRYPTO_DEV_CCP_DD
+	select HW_RANDOM
+	select DMA_ENGINE
+	select DMADEVICES
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	help
+	  Provides the support for AMD Cryptographic Coprocessor (CCP) device
+	  which can be used to offload encryption operations such as SHA, AES
+	  and more.
+
+config CRYPTO_DEV_CCP_CRYPTO
+	tristate "Encryption and hashing offload support"
+	default m
+	depends on CRYPTO_DEV_CCP_DD
+	depends on CRYPTO_DEV_SP_CCP
+	select CRYPTO_HASH
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AUTHENC
+	select CRYPTO_RSA
+	help
+	  Support for using the cryptographic API with the AMD Cryptographic
+	  Coprocessor. This module supports offload of SHA and AES algorithms.
+	  If you choose 'M' here, this module will be called ccp_crypto.
+
+config CRYPTO_DEV_SP_PSP
+	bool "Platform Security Processor (PSP) device"
+	default y
+	depends on CRYPTO_DEV_CCP_DD && X86_64
+	help
+	  Provide support for the AMD Platform Security Processor (PSP).
+	  The PSP is a dedicated processor that provides support for key
+	  management commands in Secure Encrypted Virtualization (SEV) mode,
+	  along with software-based Trusted Execution Environment (TEE) to
+	  enable third-party trusted applications.
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 0000000..51d1c0c
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
+ccp-objs  := sp-dev.o sp-platform.o
+ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
+	    ccp-ops.o \
+	    ccp-dev-v3.o \
+	    ccp-dev-v5.o \
+	    ccp-dmaengine.o \
+	    ccp-debugfs.o
+ccp-$(CONFIG_PCI) += sp-pci.o
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o
+
+obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
+ccp-crypto-objs := ccp-crypto-main.o \
+		   ccp-crypto-aes.o \
+		   ccp-crypto-aes-cmac.o \
+		   ccp-crypto-aes-xts.o \
+		   ccp-crypto-aes-galois.o \
+		   ccp-crypto-des3.o \
+		   ccp-crypto-rsa.o \
+		   ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
new file mode 100644
index 0000000..3c6fe57
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -0,0 +1,423 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
+				 int ret)
+{
+	struct ahash_request *req = ahash_request_cast(async_req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+	unsigned int digest_size = crypto_ahash_digestsize(tfm);
+
+	if (ret)
+		goto e_free;
+
+	if (rctx->hash_rem) {
+		/* Save remaining data to buffer */
+		unsigned int offset = rctx->nbytes - rctx->hash_rem;
+
+		scatterwalk_map_and_copy(rctx->buf, rctx->src,
+					 offset, rctx->hash_rem, 0);
+		rctx->buf_count = rctx->hash_rem;
+	} else {
+		rctx->buf_count = 0;
+	}
+
+	/* Update result area if supplied */
+	if (req->result && rctx->final)
+		memcpy(req->result, rctx->iv, digest_size);
+
+e_free:
+	sg_free_table(&rctx->data_sg);
+
+	return ret;
+}
+
+static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
+			      unsigned int final)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+	struct scatterlist *sg, *cmac_key_sg = NULL;
+	unsigned int block_size =
+		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+	unsigned int need_pad, sg_count;
+	gfp_t gfp;
+	u64 len;
+	int ret;
+
+	if (!ctx->u.aes.key_len)
+		return -EINVAL;
+
+	if (nbytes)
+		rctx->null_msg = 0;
+
+	len = (u64)rctx->buf_count + (u64)nbytes;
+
+	if (!final && (len <= block_size)) {
+		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
+					 0, nbytes, 0);
+		rctx->buf_count += nbytes;
+
+		return 0;
+	}
+
+	rctx->src = req->src;
+	rctx->nbytes = nbytes;
+
+	rctx->final = final;
+	rctx->hash_rem = final ? 0 : len & (block_size - 1);
+	rctx->hash_cnt = len - rctx->hash_rem;
+	if (!final && !rctx->hash_rem) {
+		/* CCP can't do zero length final, so keep some data around */
+		rctx->hash_cnt -= block_size;
+		rctx->hash_rem = block_size;
+	}
+
+	if (final && (rctx->null_msg || (len & (block_size - 1))))
+		need_pad = 1;
+	else
+		need_pad = 0;
+
+	sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv));
+
+	/* Build the data scatterlist table - allocate enough entries for all
+	 * possible data pieces (buffer, input data, padding)
+	 */
+	sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
+	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+		GFP_KERNEL : GFP_ATOMIC;
+	ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
+	if (ret)
+		return ret;
+
+	sg = NULL;
+	if (rctx->buf_count) {
+		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
+		if (!sg) {
+			ret = -EINVAL;
+			goto e_free;
+		}
+	}
+
+	if (nbytes) {
+		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
+		if (!sg) {
+			ret = -EINVAL;
+			goto e_free;
+		}
+	}
+
+	if (need_pad) {
+		int pad_length = block_size - (len & (block_size - 1));
+
+		rctx->hash_cnt += pad_length;
+
+		memset(rctx->pad, 0, sizeof(rctx->pad));
+		rctx->pad[0] = 0x80;
+		sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
+		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
+		if (!sg) {
+			ret = -EINVAL;
+			goto e_free;
+		}
+	}
+	if (sg) {
+		sg_mark_end(sg);
+		sg = rctx->data_sg.sgl;
+	}
+
+	/* Initialize the K1/K2 scatterlist */
+	if (final)
+		cmac_key_sg = (need_pad) ? &ctx->u.aes.k2_sg
+					 : &ctx->u.aes.k1_sg;
+
+	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+	INIT_LIST_HEAD(&rctx->cmd.entry);
+	rctx->cmd.engine = CCP_ENGINE_AES;
+	rctx->cmd.u.aes.type = ctx->u.aes.type;
+	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+	rctx->cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
+	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
+	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
+	rctx->cmd.u.aes.iv = &rctx->iv_sg;
+	rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE;
+	rctx->cmd.u.aes.src = sg;
+	rctx->cmd.u.aes.src_len = rctx->hash_cnt;
+	rctx->cmd.u.aes.dst = NULL;
+	rctx->cmd.u.aes.cmac_key = cmac_key_sg;
+	rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len;
+	rctx->cmd.u.aes.cmac_final = final;
+
+	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+	return ret;
+
+e_free:
+	sg_free_table(&rctx->data_sg);
+
+	return ret;
+}
+
+static int ccp_aes_cmac_init(struct ahash_request *req)
+{
+	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+
+	memset(rctx, 0, sizeof(*rctx));
+
+	rctx->null_msg = 1;
+
+	return 0;
+}
+
+static int ccp_aes_cmac_update(struct ahash_request *req)
+{
+	return ccp_do_cmac_update(req, req->nbytes, 0);
+}
+
+static int ccp_aes_cmac_final(struct ahash_request *req)
+{
+	return ccp_do_cmac_update(req, 0, 1);
+}
+
+static int ccp_aes_cmac_finup(struct ahash_request *req)
+{
+	return ccp_do_cmac_update(req, req->nbytes, 1);
+}
+
+static int ccp_aes_cmac_digest(struct ahash_request *req)
+{
+	int ret;
+
+	ret = ccp_aes_cmac_init(req);
+	if (ret)
+		return ret;
+
+	return ccp_aes_cmac_finup(req);
+}
+
+static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+{
+	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+	struct ccp_aes_cmac_exp_ctx state;
+
+	/* Don't let anything leak to 'out' */
+	memset(&state, 0, sizeof(state));
+
+	state.null_msg = rctx->null_msg;
+	memcpy(state.iv, rctx->iv, sizeof(state.iv));
+	state.buf_count = rctx->buf_count;
+	memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+	/* 'out' may not be aligned so memcpy from local variable */
+	memcpy(out, &state, sizeof(state));
+
+	return 0;
+}
+
+static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
+{
+	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+	struct ccp_aes_cmac_exp_ctx state;
+
+	/* 'in' may not be aligned so memcpy to local variable */
+	memcpy(&state, in, sizeof(state));
+
+	memset(rctx, 0, sizeof(*rctx));
+	rctx->null_msg = state.null_msg;
+	memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
+	rctx->buf_count = state.buf_count;
+	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+	return 0;
+}
+
+static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+			       unsigned int key_len)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+	struct ccp_crypto_ahash_alg *alg =
+		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
+	u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
+	u64 rb_hi = 0x00, rb_lo = 0x87;
+	__be64 *gk;
+	int ret;
+
+	switch (key_len) {
+	case AES_KEYSIZE_128:
+		ctx->u.aes.type = CCP_AES_TYPE_128;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->u.aes.type = CCP_AES_TYPE_192;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->u.aes.type = CCP_AES_TYPE_256;
+		break;
+	default:
+		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->u.aes.mode = alg->mode;
+
+	/* Set to zero until complete */
+	ctx->u.aes.key_len = 0;
+
+	/* Set the key for the AES cipher used to generate the keys */
+	ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len);
+	if (ret)
+		return ret;
+
+	/* Encrypt a block of zeroes - use key area in context */
+	memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
+	crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key,
+				  ctx->u.aes.key);
+
+	/* Generate K1 and K2 */
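+	/*
+	 * NIST SP 800-38B subkey derivation: each subkey is the previous
+	 * value doubled in GF(2^128), XORed with Rb (0x87 in the low word)
+	 * when the bit shifted out was set.
+	 */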
+	k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key));
+	k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1));
+
+	k1_hi = (k0_hi << 1) | (k0_lo >> 63);
+	k1_lo = k0_lo << 1;
+	if (ctx->u.aes.key[0] & 0x80) {
+		k1_hi ^= rb_hi;
+		k1_lo ^= rb_lo;
+	}
+	gk = (__be64 *)ctx->u.aes.k1;
+	*gk = cpu_to_be64(k1_hi);
+	gk++;
+	*gk = cpu_to_be64(k1_lo);
+
+	k2_hi = (k1_hi << 1) | (k1_lo >> 63);
+	k2_lo = k1_lo << 1;
+	if (ctx->u.aes.k1[0] & 0x80) {
+		k2_hi ^= rb_hi;
+		k2_lo ^= rb_lo;
+	}
+	gk = (__be64 *)ctx->u.aes.k2;
+	*gk = cpu_to_be64(k2_hi);
+	gk++;
+	*gk = cpu_to_be64(k2_lo);
+
+	ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1);
+	sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1));
+	sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2));
+
+	/* Save the supplied key */
+	memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
+	memcpy(ctx->u.aes.key, key, key_len);
+	ctx->u.aes.key_len = key_len;
+	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+	return ret;
+}
+
+static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct crypto_cipher *cipher_tfm;
+
+	ctx->complete = ccp_aes_cmac_complete;
+	ctx->u.aes.key_len = 0;
+
+	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));
+
+	cipher_tfm = crypto_alloc_cipher("aes", 0,
+					 CRYPTO_ALG_ASYNC |
+					 CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(cipher_tfm)) {
+		pr_warn("could not load aes cipher driver\n");
+		return PTR_ERR(cipher_tfm);
+	}
+	ctx->u.aes.tfm_cipher = cipher_tfm;
+
+	return 0;
+}
+
+static void ccp_aes_cmac_cra_exit(struct crypto_tfm *tfm)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (ctx->u.aes.tfm_cipher)
+		crypto_free_cipher(ctx->u.aes.tfm_cipher);
+	ctx->u.aes.tfm_cipher = NULL;
+}
+
+int ccp_register_aes_cmac_algs(struct list_head *head)
+{
+	struct ccp_crypto_ahash_alg *ccp_alg;
+	struct ahash_alg *alg;
+	struct hash_alg_common *halg;
+	struct crypto_alg *base;
+	int ret;
+
+	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+	if (!ccp_alg)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ccp_alg->entry);
+	ccp_alg->mode = CCP_AES_MODE_CMAC;
+
+	alg = &ccp_alg->alg;
+	alg->init = ccp_aes_cmac_init;
+	alg->update = ccp_aes_cmac_update;
+	alg->final = ccp_aes_cmac_final;
+	alg->finup = ccp_aes_cmac_finup;
+	alg->digest = ccp_aes_cmac_digest;
+	alg->export = ccp_aes_cmac_export;
+	alg->import = ccp_aes_cmac_import;
+	alg->setkey = ccp_aes_cmac_setkey;
+
+	halg = &alg->halg;
+	halg->digestsize = AES_BLOCK_SIZE;
+	halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
+
+	base = &halg->base;
+	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
+	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
+	base->cra_flags = CRYPTO_ALG_ASYNC |
+			  CRYPTO_ALG_KERN_DRIVER_ONLY |
+			  CRYPTO_ALG_NEED_FALLBACK;
+	base->cra_blocksize = AES_BLOCK_SIZE;
+	base->cra_ctxsize = sizeof(struct ccp_ctx);
+	base->cra_priority = CCP_CRA_PRIORITY;
+	base->cra_init = ccp_aes_cmac_cra_init;
+	base->cra_exit = ccp_aes_cmac_cra_exit;
+	base->cra_module = THIS_MODULE;
+
+	ret = crypto_register_ahash(alg);
+	if (ret) {
+		pr_err("%s ahash algorithm registration error (%d)\n",
+		       base->cra_name, ret);
+		kfree(ccp_alg);
+		return ret;
+	}
+
+	list_add(&ccp_alg->entry, head);
+
+	return 0;
+}
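A hedged usage sketch (not part of the snapshot): computing a CMAC through the generic ahash API, which resolves "cmac(aes)" to "cmac-aes-ccp" whenever this driver's cra_priority wins. The function name is illustrative, mac is a 16-byte output buffer, and msg must be DMA-able (not on the stack).

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_cmac_digest(const u8 *key, unsigned int keylen,
			       const void *msg, unsigned int len, u8 *mac)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ahash_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, msg, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, mac, len);

	/* Blocks until the asynchronous CCP completion fires. */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}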
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
new file mode 100644
index 0000000..ca1f0d7
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -0,0 +1,250 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support
+ *
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/gcm.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
+{
+	return ret;
+}
+
+static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+			      unsigned int key_len)
+{
+	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+
+	switch (key_len) {
+	case AES_KEYSIZE_128:
+		ctx->u.aes.type = CCP_AES_TYPE_128;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->u.aes.type = CCP_AES_TYPE_192;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->u.aes.type = CCP_AES_TYPE_256;
+		break;
+	default:
+		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx->u.aes.mode = CCP_AES_MODE_GCM;
+	ctx->u.aes.key_len = key_len;
+
+	memcpy(ctx->u.aes.key, key, key_len);
+	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+	return 0;
+}
+
+static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
+				   unsigned int authsize)
+{
+	return 0;
+}
+
+static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+	struct ccp_aes_req_ctx *rctx = aead_request_ctx(req);
+	struct scatterlist *iv_sg = NULL;
+	unsigned int iv_len = 0;
+	int i;
+	int ret = 0;
+
+	if (!ctx->u.aes.key_len)
+		return -EINVAL;
+
+	if (ctx->u.aes.mode != CCP_AES_MODE_GCM)
+		return -EINVAL;
+
+	if (!req->iv)
+		return -EINVAL;
+
+	/*
+	 * 5 parts:
+	 *   plaintext/ciphertext input
+	 *   AAD
+	 *   key
+	 *   IV
+	 *   Destination+tag buffer
+	 */
+
+	/* Prepare the IV: 12 bytes + an integer (counter) */
+	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
+	for (i = 0; i < 3; i++)
+		rctx->iv[i + GCM_AES_IV_SIZE] = 0;
+	rctx->iv[AES_BLOCK_SIZE - 1] = 1;
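+	/* For a 96-bit nonce this forms GCM's J0: the 12 IV bytes followed
+	 * by a big-endian 32-bit block counter initialised to 1.
+	 */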
+
+	/* Set up a scatterlist for the IV */
+	iv_sg = &rctx->iv_sg;
+	iv_len = AES_BLOCK_SIZE;
+	sg_init_one(iv_sg, rctx->iv, iv_len);
+
+	/* The AAD + plaintext are concatenated in the src buffer */
+	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+	INIT_LIST_HEAD(&rctx->cmd.entry);
+	rctx->cmd.engine = CCP_ENGINE_AES;
+	rctx->cmd.u.aes.type = ctx->u.aes.type;
+	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+	rctx->cmd.u.aes.action = encrypt;
+	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
+	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
+	rctx->cmd.u.aes.iv = iv_sg;
+	rctx->cmd.u.aes.iv_len = iv_len;
+	rctx->cmd.u.aes.src = req->src;
+	rctx->cmd.u.aes.src_len = req->cryptlen;
+	rctx->cmd.u.aes.aad_len = req->assoclen;
+
+	/* The cipher text + the tag are in the dst buffer */
+	rctx->cmd.u.aes.dst = req->dst;
+
+	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+	return ret;
+}
+
+static int ccp_aes_gcm_encrypt(struct aead_request *req)
+{
+	return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_ENCRYPT);
+}
+
+static int ccp_aes_gcm_decrypt(struct aead_request *req)
+{
+	return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_DECRYPT);
+}
+
+static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm)
+{
+	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+
+	ctx->complete = ccp_aes_gcm_complete;
+	ctx->u.aes.key_len = 0;
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
+
+	return 0;
+}
+
+static void ccp_aes_gcm_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct aead_alg ccp_aes_gcm_defaults = {
+	.setkey = ccp_aes_gcm_setkey,
+	.setauthsize = ccp_aes_gcm_setauthsize,
+	.encrypt = ccp_aes_gcm_encrypt,
+	.decrypt = ccp_aes_gcm_decrypt,
+	.init = ccp_aes_gcm_cra_init,
+	.ivsize = GCM_AES_IV_SIZE,
+	.maxauthsize = AES_BLOCK_SIZE,
+	.base = {
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY |
+				  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct ccp_ctx),
+		.cra_priority	= CCP_CRA_PRIORITY,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_exit	= ccp_aes_gcm_cra_exit,
+		.cra_module	= THIS_MODULE,
+	},
+};
+
+struct ccp_aes_aead_def {
+	enum ccp_aes_mode mode;
+	unsigned int version;
+	const char *name;
+	const char *driver_name;
+	unsigned int blocksize;
+	unsigned int ivsize;
+	struct aead_alg *alg_defaults;
+};
+
+static struct ccp_aes_aead_def aes_aead_algs[] = {
+	{
+		.mode		= CCP_AES_MODE_GHASH,
+		.version	= CCP_VERSION(5, 0),
+		.name		= "gcm(aes)",
+		.driver_name	= "gcm-aes-ccp",
+		.blocksize	= 1,
+		.ivsize		= AES_BLOCK_SIZE,
+		.alg_defaults	= &ccp_aes_gcm_defaults,
+	},
+};
+
+static int ccp_register_aes_aead(struct list_head *head,
+				 const struct ccp_aes_aead_def *def)
+{
+	struct ccp_crypto_aead *ccp_aead;
+	struct aead_alg *alg;
+	int ret;
+
+	ccp_aead = kzalloc(sizeof(*ccp_aead), GFP_KERNEL);
+	if (!ccp_aead)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ccp_aead->entry);
+
+	ccp_aead->mode = def->mode;
+
+	/* Copy the defaults and override as necessary */
+	alg = &ccp_aead->alg;
+	*alg = *def->alg_defaults;
+	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 def->driver_name);
+	alg->base.cra_blocksize = def->blocksize;
+	alg->base.cra_ablkcipher.ivsize = def->ivsize;
+
+	ret = crypto_register_aead(alg);
+	if (ret) {
+		pr_err("%s ablkcipher algorithm registration error (%d)\n",
+		       alg->base.cra_name, ret);
+		kfree(ccp_aead);
+		return ret;
+	}
+
+	list_add(&ccp_aead->entry, head);
+
+	return 0;
+}
+
+int ccp_register_aes_aeads(struct list_head *head)
+{
+	int i, ret;
+	unsigned int ccpversion = ccp_version();
+
+	for (i = 0; i < ARRAY_SIZE(aes_aead_algs); i++) {
+		if (aes_aead_algs[i].version > ccpversion)
+			continue;
+		ret = ccp_register_aes_aead(head, &aes_aead_algs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
new file mode 100644
index 0000000..94b5bcf
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -0,0 +1,291 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
+ *
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <crypto/aes.h>
+#include <crypto/xts.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+struct ccp_aes_xts_def {
+	const char *name;
+	const char *drv_name;
+};
+
+static struct ccp_aes_xts_def aes_xts_algs[] = {
+	{
+		.name		= "xts(aes)",
+		.drv_name	= "xts-aes-ccp",
+	},
+};
+
+struct ccp_unit_size_map {
+	unsigned int size;
+	u32 value;
+};
+
+static struct ccp_unit_size_map xts_unit_sizes[] = {
+	{
+		.size   = 16,
+		.value	= CCP_XTS_AES_UNIT_SIZE_16,
+	},
+	{
+		.size   = 512,
+		.value	= CCP_XTS_AES_UNIT_SIZE_512,
+	},
+	{
+		.size   = 1024,
+		.value	= CCP_XTS_AES_UNIT_SIZE_1024,
+	},
+	{
+		.size   = 2048,
+		.value	= CCP_XTS_AES_UNIT_SIZE_2048,
+	},
+	{
+		.size   = 4096,
+		.value	= CCP_XTS_AES_UNIT_SIZE_4096,
+	},
+};
+
+static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+	if (ret)
+		return ret;
+
+	memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+
+	return 0;
+}
+
+static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			      unsigned int key_len)
+{
+	struct crypto_tfm *xfm = crypto_ablkcipher_tfm(tfm);
+	struct ccp_ctx *ctx = crypto_tfm_ctx(xfm);
+	unsigned int ccpversion = ccp_version();
+	int ret;
+
+	ret = xts_check_key(xfm, key, key_len);
+	if (ret)
+		return ret;
+
+	/* Version 3 devices support 128-bit keys; version 5 devices can
+	 * accommodate 128- and 256-bit keys.
+	 */
+	switch (key_len) {
+	case AES_KEYSIZE_128 * 2:
+		memcpy(ctx->u.aes.key, key, key_len);
+		break;
+	case AES_KEYSIZE_256 * 2:
+		if (ccpversion > CCP_VERSION(3, 0))
+			memcpy(ctx->u.aes.key, key, key_len);
+		break;
+	}
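+	/* An XTS key is two AES keys concatenated, so the engine's key
+	 * length is half of what the caller supplied.
+	 */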
+	ctx->u.aes.key_len = key_len / 2;
+	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+	return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
+}
+
+static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+			     unsigned int encrypt)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	unsigned int ccpversion = ccp_version();
+	unsigned int fallback = 0;
+	unsigned int unit;
+	u32 unit_size;
+	int ret;
+
+	if (!ctx->u.aes.key_len)
+		return -EINVAL;
+
+	if (req->nbytes & (AES_BLOCK_SIZE - 1))
+		return -EINVAL;
+
+	if (!req->info)
+		return -EINVAL;
+
+	/* Check conditions under which the CCP can fulfill a request. The
+	 * device can handle input plaintext of a length that is a multiple
+	 * of the unit_size, but the crypto implementation only supports
+	 * the unit_size being equal to the input length. This limits the
+	 * number of scenarios we can handle.
+	 */
+	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
+	for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
+		if (req->nbytes == xts_unit_sizes[unit].size) {
+			unit_size = unit;
+			break;
+		}
+	}
+	/* The CCP has restrictions on block sizes. Also, a version 3 device
+	 * only supports AES-128 operations; version 5 CCPs support both
+	 * AES-128 and -256 operations.
+	 */
+	if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST)
+		fallback = 1;
+	if ((ccpversion < CCP_VERSION(5, 0)) &&
+	    (ctx->u.aes.key_len != AES_KEYSIZE_128))
+		fallback = 1;
+	if ((ctx->u.aes.key_len != AES_KEYSIZE_128) &&
+	    (ctx->u.aes.key_len != AES_KEYSIZE_256))
+		fallback = 1;
+	if (fallback) {
+		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);
+
+		/* Use the fallback to process the request for any
+		 * unsupported unit sizes or key sizes
+		 */
+		skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher);
+		skcipher_request_set_callback(subreq, req->base.flags,
+					      NULL, NULL);
+		skcipher_request_set_crypt(subreq, req->src, req->dst,
+					   req->nbytes, req->info);
+		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
+				crypto_skcipher_decrypt(subreq);
+		skcipher_request_zero(subreq);
+		return ret;
+	}
+
+	memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+	sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);
+
+	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+	INIT_LIST_HEAD(&rctx->cmd.entry);
+	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
+	rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
+	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
+					   : CCP_AES_ACTION_DECRYPT;
+	rctx->cmd.u.xts.unit_size = unit_size;
+	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
+	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
+	rctx->cmd.u.xts.iv = &rctx->iv_sg;
+	rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
+	rctx->cmd.u.xts.src = req->src;
+	rctx->cmd.u.xts.src_len = req->nbytes;
+	rctx->cmd.u.xts.dst = req->dst;
+
+	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+	return ret;
+}
+
+static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
+{
+	return ccp_aes_xts_crypt(req, 1);
+}
+
+static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
+{
+	return ccp_aes_xts_crypt(req, 0);
+}
+
+static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_skcipher *fallback_tfm;
+
+	ctx->complete = ccp_aes_xts_complete;
+	ctx->u.aes.key_len = 0;
+
+	fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
+					     CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm)) {
+		pr_warn("could not load fallback driver xts(aes)\n");
+		return PTR_ERR(fallback_tfm);
+	}
+	ctx->u.aes.tfm_skcipher = fallback_tfm;
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+
+	return 0;
+}
+
+static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
+}
+
+static int ccp_register_aes_xts_alg(struct list_head *head,
+				    const struct ccp_aes_xts_def *def)
+{
+	struct ccp_crypto_ablkcipher_alg *ccp_alg;
+	struct crypto_alg *alg;
+	int ret;
+
+	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+	if (!ccp_alg)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ccp_alg->entry);
+
+	alg = &ccp_alg->alg;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 def->drv_name);
+	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+			 CRYPTO_ALG_KERN_DRIVER_ONLY |
+			 CRYPTO_ALG_NEED_FALLBACK;
+	alg->cra_blocksize = AES_BLOCK_SIZE;
+	alg->cra_ctxsize = sizeof(struct ccp_ctx);
+	alg->cra_priority = CCP_CRA_PRIORITY;
+	alg->cra_type = &crypto_ablkcipher_type;
+	alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
+	alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
+	alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
+	alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
+	alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
+	alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
+	alg->cra_init = ccp_aes_xts_cra_init;
+	alg->cra_exit = ccp_aes_xts_cra_exit;
+	alg->cra_module = THIS_MODULE;
+
+	ret = crypto_register_alg(alg);
+	if (ret) {
+		pr_err("%s ablkcipher algorithm registration error (%d)\n",
+		       alg->cra_name, ret);
+		kfree(ccp_alg);
+		return ret;
+	}
+
+	list_add(&ccp_alg->entry, head);
+
+	return 0;
+}
+
+int ccp_register_aes_xts_algs(struct list_head *head)
+{
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
+		ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
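
Illustrative sketch only, not part of the patch: one way a kernel consumer could exercise the "xts(aes)" transform registered above through the skcipher API. The crypto core selects the highest-priority provider, which may be the CCP driver (which itself defers to a software xts(aes) fallback for unsupported unit sizes) or another implementation. The function name xts_aes_demo is hypothetical and error handling is abbreviated.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int xts_aes_demo(const u8 *key, unsigned int keylen, u8 *iv,
			struct scatterlist *src, struct scatterlist *dst,
			unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* XTS keys are two AES keys back to back; the IV is 16 bytes */
	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}
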
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
new file mode 100644
index 0000000..89291c1
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -0,0 +1,378 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES crypto API support
+ *
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+	if (ret)
+		return ret;
+
+	if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
+		memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);
+
+	return 0;
+}
+
+static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			  unsigned int key_len)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+	struct ccp_crypto_ablkcipher_alg *alg =
+		ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
+
+	switch (key_len) {
+	case AES_KEYSIZE_128:
+		ctx->u.aes.type = CCP_AES_TYPE_128;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->u.aes.type = CCP_AES_TYPE_192;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->u.aes.type = CCP_AES_TYPE_256;
+		break;
+	default:
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->u.aes.mode = alg->mode;
+	ctx->u.aes.key_len = key_len;
+
+	memcpy(ctx->u.aes.key, key, key_len);
+	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+	return 0;
+}
+
+static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct scatterlist *iv_sg = NULL;
+	unsigned int iv_len = 0;
+	int ret;
+
+	if (!ctx->u.aes.key_len)
+		return -EINVAL;
+
+	if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
+	     (ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
+	     (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
+	    (req->nbytes & (AES_BLOCK_SIZE - 1)))
+		return -EINVAL;
+
+	if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
+		if (!req->info)
+			return -EINVAL;
+
+		memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
+		iv_sg = &rctx->iv_sg;
+		iv_len = AES_BLOCK_SIZE;
+		sg_init_one(iv_sg, rctx->iv, iv_len);
+	}
+
+	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+	INIT_LIST_HEAD(&rctx->cmd.entry);
+	rctx->cmd.engine = CCP_ENGINE_AES;
+	rctx->cmd.u.aes.type = ctx->u.aes.type;
+	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+	rctx->cmd.u.aes.action =
+		(encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
+	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
+	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
+	rctx->cmd.u.aes.iv = iv_sg;
+	rctx->cmd.u.aes.iv_len = iv_len;
+	rctx->cmd.u.aes.src = req->src;
+	rctx->cmd.u.aes.src_len = req->nbytes;
+	rctx->cmd.u.aes.dst = req->dst;
+
+	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+	return ret;
+}
+
+static int ccp_aes_encrypt(struct ablkcipher_request *req)
+{
+	return ccp_aes_crypt(req, true);
+}
+
+static int ccp_aes_decrypt(struct ablkcipher_request *req)
+{
+	return ccp_aes_crypt(req, false);
+}
+
+static int ccp_aes_cra_init(struct crypto_tfm *tfm)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->complete = ccp_aes_complete;
+	ctx->u.aes.key_len = 0;
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+
+	return 0;
+}
+
+static void ccp_aes_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
+				    int ret)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+	/* Restore the original pointer */
+	req->info = rctx->rfc3686_info;
+
+	return ccp_aes_complete(async_req, ret);
+}
+
+static int ccp_aes_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+				  unsigned int key_len)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+
+	if (key_len < CTR_RFC3686_NONCE_SIZE)
+		return -EINVAL;
+
+	key_len -= CTR_RFC3686_NONCE_SIZE;
+	memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE);
+
+	return ccp_aes_setkey(tfm, key, key_len);
+}
+
+static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	u8 *iv;
+
+	/* Initialize the CTR block */
+	iv = rctx->rfc3686_iv;
+	memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);
+
+	iv += CTR_RFC3686_NONCE_SIZE;
+	memcpy(iv, req->info, CTR_RFC3686_IV_SIZE);
+
+	iv += CTR_RFC3686_IV_SIZE;
+	*(__be32 *)iv = cpu_to_be32(1);
+
+	/* Point to the new IV */
+	rctx->rfc3686_info = req->info;
+	req->info = rctx->rfc3686_iv;
+
+	return ccp_aes_crypt(req, encrypt);
+}
+
+static int ccp_aes_rfc3686_encrypt(struct ablkcipher_request *req)
+{
+	return ccp_aes_rfc3686_crypt(req, true);
+}
+
+static int ccp_aes_rfc3686_decrypt(struct ablkcipher_request *req)
+{
+	return ccp_aes_rfc3686_crypt(req, false);
+}
+
+static int ccp_aes_rfc3686_cra_init(struct crypto_tfm *tfm)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->complete = ccp_aes_rfc3686_complete;
+	ctx->u.aes.key_len = 0;
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
+
+	return 0;
+}
+
+static void ccp_aes_rfc3686_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct crypto_alg ccp_aes_defaults = {
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+			  CRYPTO_ALG_ASYNC |
+			  CRYPTO_ALG_KERN_DRIVER_ONLY |
+			  CRYPTO_ALG_NEED_FALLBACK,
+	.cra_blocksize	= AES_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct ccp_ctx),
+	.cra_priority	= CCP_CRA_PRIORITY,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_init	= ccp_aes_cra_init,
+	.cra_exit	= ccp_aes_cra_exit,
+	.cra_module	= THIS_MODULE,
+	.cra_ablkcipher	= {
+		.setkey		= ccp_aes_setkey,
+		.encrypt	= ccp_aes_encrypt,
+		.decrypt	= ccp_aes_decrypt,
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+	},
+};
+
+static struct crypto_alg ccp_aes_rfc3686_defaults = {
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+			   CRYPTO_ALG_ASYNC |
+			   CRYPTO_ALG_KERN_DRIVER_ONLY |
+			   CRYPTO_ALG_NEED_FALLBACK,
+	.cra_blocksize	= CTR_RFC3686_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct ccp_ctx),
+	.cra_priority	= CCP_CRA_PRIORITY,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_init	= ccp_aes_rfc3686_cra_init,
+	.cra_exit	= ccp_aes_rfc3686_cra_exit,
+	.cra_module	= THIS_MODULE,
+	.cra_ablkcipher	= {
+		.setkey		= ccp_aes_rfc3686_setkey,
+		.encrypt	= ccp_aes_rfc3686_encrypt,
+		.decrypt	= ccp_aes_rfc3686_decrypt,
+		.min_keysize	= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+	},
+};
+
+struct ccp_aes_def {
+	enum ccp_aes_mode mode;
+	unsigned int version;
+	const char *name;
+	const char *driver_name;
+	unsigned int blocksize;
+	unsigned int ivsize;
+	struct crypto_alg *alg_defaults;
+};
+
+static struct ccp_aes_def aes_algs[] = {
+	{
+		.mode		= CCP_AES_MODE_ECB,
+		.version	= CCP_VERSION(3, 0),
+		.name		= "ecb(aes)",
+		.driver_name	= "ecb-aes-ccp",
+		.blocksize	= AES_BLOCK_SIZE,
+		.ivsize		= 0,
+		.alg_defaults	= &ccp_aes_defaults,
+	},
+	{
+		.mode		= CCP_AES_MODE_CBC,
+		.version	= CCP_VERSION(3, 0),
+		.name		= "cbc(aes)",
+		.driver_name	= "cbc-aes-ccp",
+		.blocksize	= AES_BLOCK_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.alg_defaults	= &ccp_aes_defaults,
+	},
+	{
+		.mode		= CCP_AES_MODE_CFB,
+		.version	= CCP_VERSION(3, 0),
+		.name		= "cfb(aes)",
+		.driver_name	= "cfb-aes-ccp",
+		.blocksize	= AES_BLOCK_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.alg_defaults	= &ccp_aes_defaults,
+	},
+	{
+		.mode		= CCP_AES_MODE_OFB,
+		.version	= CCP_VERSION(3, 0),
+		.name		= "ofb(aes)",
+		.driver_name	= "ofb-aes-ccp",
+		.blocksize	= 1,
+		.ivsize		= AES_BLOCK_SIZE,
+		.alg_defaults	= &ccp_aes_defaults,
+	},
+	{
+		.mode		= CCP_AES_MODE_CTR,
+		.version	= CCP_VERSION(3, 0),
+		.name		= "ctr(aes)",
+		.driver_name	= "ctr-aes-ccp",
+		.blocksize	= 1,
+		.ivsize		= AES_BLOCK_SIZE,
+		.alg_defaults	= &ccp_aes_defaults,
+	},
+	{
+		.mode		= CCP_AES_MODE_CTR,
+		.version	= CCP_VERSION(3, 0),
+		.name		= "rfc3686(ctr(aes))",
+		.driver_name	= "rfc3686-ctr-aes-ccp",
+		.blocksize	= 1,
+		.ivsize		= CTR_RFC3686_IV_SIZE,
+		.alg_defaults	= &ccp_aes_rfc3686_defaults,
+	},
+};
+
+static int ccp_register_aes_alg(struct list_head *head,
+				const struct ccp_aes_def *def)
+{
+	struct ccp_crypto_ablkcipher_alg *ccp_alg;
+	struct crypto_alg *alg;
+	int ret;
+
+	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+	if (!ccp_alg)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ccp_alg->entry);
+
+	ccp_alg->mode = def->mode;
+
+	/* Copy the defaults and override as necessary */
+	alg = &ccp_alg->alg;
+	*alg = *def->alg_defaults;
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 def->driver_name);
+	alg->cra_blocksize = def->blocksize;
+	alg->cra_ablkcipher.ivsize = def->ivsize;
+
+	ret = crypto_register_alg(alg);
+	if (ret) {
+		pr_err("%s ablkcipher algorithm registration error (%d)\n",
+		       alg->cra_name, ret);
+		kfree(ccp_alg);
+		return ret;
+	}
+
+	list_add(&ccp_alg->entry, head);
+
+	return 0;
+}
+
+int ccp_register_aes_algs(struct list_head *head)
+{
+	int i, ret;
+	unsigned int ccpversion = ccp_version();
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+		if (aes_algs[i].version > ccpversion)
+			continue;
+		ret = ccp_register_aes_alg(head, &aes_algs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
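
Illustrative sketch, not part of the patch: the 16-byte counter block that ccp_aes_rfc3686_crypt() assembles before handing the request to the plain CTR path (4-byte nonce saved at setkey time, 8-byte per-request IV, 4-byte big-endian block counter starting at 1). The struct name is hypothetical; the driver builds the block byte-wise into rctx->rfc3686_iv rather than through a struct.

struct rfc3686_ctr_block {
	u8     nonce[CTR_RFC3686_NONCE_SIZE];	/* 4 bytes, from setkey */
	u8     iv[CTR_RFC3686_IV_SIZE];		/* 8 bytes, from req->info */
	__be32 counter;				/* starts at cpu_to_be32(1) */
} __packed;					/* 16 bytes == AES_BLOCK_SIZE */
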
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
new file mode 100644
index 0000000..ae87b74
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -0,0 +1,254 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) DES3 crypto API support
+ *
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <ghook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/des.h>
+
+#include "ccp-crypto.h"
+
+static int ccp_des3_complete(struct crypto_async_request *async_req, int ret)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+	if (ret)
+		return ret;
+
+	if (ctx->u.des3.mode != CCP_DES3_MODE_ECB)
+		memcpy(req->info, rctx->iv, DES3_EDE_BLOCK_SIZE);
+
+	return 0;
+}
+
+static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+		unsigned int key_len)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+	struct ccp_crypto_ablkcipher_alg *alg =
+		ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
+	u32 *flags = &tfm->base.crt_flags;
+
+
+	/* From des_generic.c:
+	 *
+	 * RFC2451:
+	 *   If the first two or last two independent 64-bit keys are
+	 *   equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
+	 *   same as DES.  Implementers MUST reject keys that exhibit this
+	 *   property.
+	 */
+	const u32 *K = (const u32 *)key;
+
+	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+		     (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	/* It's not clear that there is any support for a keysize of 112.
+	 * If needed, the caller should make K1 == K3
+	 */
+	ctx->u.des3.type = CCP_DES3_TYPE_168;
+	ctx->u.des3.mode = alg->mode;
+	ctx->u.des3.key_len = key_len;
+
+	memcpy(ctx->u.des3.key, key, key_len);
+	sg_init_one(&ctx->u.des3.key_sg, ctx->u.des3.key, key_len);
+
+	return 0;
+}
+
+static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct scatterlist *iv_sg = NULL;
+	unsigned int iv_len = 0;
+	int ret;
+
+	if (!ctx->u.des3.key_len)
+		return -EINVAL;
+
+	if (((ctx->u.des3.mode == CCP_DES3_MODE_ECB) ||
+	     (ctx->u.des3.mode == CCP_DES3_MODE_CBC)) &&
+	    (req->nbytes & (DES3_EDE_BLOCK_SIZE - 1)))
+		return -EINVAL;
+
+	if (ctx->u.des3.mode != CCP_DES3_MODE_ECB) {
+		if (!req->info)
+			return -EINVAL;
+
+		memcpy(rctx->iv, req->info, DES3_EDE_BLOCK_SIZE);
+		iv_sg = &rctx->iv_sg;
+		iv_len = DES3_EDE_BLOCK_SIZE;
+		sg_init_one(iv_sg, rctx->iv, iv_len);
+	}
+
+	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+	INIT_LIST_HEAD(&rctx->cmd.entry);
+	rctx->cmd.engine = CCP_ENGINE_DES3;
+	rctx->cmd.u.des3.type = ctx->u.des3.type;
+	rctx->cmd.u.des3.mode = ctx->u.des3.mode;
+	rctx->cmd.u.des3.action = (encrypt)
+				  ? CCP_DES3_ACTION_ENCRYPT
+				  : CCP_DES3_ACTION_DECRYPT;
+	rctx->cmd.u.des3.key = &ctx->u.des3.key_sg;
+	rctx->cmd.u.des3.key_len = ctx->u.des3.key_len;
+	rctx->cmd.u.des3.iv = iv_sg;
+	rctx->cmd.u.des3.iv_len = iv_len;
+	rctx->cmd.u.des3.src = req->src;
+	rctx->cmd.u.des3.src_len = req->nbytes;
+	rctx->cmd.u.des3.dst = req->dst;
+
+	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+	return ret;
+}
+
+static int ccp_des3_encrypt(struct ablkcipher_request *req)
+{
+	return ccp_des3_crypt(req, true);
+}
+
+static int ccp_des3_decrypt(struct ablkcipher_request *req)
+{
+	return ccp_des3_crypt(req, false);
+}
+
+static int ccp_des3_cra_init(struct crypto_tfm *tfm)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->complete = ccp_des3_complete;
+	ctx->u.des3.key_len = 0;
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_des3_req_ctx);
+
+	return 0;
+}
+
+static void ccp_des3_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct crypto_alg ccp_des3_defaults = {
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+		CRYPTO_ALG_ASYNC |
+		CRYPTO_ALG_KERN_DRIVER_ONLY |
+		CRYPTO_ALG_NEED_FALLBACK,
+	.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct ccp_ctx),
+	.cra_priority	= CCP_CRA_PRIORITY,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_init	= ccp_des3_cra_init,
+	.cra_exit	= ccp_des3_cra_exit,
+	.cra_module	= THIS_MODULE,
+	.cra_ablkcipher	= {
+		.setkey		= ccp_des3_setkey,
+		.encrypt	= ccp_des3_encrypt,
+		.decrypt	= ccp_des3_decrypt,
+		.min_keysize	= DES3_EDE_KEY_SIZE,
+		.max_keysize	= DES3_EDE_KEY_SIZE,
+	},
+};
+
+struct ccp_des3_def {
+	enum ccp_des3_mode mode;
+	unsigned int version;
+	const char *name;
+	const char *driver_name;
+	unsigned int blocksize;
+	unsigned int ivsize;
+	struct crypto_alg *alg_defaults;
+};
+
+static struct ccp_des3_def des3_algs[] = {
+	{
+		.mode		= CCP_DES3_MODE_ECB,
+		.version	= CCP_VERSION(5, 0),
+		.name		= "ecb(des3_ede)",
+		.driver_name	= "ecb-des3-ccp",
+		.blocksize	= DES3_EDE_BLOCK_SIZE,
+		.ivsize		= 0,
+		.alg_defaults	= &ccp_des3_defaults,
+	},
+	{
+		.mode		= CCP_DES3_MODE_CBC,
+		.version	= CCP_VERSION(5, 0),
+		.name		= "cbc(des3_ede)",
+		.driver_name	= "cbc-des3-ccp",
+		.blocksize	= DES3_EDE_BLOCK_SIZE,
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.alg_defaults	= &ccp_des3_defaults,
+	},
+};
+
+static int ccp_register_des3_alg(struct list_head *head,
+				 const struct ccp_des3_def *def)
+{
+	struct ccp_crypto_ablkcipher_alg *ccp_alg;
+	struct crypto_alg *alg;
+	int ret;
+
+	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+	if (!ccp_alg)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ccp_alg->entry);
+
+	ccp_alg->mode = def->mode;
+
+	/* Copy the defaults and override as necessary */
+	alg = &ccp_alg->alg;
+	*alg = *def->alg_defaults;
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+			def->driver_name);
+	alg->cra_blocksize = def->blocksize;
+	alg->cra_ablkcipher.ivsize = def->ivsize;
+
+	ret = crypto_register_alg(alg);
+	if (ret) {
+		pr_err("%s ablkcipher algorithm registration error (%d)\n",
+				alg->cra_name, ret);
+		kfree(ccp_alg);
+		return ret;
+	}
+
+	list_add(&ccp_alg->entry, head);
+
+	return 0;
+}
+
+int ccp_register_des3_algs(struct list_head *head)
+{
+	int i, ret;
+	unsigned int ccpversion = ccp_version();
+
+	for (i = 0; i < ARRAY_SIZE(des3_algs); i++) {
+		if (des3_algs[i].version > ccpversion)
+			continue;
+		ret = ccp_register_des3_alg(head, &des3_algs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
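
Illustrative sketch, not part of the patch: the RFC 2451 degenerate-key condition that ccp_des3_setkey() rejects when CRYPTO_TFM_REQ_WEAK_KEY is set, written out as a standalone predicate. K1 == K2 or K2 == K3 collapses the EDE construction to single DES. The helper name is hypothetical.

static bool des3_key_is_degenerate(const u8 *key)
{
	const u32 *K = (const u32 *)key;	/* three 64-bit subkeys as six u32s */

	return !((K[0] ^ K[2]) | (K[1] ^ K[3])) ||	/* K1 == K2 */
	       !((K[2] ^ K[4]) | (K[3] ^ K[5]));	/* K2 == K3 */
}
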
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
new file mode 100644
index 0000000..b95d199
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -0,0 +1,432 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) crypto API support
+ *
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/ccp.h>
+#include <linux/scatterlist.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/akcipher.h>
+
+#include "ccp-crypto.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
+MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");
+
+static unsigned int aes_disable;
+module_param(aes_disable, uint, 0444);
+MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");
+
+static unsigned int sha_disable;
+module_param(sha_disable, uint, 0444);
+MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
+
+static unsigned int des3_disable;
+module_param(des3_disable, uint, 0444);
+MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");
+
+static unsigned int rsa_disable;
+module_param(rsa_disable, uint, 0444);
+MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
+
+/* List heads for the supported algorithms */
+static LIST_HEAD(hash_algs);
+static LIST_HEAD(cipher_algs);
+static LIST_HEAD(aead_algs);
+static LIST_HEAD(akcipher_algs);
+
+/* For any tfm, requests for that tfm must be returned in the order
+ * received.  With multiple queues available, the CCP can process more
+ * than one cmd at a time.  Therefore we must maintain a cmd list to ensure
+ * the proper ordering of requests on a given tfm.
+ */
+struct ccp_crypto_queue {
+	struct list_head cmds;
+	struct list_head *backlog;
+	unsigned int cmd_count;
+};
+
+#define CCP_CRYPTO_MAX_QLEN	100
+
+static struct ccp_crypto_queue req_queue;
+static spinlock_t req_queue_lock;
+
+struct ccp_crypto_cmd {
+	struct list_head entry;
+
+	struct ccp_cmd *cmd;
+
+	/* Save the crypto_tfm and crypto_async_request addresses
+	 * separately to avoid any reference to a possibly invalid
+	 * crypto_async_request structure after invoking the request
+	 * callback
+	 */
+	struct crypto_async_request *req;
+	struct crypto_tfm *tfm;
+
+	/* Used for held command processing to determine state */
+	int ret;
+};
+
+struct ccp_crypto_cpu {
+	struct work_struct work;
+	struct completion completion;
+	struct ccp_crypto_cmd *crypto_cmd;
+	int err;
+};
+
+static inline bool ccp_crypto_success(int err)
+{
+	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
+		return false;
+
+	return true;
+}
+
+static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
+	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
+{
+	struct ccp_crypto_cmd *held = NULL, *tmp;
+	unsigned long flags;
+
+	*backlog = NULL;
+
+	spin_lock_irqsave(&req_queue_lock, flags);
+
+	/* Held cmds will be after the current cmd in the queue so start
+	 * searching for a cmd with a matching tfm for submission.
+	 */
+	tmp = crypto_cmd;
+	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
+		if (crypto_cmd->tfm != tmp->tfm)
+			continue;
+		held = tmp;
+		break;
+	}
+
+	/* Process the backlog:
+	 *   Because cmds can be executed from any point in the cmd list,
+	 *   special precautions have to be taken when handling the backlog.
+	 */
+	if (req_queue.backlog != &req_queue.cmds) {
+		/* Skip over this cmd if it is the next backlog cmd */
+		if (req_queue.backlog == &crypto_cmd->entry)
+			req_queue.backlog = crypto_cmd->entry.next;
+
+		*backlog = container_of(req_queue.backlog,
+					struct ccp_crypto_cmd, entry);
+		req_queue.backlog = req_queue.backlog->next;
+
+		/* Skip over this cmd if it is now the next backlog cmd */
+		if (req_queue.backlog == &crypto_cmd->entry)
+			req_queue.backlog = crypto_cmd->entry.next;
+	}
+
+	/* Remove the cmd entry from the list of cmds */
+	req_queue.cmd_count--;
+	list_del(&crypto_cmd->entry);
+
+	spin_unlock_irqrestore(&req_queue_lock, flags);
+
+	return held;
+}
+
+static void ccp_crypto_complete(void *data, int err)
+{
+	struct ccp_crypto_cmd *crypto_cmd = data;
+	struct ccp_crypto_cmd *held, *next, *backlog;
+	struct crypto_async_request *req = crypto_cmd->req;
+	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
+	int ret;
+
+	if (err == -EINPROGRESS) {
+		/* Only propagate the -EINPROGRESS if necessary */
+		if (crypto_cmd->ret == -EBUSY) {
+			crypto_cmd->ret = -EINPROGRESS;
+			req->complete(req, -EINPROGRESS);
+		}
+
+		return;
+	}
+
+	/* Operation has completed - update the queue before invoking
+	 * the completion callbacks and retrieve the next cmd (cmd with
+	 * a matching tfm) that can be submitted to the CCP.
+	 */
+	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
+	if (backlog) {
+		backlog->ret = -EINPROGRESS;
+		backlog->req->complete(backlog->req, -EINPROGRESS);
+	}
+
+	/* Transition the state from -EBUSY to -EINPROGRESS first */
+	if (crypto_cmd->ret == -EBUSY)
+		req->complete(req, -EINPROGRESS);
+
+	/* Completion callbacks */
+	ret = err;
+	if (ctx->complete)
+		ret = ctx->complete(req, ret);
+	req->complete(req, ret);
+
+	/* Submit the next cmd */
+	while (held) {
+		/* Since we have already queued the cmd, we must indicate that
+		 * we can backlog so as not to "lose" this request.
+		 */
+		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
+		ret = ccp_enqueue_cmd(held->cmd);
+		if (ccp_crypto_success(ret))
+			break;
+
+		/* Error occurred, report it and get the next entry */
+		ctx = crypto_tfm_ctx(held->req->tfm);
+		if (ctx->complete)
+			ret = ctx->complete(held->req, ret);
+		held->req->complete(held->req, ret);
+
+		next = ccp_crypto_cmd_complete(held, &backlog);
+		if (backlog) {
+			backlog->ret = -EINPROGRESS;
+			backlog->req->complete(backlog->req, -EINPROGRESS);
+		}
+
+		kfree(held);
+		held = next;
+	}
+
+	kfree(crypto_cmd);
+}
+
+static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
+{
+	struct ccp_crypto_cmd *active = NULL, *tmp;
+	unsigned long flags;
+	bool free_cmd = true;
+	int ret;
+
+	spin_lock_irqsave(&req_queue_lock, flags);
+
+	/* Check if the cmd can/should be queued */
+	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
+		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
+			ret = -ENOSPC;
+			goto e_lock;
+		}
+	}
+
+	/* Look for an entry with the same tfm.  If there is a cmd
+	 * with the same tfm in the list then the current cmd cannot
+	 * be submitted to the CCP yet.
+	 */
+	list_for_each_entry(tmp, &req_queue.cmds, entry) {
+		if (crypto_cmd->tfm != tmp->tfm)
+			continue;
+		active = tmp;
+		break;
+	}
+
+	ret = -EINPROGRESS;
+	if (!active) {
+		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
+		if (!ccp_crypto_success(ret))
+			goto e_lock;	/* Error, don't queue it */
+	}
+
+	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
+		ret = -EBUSY;
+		if (req_queue.backlog == &req_queue.cmds)
+			req_queue.backlog = &crypto_cmd->entry;
+	}
+	crypto_cmd->ret = ret;
+
+	req_queue.cmd_count++;
+	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);
+
+	free_cmd = false;
+
+e_lock:
+	spin_unlock_irqrestore(&req_queue_lock, flags);
+
+	if (free_cmd)
+		kfree(crypto_cmd);
+
+	return ret;
+}
+
+/**
+ * ccp_crypto_enqueue_request - queue a crypto async request for processing
+ *				by the CCP
+ *
+ * @req: crypto_async_request struct to be processed
+ * @cmd: ccp_cmd struct to be sent to the CCP
+ */
+int ccp_crypto_enqueue_request(struct crypto_async_request *req,
+			       struct ccp_cmd *cmd)
+{
+	struct ccp_crypto_cmd *crypto_cmd;
+	gfp_t gfp;
+
+	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+
+	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
+	if (!crypto_cmd)
+		return -ENOMEM;
+
+	/* The tfm pointer must be saved and not referenced from the
+	 * crypto_async_request (req) pointer because it is used after
+	 * the completion callback for the request and the req pointer
+	 * might not be valid anymore.
+	 */
+	crypto_cmd->cmd = cmd;
+	crypto_cmd->req = req;
+	crypto_cmd->tfm = req->tfm;
+
+	cmd->callback = ccp_crypto_complete;
+	cmd->data = crypto_cmd;
+
+	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+		cmd->flags |= CCP_CMD_MAY_BACKLOG;
+	else
+		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;
+
+	return ccp_crypto_enqueue_cmd(crypto_cmd);
+}
+
+struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
+					    struct scatterlist *sg_add)
+{
+	struct scatterlist *sg, *sg_last = NULL;
+
+	for (sg = table->sgl; sg; sg = sg_next(sg))
+		if (!sg_page(sg))
+			break;
+	if (WARN_ON(!sg))
+		return NULL;
+
+	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
+		sg_set_page(sg, sg_page(sg_add), sg_add->length,
+			    sg_add->offset);
+		sg_last = sg;
+	}
+	if (WARN_ON(sg_add))
+		return NULL;
+
+	return sg_last;
+}
+
+static int ccp_register_algs(void)
+{
+	int ret;
+
+	if (!aes_disable) {
+		ret = ccp_register_aes_algs(&cipher_algs);
+		if (ret)
+			return ret;
+
+		ret = ccp_register_aes_cmac_algs(&hash_algs);
+		if (ret)
+			return ret;
+
+		ret = ccp_register_aes_xts_algs(&cipher_algs);
+		if (ret)
+			return ret;
+
+		ret = ccp_register_aes_aeads(&aead_algs);
+		if (ret)
+			return ret;
+	}
+
+	if (!des3_disable) {
+		ret = ccp_register_des3_algs(&cipher_algs);
+		if (ret)
+			return ret;
+	}
+
+	if (!sha_disable) {
+		ret = ccp_register_sha_algs(&hash_algs);
+		if (ret)
+			return ret;
+	}
+
+	if (!rsa_disable) {
+		ret = ccp_register_rsa_algs(&akcipher_algs);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void ccp_unregister_algs(void)
+{
+	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
+	struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
+	struct ccp_crypto_aead *aead_alg, *aead_tmp;
+	struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;
+
+	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
+		crypto_unregister_ahash(&ahash_alg->alg);
+		list_del(&ahash_alg->entry);
+		kfree(ahash_alg);
+	}
+
+	list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
+		crypto_unregister_alg(&ablk_alg->alg);
+		list_del(&ablk_alg->entry);
+		kfree(ablk_alg);
+	}
+
+	list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
+		crypto_unregister_aead(&aead_alg->alg);
+		list_del(&aead_alg->entry);
+		kfree(aead_alg);
+	}
+
+	list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
+		crypto_unregister_akcipher(&akc_alg->alg);
+		list_del(&akc_alg->entry);
+		kfree(akc_alg);
+	}
+}
+
+static int ccp_crypto_init(void)
+{
+	int ret;
+
+	ret = ccp_present();
+	if (ret)
+		return ret;
+
+	spin_lock_init(&req_queue_lock);
+	INIT_LIST_HEAD(&req_queue.cmds);
+	req_queue.backlog = &req_queue.cmds;
+	req_queue.cmd_count = 0;
+
+	ret = ccp_register_algs();
+	if (ret)
+		ccp_unregister_algs();
+
+	return ret;
+}
+
+static void ccp_crypto_exit(void)
+{
+	ccp_unregister_algs();
+}
+
+module_init(ccp_crypto_init);
+module_exit(ccp_crypto_exit);
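
Illustrative sketch, not part of the patch: the completion contract implemented by ccp_crypto_complete() as seen from the request owner. A command that was backlogged (-EBUSY at submit time) receives one -EINPROGRESS notification when the CCP accepts it, then a second call with the final status after ctx->complete() has post-processed the request. The names below are hypothetical; crypto_req_done()/crypto_wait_req() provide the same pattern generically.

#include <linux/completion.h>
#include <linux/crypto.h>

struct demo_result {
	struct completion completion;
	int err;
};

static void demo_req_done(struct crypto_async_request *req, int err)
{
	struct demo_result *res = req->data;	/* set via *_request_set_callback() */

	if (err == -EINPROGRESS)
		return;			/* backlogged cmd was just accepted */

	res->err = err;			/* final status: 0 or negative errno */
	complete(&res->completion);
}
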
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
new file mode 100644
index 0000000..05850df
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -0,0 +1,298 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+static inline struct akcipher_request *akcipher_request_cast(
+	struct crypto_async_request *req)
+{
+	return container_of(req, struct akcipher_request, base);
+}
+
+static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen,
+					    const u8 *buf, size_t sz)
+{
+	int nskip;
+
+	for (nskip = 0; nskip < sz; nskip++)
+		if (buf[nskip])
+			break;
+	*kplen = sz - nskip;
+	*kpbuf = kzalloc(*kplen, GFP_KERNEL);
+	if (!*kpbuf)
+		return -ENOMEM;
+	memcpy(*kpbuf, buf + nskip, *kplen);
+
+	return 0;
+}
+
+static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
+{
+	struct akcipher_request *req = akcipher_request_cast(async_req);
+	struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+
+	if (ret)
+		return ret;
+
+	req->dst_len = rctx->cmd.u.rsa.key_size >> 3;
+
+	return 0;
+}
+
+static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
+{
+	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	return ctx->u.rsa.n_len;
+}
+
+static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+	int ret = 0;
+
+	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+	INIT_LIST_HEAD(&rctx->cmd.entry);
+	rctx->cmd.engine = CCP_ENGINE_RSA;
+
+	rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */
+	if (encrypt) {
+		rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
+		rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
+	} else {
+		rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg;
+		rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len;
+	}
+	rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
+	rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
+	rctx->cmd.u.rsa.src = req->src;
+	rctx->cmd.u.rsa.src_len = req->src_len;
+	rctx->cmd.u.rsa.dst = req->dst;
+
+	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+	return ret;
+}
+
+static int ccp_rsa_encrypt(struct akcipher_request *req)
+{
+	return ccp_rsa_crypt(req, true);
+}
+
+static int ccp_rsa_decrypt(struct akcipher_request *req)
+{
+	return ccp_rsa_crypt(req, false);
+}
+
+static int ccp_check_key_length(unsigned int len)
+{
+	/* In bits */
+	if (len < 8 || len > 4096)
+		return -EINVAL;
+	return 0;
+}
+
+static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
+{
+	/* Clean up old key data */
+	kzfree(ctx->u.rsa.e_buf);
+	ctx->u.rsa.e_buf = NULL;
+	ctx->u.rsa.e_len = 0;
+	kzfree(ctx->u.rsa.n_buf);
+	ctx->u.rsa.n_buf = NULL;
+	ctx->u.rsa.n_len = 0;
+	kzfree(ctx->u.rsa.d_buf);
+	ctx->u.rsa.d_buf = NULL;
+	ctx->u.rsa.d_len = 0;
+}
+
+static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+			  unsigned int keylen, bool private)
+{
+	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct rsa_key raw_key;
+	int ret;
+
+	ccp_rsa_free_key_bufs(ctx);
+	memset(&raw_key, 0, sizeof(raw_key));
+
+	/* Code borrowed from crypto/rsa.c */
+	if (private)
+		ret = rsa_parse_priv_key(&raw_key, key, keylen);
+	else
+		ret = rsa_parse_pub_key(&raw_key, key, keylen);
+	if (ret)
+		goto n_key;
+
+	ret = ccp_copy_and_save_keypart(&ctx->u.rsa.n_buf, &ctx->u.rsa.n_len,
+					raw_key.n, raw_key.n_sz);
+	if (ret)
+		goto key_err;
+	sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len);
+
+	ctx->u.rsa.key_len = ctx->u.rsa.n_len << 3; /* convert to bits */
+	if (ccp_check_key_length(ctx->u.rsa.key_len)) {
+		ret = -EINVAL;
+		goto key_err;
+	}
+
+	ret = ccp_copy_and_save_keypart(&ctx->u.rsa.e_buf, &ctx->u.rsa.e_len,
+					raw_key.e, raw_key.e_sz);
+	if (ret)
+		goto key_err;
+	sg_init_one(&ctx->u.rsa.e_sg, ctx->u.rsa.e_buf, ctx->u.rsa.e_len);
+
+	if (private) {
+		ret = ccp_copy_and_save_keypart(&ctx->u.rsa.d_buf,
+						&ctx->u.rsa.d_len,
+						raw_key.d, raw_key.d_sz);
+		if (ret)
+			goto key_err;
+		sg_init_one(&ctx->u.rsa.d_sg,
+			    ctx->u.rsa.d_buf, ctx->u.rsa.d_len);
+	}
+
+	return 0;
+
+key_err:
+	ccp_rsa_free_key_bufs(ctx);
+
+n_key:
+	return ret;
+}
+
+static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+			      unsigned int keylen)
+{
+	return ccp_rsa_setkey(tfm, key, keylen, true);
+}
+
+static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+			     unsigned int keylen)
+{
+	return ccp_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	akcipher_set_reqsize(tfm, sizeof(struct ccp_rsa_req_ctx));
+	ctx->complete = ccp_rsa_complete;
+
+	return 0;
+}
+
+static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+
+	ccp_rsa_free_key_bufs(ctx);
+}
+
+static struct akcipher_alg ccp_rsa_defaults = {
+	.encrypt = ccp_rsa_encrypt,
+	.decrypt = ccp_rsa_decrypt,
+	.sign = ccp_rsa_decrypt,
+	.verify = ccp_rsa_encrypt,
+	.set_pub_key = ccp_rsa_setpubkey,
+	.set_priv_key = ccp_rsa_setprivkey,
+	.max_size = ccp_rsa_maxsize,
+	.init = ccp_rsa_init_tfm,
+	.exit = ccp_rsa_exit_tfm,
+	.base = {
+		.cra_name = "rsa",
+		.cra_driver_name = "rsa-ccp",
+		.cra_priority = CCP_CRA_PRIORITY,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = 2 * sizeof(struct ccp_ctx),
+	},
+};
+
+struct ccp_rsa_def {
+	unsigned int version;
+	const char *name;
+	const char *driver_name;
+	unsigned int reqsize;
+	struct akcipher_alg *alg_defaults;
+};
+
+static struct ccp_rsa_def rsa_algs[] = {
+	{
+		.version	= CCP_VERSION(3, 0),
+		.name		= "rsa",
+		.driver_name	= "rsa-ccp",
+		.reqsize	= sizeof(struct ccp_rsa_req_ctx),
+		.alg_defaults	= &ccp_rsa_defaults,
+	}
+};
+
+int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def)
+{
+	struct ccp_crypto_akcipher_alg *ccp_alg;
+	struct akcipher_alg *alg;
+	int ret;
+
+	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+	if (!ccp_alg)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ccp_alg->entry);
+
+	alg = &ccp_alg->alg;
+	*alg = *def->alg_defaults;
+	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 def->driver_name);
+	ret = crypto_register_akcipher(alg);
+	if (ret) {
+		pr_err("%s akcipher algorithm registration error (%d)\n",
+		       alg->base.cra_name, ret);
+		kfree(ccp_alg);
+		return ret;
+	}
+
+	list_add(&ccp_alg->entry, head);
+
+	return 0;
+}
+
+int ccp_register_rsa_algs(struct list_head *head)
+{
+	int i, ret;
+	unsigned int ccpversion = ccp_version();
+
+	/* Register the RSA algorithm in standard mode.
+	 * This works for CCP v3 and later.
+	 */
+	for (i = 0; i < ARRAY_SIZE(rsa_algs); i++) {
+		if (rsa_algs[i].version > ccpversion)
+			continue;
+		ret = ccp_register_rsa_alg(head, &rsa_algs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
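
Illustrative sketch, not part of the patch: what ccp_copy_and_save_keypart() does with the big-endian buffers returned by rsa_parse_pub_key()/rsa_parse_priv_key(): leading zero bytes are stripped before the part is copied, so key_len (n_len * 8) reflects only the significant modulus bytes. The demo function is hypothetical and would need to live in this file, since the helper is static.

static void keypart_demo(void)
{
	static const u8 n[4] = { 0x00, 0x01, 0x02, 0x03 };
	u8 *n_buf;
	unsigned int n_len;

	if (!ccp_copy_and_save_keypart(&n_buf, &n_len, n, sizeof(n))) {
		/* n_len == 3, n_buf holds { 0x01, 0x02, 0x03 } */
		kzfree(n_buf);
	}
}
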
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
new file mode 100644
index 0000000..2ca64bb
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -0,0 +1,539 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
+ *
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
+{
+	struct ahash_request *req = ahash_request_cast(async_req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+	unsigned int digest_size = crypto_ahash_digestsize(tfm);
+
+	if (ret)
+		goto e_free;
+
+	if (rctx->hash_rem) {
+		/* Save remaining data to buffer */
+		unsigned int offset = rctx->nbytes - rctx->hash_rem;
+
+		scatterwalk_map_and_copy(rctx->buf, rctx->src,
+					 offset, rctx->hash_rem, 0);
+		rctx->buf_count = rctx->hash_rem;
+	} else {
+		rctx->buf_count = 0;
+	}
+
+	/* Update result area if supplied */
+	if (req->result && rctx->final)
+		memcpy(req->result, rctx->ctx, digest_size);
+
+e_free:
+	sg_free_table(&rctx->data_sg);
+
+	return ret;
+}
+
+static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
+			     unsigned int final)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct scatterlist *sg;
+	unsigned int block_size =
+		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+	unsigned int sg_count;
+	gfp_t gfp;
+	u64 len;
+	int ret;
+
+	len = (u64)rctx->buf_count + (u64)nbytes;
+
+	if (!final && (len <= block_size)) {
+		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
+					 0, nbytes, 0);
+		rctx->buf_count += nbytes;
+
+		return 0;
+	}
+
+	rctx->src = req->src;
+	rctx->nbytes = nbytes;
+
+	rctx->final = final;
+	rctx->hash_rem = final ? 0 : len & (block_size - 1);
+	rctx->hash_cnt = len - rctx->hash_rem;
+	if (!final && !rctx->hash_rem) {
+		/* CCP can't do zero length final, so keep some data around */
+		rctx->hash_cnt -= block_size;
+		rctx->hash_rem = block_size;
+	}
+
+	/* Initialize the context scatterlist */
+	sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));
+
+	sg = NULL;
+	if (rctx->buf_count && nbytes) {
+		/* Build the data scatterlist table - allocate enough entries
+		 * for both data pieces (buffer and input data)
+		 */
+		gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+			GFP_KERNEL : GFP_ATOMIC;
+		sg_count = sg_nents(req->src) + 1;
+		ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
+		if (ret)
+			return ret;
+
+		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
+		if (!sg) {
+			ret = -EINVAL;
+			goto e_free;
+		}
+		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
+		if (!sg) {
+			ret = -EINVAL;
+			goto e_free;
+		}
+		sg_mark_end(sg);
+
+		sg = rctx->data_sg.sgl;
+	} else if (rctx->buf_count) {
+		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+
+		sg = &rctx->buf_sg;
+	} else if (nbytes) {
+		sg = req->src;
+	}
+
+	rctx->msg_bits += (rctx->hash_cnt << 3);	/* Total in bits */
+
+	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+	INIT_LIST_HEAD(&rctx->cmd.entry);
+	rctx->cmd.engine = CCP_ENGINE_SHA;
+	rctx->cmd.u.sha.type = rctx->type;
+	rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
+
+	switch (rctx->type) {
+	case CCP_SHA_TYPE_1:
+		rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE;
+		break;
+	case CCP_SHA_TYPE_224:
+		rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE;
+		break;
+	case CCP_SHA_TYPE_256:
+		rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
+		break;
+	case CCP_SHA_TYPE_384:
+		rctx->cmd.u.sha.ctx_len = SHA384_DIGEST_SIZE;
+		break;
+	case CCP_SHA_TYPE_512:
+		rctx->cmd.u.sha.ctx_len = SHA512_DIGEST_SIZE;
+		break;
+	default:
+		/* Should never get here */
+		break;
+	}
+
+	rctx->cmd.u.sha.src = sg;
+	rctx->cmd.u.sha.src_len = rctx->hash_cnt;
+	rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
+		&ctx->u.sha.opad_sg : NULL;
+	rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
+		ctx->u.sha.opad_count : 0;
+	rctx->cmd.u.sha.first = rctx->first;
+	rctx->cmd.u.sha.final = rctx->final;
+	rctx->cmd.u.sha.msg_bits = rctx->msg_bits;
+
+	rctx->first = 0;
+
+	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+	return ret;
+
+e_free:
+	sg_free_table(&rctx->data_sg);
+
+	return ret;
+}
+
+static int ccp_sha_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct ccp_crypto_ahash_alg *alg =
+		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
+	unsigned int block_size =
+		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+	memset(rctx, 0, sizeof(*rctx));
+
+	rctx->type = alg->type;
+	rctx->first = 1;
+
+	if (ctx->u.sha.key_len) {
+		/* Buffer the HMAC key for first update */
+		memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
+		rctx->buf_count = block_size;
+	}
+
+	return 0;
+}
+
+static int ccp_sha_update(struct ahash_request *req)
+{
+	return ccp_do_sha_update(req, req->nbytes, 0);
+}
+
+static int ccp_sha_final(struct ahash_request *req)
+{
+	return ccp_do_sha_update(req, 0, 1);
+}
+
+static int ccp_sha_finup(struct ahash_request *req)
+{
+	return ccp_do_sha_update(req, req->nbytes, 1);
+}
+
+static int ccp_sha_digest(struct ahash_request *req)
+{
+	int ret;
+
+	ret = ccp_sha_init(req);
+	if (ret)
+		return ret;
+
+	return ccp_sha_finup(req);
+}
+
+static int ccp_sha_export(struct ahash_request *req, void *out)
+{
+	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct ccp_sha_exp_ctx state;
+
+	/* Don't let anything leak to 'out' */
+	memset(&state, 0, sizeof(state));
+
+	state.type = rctx->type;
+	state.msg_bits = rctx->msg_bits;
+	state.first = rctx->first;
+	memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
+	state.buf_count = rctx->buf_count;
+	memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+	/* 'out' may not be aligned so memcpy from local variable */
+	memcpy(out, &state, sizeof(state));
+
+	return 0;
+}
+
+static int ccp_sha_import(struct ahash_request *req, const void *in)
+{
+	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct ccp_sha_exp_ctx state;
+
+	/* 'in' may not be aligned so memcpy to local variable */
+	memcpy(&state, in, sizeof(state));
+
+	memset(rctx, 0, sizeof(*rctx));
+	rctx->type = state.type;
+	rctx->msg_bits = state.msg_bits;
+	rctx->first = state.first;
+	memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
+	rctx->buf_count = state.buf_count;
+	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+	return 0;
+}
+
+static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
+			  unsigned int key_len)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+	struct crypto_shash *shash = ctx->u.sha.hmac_tfm;
+
+	SHASH_DESC_ON_STACK(sdesc, shash);
+
+	unsigned int block_size = crypto_shash_blocksize(shash);
+	unsigned int digest_size = crypto_shash_digestsize(shash);
+	int i, ret;
+
+	/* Set to zero until complete */
+	ctx->u.sha.key_len = 0;
+
+	/* Clear key area to provide zero padding for keys smaller
+	 * than the block size
+	 */
+	memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));
+
+	if (key_len > block_size) {
+		/* Must hash the input key */
+		sdesc->tfm = shash;
+		sdesc->flags = crypto_ahash_get_flags(tfm) &
+			CRYPTO_TFM_REQ_MAY_SLEEP;
+
+		ret = crypto_shash_digest(sdesc, key, key_len,
+					  ctx->u.sha.key);
+		if (ret) {
+			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+			return -EINVAL;
+		}
+
+		key_len = digest_size;
+	} else {
+		memcpy(ctx->u.sha.key, key, key_len);
+	}
+
+	for (i = 0; i < block_size; i++) {
+		ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ HMAC_IPAD_VALUE;
+		ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ HMAC_OPAD_VALUE;
+	}
+
+	sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
+	ctx->u.sha.opad_count = block_size;
+
+	ctx->u.sha.key_len = key_len;
+
+	return 0;
+}
+
+static int ccp_sha_cra_init(struct crypto_tfm *tfm)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+
+	ctx->complete = ccp_sha_complete;
+	ctx->u.sha.key_len = 0;
+
+	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));
+
+	return 0;
+}
+
+static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
+	struct crypto_shash *hmac_tfm;
+
+	hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0);
+	if (IS_ERR(hmac_tfm)) {
+		pr_warn("could not load driver %s needed for HMAC support\n",
+			alg->child_alg);
+		return PTR_ERR(hmac_tfm);
+	}
+
+	ctx->u.sha.hmac_tfm = hmac_tfm;
+
+	return ccp_sha_cra_init(tfm);
+}
+
+static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (ctx->u.sha.hmac_tfm)
+		crypto_free_shash(ctx->u.sha.hmac_tfm);
+
+	ccp_sha_cra_exit(tfm);
+}
+
+struct ccp_sha_def {
+	unsigned int version;
+	const char *name;
+	const char *drv_name;
+	enum ccp_sha_type type;
+	u32 digest_size;
+	u32 block_size;
+};
+
+static struct ccp_sha_def sha_algs[] = {
+	{
+		.version	= CCP_VERSION(3, 0),
+		.name		= "sha1",
+		.drv_name	= "sha1-ccp",
+		.type		= CCP_SHA_TYPE_1,
+		.digest_size	= SHA1_DIGEST_SIZE,
+		.block_size	= SHA1_BLOCK_SIZE,
+	},
+	{
+		.version	= CCP_VERSION(3, 0),
+		.name		= "sha224",
+		.drv_name	= "sha224-ccp",
+		.type		= CCP_SHA_TYPE_224,
+		.digest_size	= SHA224_DIGEST_SIZE,
+		.block_size	= SHA224_BLOCK_SIZE,
+	},
+	{
+		.version	= CCP_VERSION(3, 0),
+		.name		= "sha256",
+		.drv_name	= "sha256-ccp",
+		.type		= CCP_SHA_TYPE_256,
+		.digest_size	= SHA256_DIGEST_SIZE,
+		.block_size	= SHA256_BLOCK_SIZE,
+	},
+	{
+		.version	= CCP_VERSION(5, 0),
+		.name		= "sha384",
+		.drv_name	= "sha384-ccp",
+		.type		= CCP_SHA_TYPE_384,
+		.digest_size	= SHA384_DIGEST_SIZE,
+		.block_size	= SHA384_BLOCK_SIZE,
+	},
+	{
+		.version	= CCP_VERSION(5, 0),
+		.name		= "sha512",
+		.drv_name	= "sha512-ccp",
+		.type		= CCP_SHA_TYPE_512,
+		.digest_size	= SHA512_DIGEST_SIZE,
+		.block_size	= SHA512_BLOCK_SIZE,
+	},
+};
+
+static int ccp_register_hmac_alg(struct list_head *head,
+				 const struct ccp_sha_def *def,
+				 const struct ccp_crypto_ahash_alg *base_alg)
+{
+	struct ccp_crypto_ahash_alg *ccp_alg;
+	struct ahash_alg *alg;
+	struct hash_alg_common *halg;
+	struct crypto_alg *base;
+	int ret;
+
+	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+	if (!ccp_alg)
+		return -ENOMEM;
+
+	/* Copy the base algorithm and only change what's necessary */
+	*ccp_alg = *base_alg;
+	INIT_LIST_HEAD(&ccp_alg->entry);
+
+	strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);
+
+	alg = &ccp_alg->alg;
+	alg->setkey = ccp_sha_setkey;
+
+	halg = &alg->halg;
+
+	base = &halg->base;
+	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
+	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
+		 def->drv_name);
+	base->cra_init = ccp_hmac_sha_cra_init;
+	base->cra_exit = ccp_hmac_sha_cra_exit;
+
+	ret = crypto_register_ahash(alg);
+	if (ret) {
+		pr_err("%s ahash algorithm registration error (%d)\n",
+		       base->cra_name, ret);
+		kfree(ccp_alg);
+		return ret;
+	}
+
+	list_add(&ccp_alg->entry, head);
+
+	return ret;
+}
+
+static int ccp_register_sha_alg(struct list_head *head,
+				const struct ccp_sha_def *def)
+{
+	struct ccp_crypto_ahash_alg *ccp_alg;
+	struct ahash_alg *alg;
+	struct hash_alg_common *halg;
+	struct crypto_alg *base;
+	int ret;
+
+	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+	if (!ccp_alg)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ccp_alg->entry);
+
+	ccp_alg->type = def->type;
+
+	alg = &ccp_alg->alg;
+	alg->init = ccp_sha_init;
+	alg->update = ccp_sha_update;
+	alg->final = ccp_sha_final;
+	alg->finup = ccp_sha_finup;
+	alg->digest = ccp_sha_digest;
+	alg->export = ccp_sha_export;
+	alg->import = ccp_sha_import;
+
+	halg = &alg->halg;
+	halg->digestsize = def->digest_size;
+	halg->statesize = sizeof(struct ccp_sha_exp_ctx);
+
+	base = &halg->base;
+	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 def->drv_name);
+	base->cra_flags = CRYPTO_ALG_ASYNC |
+			  CRYPTO_ALG_KERN_DRIVER_ONLY |
+			  CRYPTO_ALG_NEED_FALLBACK;
+	base->cra_blocksize = def->block_size;
+	base->cra_ctxsize = sizeof(struct ccp_ctx);
+	base->cra_priority = CCP_CRA_PRIORITY;
+	base->cra_init = ccp_sha_cra_init;
+	base->cra_exit = ccp_sha_cra_exit;
+	base->cra_module = THIS_MODULE;
+
+	ret = crypto_register_ahash(alg);
+	if (ret) {
+		pr_err("%s ahash algorithm registration error (%d)\n",
+		       base->cra_name, ret);
+		kfree(ccp_alg);
+		return ret;
+	}
+
+	list_add(&ccp_alg->entry, head);
+
+	ret = ccp_register_hmac_alg(head, def, ccp_alg);
+
+	return ret;
+}
+
+int ccp_register_sha_algs(struct list_head *head)
+{
+	int i, ret;
+	unsigned int ccpversion = ccp_version();
+
+	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
+		if (sha_algs[i].version > ccpversion)
+			continue;
+		ret = ccp_register_sha_alg(head, &sha_algs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
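
Illustrative sketch, not part of the patch: computing an HMAC digest through the ahash API, which may be served by the hmac-sha256-ccp instance registered above (the ipad block buffered by ccp_sha_init() becomes the first data block, and the opad block is handed to the CCP via cmd.u.sha.opad for the outer hash). The function name is hypothetical and error paths are abbreviated.

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int hmac_sha256_demo(const u8 *key, unsigned int keylen,
			    struct scatterlist *sg, unsigned int len,
			    u8 *digest)		/* SHA256_DIGEST_SIZE bytes */
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ahash_setkey(tfm, key, keylen);
	if (ret)
		goto out_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, digest, len);
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_tfm:
	crypto_free_ahash(tfm);
	return ret;
}
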
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
new file mode 100644
index 0000000..b9fd090
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -0,0 +1,285 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) crypto API support
+ *
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CCP_CRYPTO_H__
+#define __CCP_CRYPTO_H__
+
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/ccp.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/internal/aead.h>
+#include <crypto/aead.h>
+#include <crypto/ctr.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/rsa.h>
+
+#define	CCP_LOG_LEVEL	KERN_INFO
+
+#define CCP_CRA_PRIORITY	300
+
+struct ccp_crypto_ablkcipher_alg {
+	struct list_head entry;
+
+	u32 mode;
+
+	struct crypto_alg alg;
+};
+
+struct ccp_crypto_aead {
+	struct list_head entry;
+
+	u32 mode;
+
+	struct aead_alg alg;
+};
+
+struct ccp_crypto_ahash_alg {
+	struct list_head entry;
+
+	const __be32 *init;
+	u32 type;
+	u32 mode;
+
+	/* Child algorithm used for HMAC, CMAC, etc */
+	char child_alg[CRYPTO_MAX_ALG_NAME];
+
+	struct ahash_alg alg;
+};
+
+struct ccp_crypto_akcipher_alg {
+	struct list_head entry;
+
+	struct akcipher_alg alg;
+};
+
+static inline struct ccp_crypto_ablkcipher_alg *
+	ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+
+	return container_of(alg, struct ccp_crypto_ablkcipher_alg, alg);
+}
+
+static inline struct ccp_crypto_ahash_alg *
+	ccp_crypto_ahash_alg(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct ahash_alg *ahash_alg;
+
+	ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+	return container_of(ahash_alg, struct ccp_crypto_ahash_alg, alg);
+}
+
+/***** AES related defines *****/
+struct ccp_aes_ctx {
+	/* Fallback cipher for XTS with unsupported unit sizes */
+	struct crypto_skcipher *tfm_skcipher;
+
+	/* Cipher used to generate CMAC K1/K2 keys */
+	struct crypto_cipher *tfm_cipher;
+
+	enum ccp_engine engine;
+	enum ccp_aes_type type;
+	enum ccp_aes_mode mode;
+
+	struct scatterlist key_sg;
+	unsigned int key_len;
+	u8 key[AES_MAX_KEY_SIZE * 2];
+
+	u8 nonce[CTR_RFC3686_NONCE_SIZE];
+
+	/* CMAC key structures */
+	struct scatterlist k1_sg;
+	struct scatterlist k2_sg;
+	unsigned int kn_len;
+	u8 k1[AES_BLOCK_SIZE];
+	u8 k2[AES_BLOCK_SIZE];
+};
+
+struct ccp_aes_req_ctx {
+	struct scatterlist iv_sg;
+	u8 iv[AES_BLOCK_SIZE];
+
+	struct scatterlist tag_sg;
+	u8 tag[AES_BLOCK_SIZE];
+
+	/* Fields used for RFC3686 requests */
+	u8 *rfc3686_info;
+	u8 rfc3686_iv[AES_BLOCK_SIZE];
+
+	struct ccp_cmd cmd;
+};
+
+struct ccp_aes_cmac_req_ctx {
+	unsigned int null_msg;
+	unsigned int final;
+
+	struct scatterlist *src;
+	unsigned int nbytes;
+
+	u64 hash_cnt;
+	unsigned int hash_rem;
+
+	struct sg_table data_sg;
+
+	struct scatterlist iv_sg;
+	u8 iv[AES_BLOCK_SIZE];
+
+	struct scatterlist buf_sg;
+	unsigned int buf_count;
+	u8 buf[AES_BLOCK_SIZE];
+
+	struct scatterlist pad_sg;
+	unsigned int pad_count;
+	u8 pad[AES_BLOCK_SIZE];
+
+	struct ccp_cmd cmd;
+};
+
+struct ccp_aes_cmac_exp_ctx {
+	unsigned int null_msg;
+
+	u8 iv[AES_BLOCK_SIZE];
+
+	unsigned int buf_count;
+	u8 buf[AES_BLOCK_SIZE];
+};
+
+/***** 3DES related defines *****/
+struct ccp_des3_ctx {
+	enum ccp_engine engine;
+	enum ccp_des3_type type;
+	enum ccp_des3_mode mode;
+
+	struct scatterlist key_sg;
+	unsigned int key_len;
+	u8 key[AES_MAX_KEY_SIZE];
+};
+
+struct ccp_des3_req_ctx {
+	struct scatterlist iv_sg;
+	u8 iv[AES_BLOCK_SIZE];
+
+	struct ccp_cmd cmd;
+};
+
+/* SHA-related defines
+ * These values must be large enough to accommodate any variant
+ */
+#define MAX_SHA_CONTEXT_SIZE	SHA512_DIGEST_SIZE
+#define MAX_SHA_BLOCK_SIZE	SHA512_BLOCK_SIZE
+
+struct ccp_sha_ctx {
+	struct scatterlist opad_sg;
+	unsigned int opad_count;
+
+	unsigned int key_len;
+	u8 key[MAX_SHA_BLOCK_SIZE];
+	u8 ipad[MAX_SHA_BLOCK_SIZE];
+	u8 opad[MAX_SHA_BLOCK_SIZE];
+	struct crypto_shash *hmac_tfm;
+};
+
+struct ccp_sha_req_ctx {
+	enum ccp_sha_type type;
+
+	u64 msg_bits;
+
+	unsigned int first;
+	unsigned int final;
+
+	struct scatterlist *src;
+	unsigned int nbytes;
+
+	u64 hash_cnt;
+	unsigned int hash_rem;
+
+	struct sg_table data_sg;
+
+	struct scatterlist ctx_sg;
+	u8 ctx[MAX_SHA_CONTEXT_SIZE];
+
+	struct scatterlist buf_sg;
+	unsigned int buf_count;
+	u8 buf[MAX_SHA_BLOCK_SIZE];
+
+	/* CCP driver command */
+	struct ccp_cmd cmd;
+};
+
+struct ccp_sha_exp_ctx {
+	enum ccp_sha_type type;
+
+	u64 msg_bits;
+
+	unsigned int first;
+
+	u8 ctx[MAX_SHA_CONTEXT_SIZE];
+
+	unsigned int buf_count;
+	u8 buf[MAX_SHA_BLOCK_SIZE];
+};
+
+/***** RSA related defines *****/
+
+struct ccp_rsa_ctx {
+	unsigned int key_len; /* in bits */
+	struct scatterlist e_sg;
+	u8 *e_buf;
+	unsigned int e_len;
+	struct scatterlist n_sg;
+	u8 *n_buf;
+	unsigned int n_len;
+	struct scatterlist d_sg;
+	u8 *d_buf;
+	unsigned int d_len;
+};
+
+struct ccp_rsa_req_ctx {
+	struct ccp_cmd cmd;
+};
+
+#define	CCP_RSA_MAXMOD	(4 * 1024 / 8)
+#define	CCP5_RSA_MAXMOD	(16 * 1024 / 8)
+
+/***** Common Context Structure *****/
+struct ccp_ctx {
+	int (*complete)(struct crypto_async_request *req, int ret);
+
+	union {
+		struct ccp_aes_ctx aes;
+		struct ccp_rsa_ctx rsa;
+		struct ccp_sha_ctx sha;
+		struct ccp_des3_ctx des3;
+	} u;
+};
+
+int ccp_crypto_enqueue_request(struct crypto_async_request *req,
+			       struct ccp_cmd *cmd);
+struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
+					    struct scatterlist *sg_add);
+
+int ccp_register_aes_algs(struct list_head *head);
+int ccp_register_aes_cmac_algs(struct list_head *head);
+int ccp_register_aes_xts_algs(struct list_head *head);
+int ccp_register_aes_aeads(struct list_head *head);
+int ccp_register_sha_algs(struct list_head *head);
+int ccp_register_des3_algs(struct list_head *head);
+int ccp_register_rsa_algs(struct list_head *head);
+
+#endif
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
new file mode 100644
index 0000000..1a734bd
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -0,0 +1,348 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+/* DebugFS helpers */
+#define	OBUFP		(obuf + oboff)
+#define	OBUFLEN		512
+#define	OBUFSPC		(OBUFLEN - oboff)
+#define	OSCNPRINTF(fmt, ...) \
+		scnprintf(OBUFP, OBUFSPC, fmt, ## __VA_ARGS__)
+
+#define BUFLEN	63
+
+#define	RI_VERSION_NUM	0x0000003F
+#define	RI_AES_PRESENT	0x00000040
+#define	RI_3DES_PRESENT	0x00000080
+#define	RI_SHA_PRESENT	0x00000100
+#define	RI_RSA_PRESENT	0x00000200
+#define	RI_ECC_PRESENT	0x00000400
+#define	RI_ZDE_PRESENT	0x00000800
+#define	RI_ZCE_PRESENT	0x00001000
+#define	RI_TRNG_PRESENT	0x00002000
+#define	RI_ELFC_PRESENT	0x00004000
+#define	RI_ELFC_SHIFT	14
+#define	RI_NUM_VQM	0x00078000
+#define	RI_NVQM_SHIFT	15
+#define	RI_NVQM(r)	(((r) & RI_NUM_VQM) >> RI_NVQM_SHIFT)
+#define	RI_LSB_ENTRIES	0x0FF80000
+#define	RI_NLSB_SHIFT	19
+#define	RI_NLSB(r)	(((r) & RI_LSB_ENTRIES) >> RI_NLSB_SHIFT)
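+
+/* The CMD5_PSP_CCP_VERSION register packs a 6-bit version number,
+ * per-engine presence flags, the virtual-queue count and the LSB
+ * entry count; RI_NVQM() and RI_NLSB() mask out a field and shift
+ * it down to bit 0.
+ */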
+
+static ssize_t ccp5_debugfs_info_read(struct file *filp, char __user *ubuf,
+				      size_t count, loff_t *offp)
+{
+	struct ccp_device *ccp = filp->private_data;
+	unsigned int oboff = 0;
+	unsigned int regval;
+	ssize_t ret;
+	char *obuf;
+
+	if (!ccp)
+		return 0;
+
+	obuf = kmalloc(OBUFLEN, GFP_KERNEL);
+	if (!obuf)
+		return -ENOMEM;
+
+	oboff += OSCNPRINTF("Device name: %s\n", ccp->name);
+	oboff += OSCNPRINTF("   RNG name: %s\n", ccp->rngname);
+	oboff += OSCNPRINTF("   # Queues: %d\n", ccp->cmd_q_count);
+	oboff += OSCNPRINTF("     # Cmds: %d\n", ccp->cmd_count);
+
+	regval = ioread32(ccp->io_regs + CMD5_PSP_CCP_VERSION);
+	oboff += OSCNPRINTF("    Version: %d\n", regval & RI_VERSION_NUM);
+	oboff += OSCNPRINTF("    Engines:");
+	if (regval & RI_AES_PRESENT)
+		oboff += OSCNPRINTF(" AES");
+	if (regval & RI_3DES_PRESENT)
+		oboff += OSCNPRINTF(" 3DES");
+	if (regval & RI_SHA_PRESENT)
+		oboff += OSCNPRINTF(" SHA");
+	if (regval & RI_RSA_PRESENT)
+		oboff += OSCNPRINTF(" RSA");
+	if (regval & RI_ECC_PRESENT)
+		oboff += OSCNPRINTF(" ECC");
+	if (regval & RI_ZDE_PRESENT)
+		oboff += OSCNPRINTF(" ZDE");
+	if (regval & RI_ZCE_PRESENT)
+		oboff += OSCNPRINTF(" ZCE");
+	if (regval & RI_TRNG_PRESENT)
+		oboff += OSCNPRINTF(" TRNG");
+	oboff += OSCNPRINTF("\n");
+	oboff += OSCNPRINTF("     Queues: %d\n",
+		   (regval & RI_NUM_VQM) >> RI_NVQM_SHIFT);
+	oboff += OSCNPRINTF("LSB Entries: %d\n",
+		   (regval & RI_LSB_ENTRIES) >> RI_NLSB_SHIFT);
+
+	ret = simple_read_from_buffer(ubuf, count, offp, obuf, oboff);
+	kfree(obuf);
+
+	return ret;
+}
+
+/* Return a formatted buffer containing the current
+ * statistics across all queues for a CCP.
+ */
+static ssize_t ccp5_debugfs_stats_read(struct file *filp, char __user *ubuf,
+				       size_t count, loff_t *offp)
+{
+	struct ccp_device *ccp = filp->private_data;
+	unsigned long total_xts_aes_ops = 0;
+	unsigned long total_3des_ops = 0;
+	unsigned long total_aes_ops = 0;
+	unsigned long total_sha_ops = 0;
+	unsigned long total_rsa_ops = 0;
+	unsigned long total_ecc_ops = 0;
+	unsigned long total_pt_ops = 0;
+	unsigned long total_ops = 0;
+	unsigned int oboff = 0;
+	ssize_t ret = 0;
+	unsigned int i;
+	char *obuf;
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+
+		total_ops += cmd_q->total_ops;
+		total_aes_ops += cmd_q->total_aes_ops;
+		total_xts_aes_ops += cmd_q->total_xts_aes_ops;
+		total_3des_ops += cmd_q->total_3des_ops;
+		total_sha_ops += cmd_q->total_sha_ops;
+		total_rsa_ops += cmd_q->total_rsa_ops;
+		total_pt_ops += cmd_q->total_pt_ops;
+		total_ecc_ops += cmd_q->total_ecc_ops;
+	}
+
+	obuf = kmalloc(OBUFLEN, GFP_KERNEL);
+	if (!obuf)
+		return -ENOMEM;
+
+	oboff += OSCNPRINTF("Total Interrupts Handled: %ld\n",
+			    ccp->total_interrupts);
+	oboff += OSCNPRINTF("        Total Operations: %ld\n",
+			    total_ops);
+	oboff += OSCNPRINTF("                     AES: %ld\n",
+			    total_aes_ops);
+	oboff += OSCNPRINTF("                 XTS AES: %ld\n",
+			    total_xts_aes_ops);
+	oboff += OSCNPRINTF("                    3DES: %ld\n",
+			    total_3des_ops);
+	oboff += OSCNPRINTF("                     SHA: %ld\n",
+			    total_sha_ops);
+	oboff += OSCNPRINTF("                     RSA: %ld\n",
+			    total_rsa_ops);
+	oboff += OSCNPRINTF("               Pass-Thru: %ld\n",
+			    total_pt_ops);
+	oboff += OSCNPRINTF("                     ECC: %ld\n",
+			    total_ecc_ops);
+
+	ret = simple_read_from_buffer(ubuf, count, offp, obuf, oboff);
+	kfree(obuf);
+
+	return ret;
+}
+
+/* Reset the counters in a queue
+ */
+static void ccp5_debugfs_reset_queue_stats(struct ccp_cmd_queue *cmd_q)
+{
+	cmd_q->total_ops = 0L;
+	cmd_q->total_aes_ops = 0L;
+	cmd_q->total_xts_aes_ops = 0L;
+	cmd_q->total_3des_ops = 0L;
+	cmd_q->total_sha_ops = 0L;
+	cmd_q->total_rsa_ops = 0L;
+	cmd_q->total_pt_ops = 0L;
+	cmd_q->total_ecc_ops = 0L;
+}
+
+/* Any write to the stats file resets the counters of
+ * every queue on this device, as well as the device's
+ * interrupt count.
+ */
+static ssize_t ccp5_debugfs_stats_write(struct file *filp,
+					const char __user *ubuf,
+					size_t count, loff_t *offp)
+{
+	struct ccp_device *ccp = filp->private_data;
+	int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		ccp5_debugfs_reset_queue_stats(&ccp->cmd_q[i]);
+	ccp->total_interrupts = 0L;
+
+	return count;
+}
+
+/* Return a formatted buffer containing the current information
+ * for that queue
+ */
+static ssize_t ccp5_debugfs_queue_read(struct file *filp, char __user *ubuf,
+				       size_t count, loff_t *offp)
+{
+	struct ccp_cmd_queue *cmd_q = filp->private_data;
+	unsigned int oboff = 0;
+	unsigned int regval;
+	ssize_t ret;
+	char *obuf;
+
+	if (!cmd_q)
+		return 0;
+
+	obuf = kmalloc(OBUFLEN, GFP_KERNEL);
+	if (!obuf)
+		return -ENOMEM;
+
+	oboff += OSCNPRINTF("  Total Queue Operations: %ld\n",
+			    cmd_q->total_ops);
+	oboff += OSCNPRINTF("                     AES: %ld\n",
+			    cmd_q->total_aes_ops);
+	oboff += OSCNPRINTF("                 XTS AES: %ld\n",
+			    cmd_q->total_xts_aes_ops);
+	oboff += OSCNPRINTF("                    3DES: %ld\n",
+			    cmd_q->total_3des_ops);
+	oboff += OSCNPRINTF("                     SHA: %ld\n",
+			    cmd_q->total_sha_ops);
+	oboff += OSCNPRINTF("                     RSA: %ld\n",
+			    cmd_q->total_rsa_ops);
+	oboff += OSCNPRINTF("               Pass-Thru: %ld\n",
+			    cmd_q->total_pt_ops);
+	oboff += OSCNPRINTF("                     ECC: %ld\n",
+			    cmd_q->total_ecc_ops);
+
+	regval = ioread32(cmd_q->reg_int_enable);
+	oboff += OSCNPRINTF("      Enabled Interrupts:");
+	if (regval & INT_EMPTY_QUEUE)
+		oboff += OSCNPRINTF(" EMPTY");
+	if (regval & INT_QUEUE_STOPPED)
+		oboff += OSCNPRINTF(" STOPPED");
+	if (regval & INT_ERROR)
+		oboff += OSCNPRINTF(" ERROR");
+	if (regval & INT_COMPLETION)
+		oboff += OSCNPRINTF(" COMPLETION");
+	oboff += OSCNPRINTF("\n");
+
+	ret = simple_read_from_buffer(ubuf, count, offp, obuf, oboff);
+	kfree(obuf);
+
+	return ret;
+}
+
+/* Any write to a queue's stats file resets that
+ * queue's counters to zero.
+ */
+static ssize_t ccp5_debugfs_queue_write(struct file *filp,
+					const char __user *ubuf,
+					size_t count, loff_t *offp)
+{
+	struct ccp_cmd_queue *cmd_q = filp->private_data;
+
+	ccp5_debugfs_reset_queue_stats(cmd_q);
+
+	return count;
+}
+
+static const struct file_operations ccp_debugfs_info_ops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = ccp5_debugfs_info_read,
+	.write = NULL,
+};
+
+static const struct file_operations ccp_debugfs_queue_ops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = ccp5_debugfs_queue_read,
+	.write = ccp5_debugfs_queue_write,
+};
+
+static const struct file_operations ccp_debugfs_stats_ops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = ccp5_debugfs_stats_read,
+	.write = ccp5_debugfs_stats_write,
+};
+
+static struct dentry *ccp_debugfs_dir;
+static DEFINE_MUTEX(ccp_debugfs_lock);
+
+#define	MAX_NAME_LEN	20
+
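+/* Create the debugfs hierarchy for one CCP device:
+ *
+ *   <debugfs>/<KBUILD_MODNAME>/<ccp name>/info        device summary (read-only)
+ *   <debugfs>/<KBUILD_MODNAME>/<ccp name>/stats       aggregate counters (write to reset)
+ *   <debugfs>/<KBUILD_MODNAME>/<ccp name>/q<id>/stats per-queue counters (write to reset)
+ *
+ * The top-level directory is shared by all devices and is created
+ * only once, under ccp_debugfs_lock.
+ */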
+void ccp5_debugfs_setup(struct ccp_device *ccp)
+{
+	struct ccp_cmd_queue *cmd_q;
+	char name[MAX_NAME_LEN + 1];
+	struct dentry *debugfs_info;
+	struct dentry *debugfs_stats;
+	struct dentry *debugfs_q_instance;
+	struct dentry *debugfs_q_stats;
+	int i;
+
+	if (!debugfs_initialized())
+		return;
+
+	mutex_lock(&ccp_debugfs_lock);
+	if (!ccp_debugfs_dir)
+		ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	mutex_unlock(&ccp_debugfs_lock);
+	if (!ccp_debugfs_dir)
+		return;
+
+	ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir);
+	if (!ccp->debugfs_instance)
+		goto err;
+
+	debugfs_info = debugfs_create_file("info", 0400,
+					   ccp->debugfs_instance, ccp,
+					   &ccp_debugfs_info_ops);
+	if (!debugfs_info)
+		goto err;
+
+	debugfs_stats = debugfs_create_file("stats", 0600,
+					    ccp->debugfs_instance, ccp,
+					    &ccp_debugfs_stats_ops);
+	if (!debugfs_stats)
+		goto err;
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+
+		snprintf(name, MAX_NAME_LEN - 1, "q%d", cmd_q->id);
+
+		debugfs_q_instance =
+			debugfs_create_dir(name, ccp->debugfs_instance);
+		if (!debugfs_q_instance)
+			goto err;
+
+		debugfs_q_stats =
+			debugfs_create_file("stats", 0600,
+					    debugfs_q_instance, cmd_q,
+					    &ccp_debugfs_queue_ops);
+		if (!debugfs_q_stats)
+			goto err;
+	}
+
+	return;
+
+err:
+	debugfs_remove_recursive(ccp->debugfs_instance);
+}
+
+void ccp5_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(ccp_debugfs_dir);
+}
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
new file mode 100644
index 0000000..240bebb
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -0,0 +1,601 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
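+/* Allocate a contiguous run of KSB entries, sleeping until enough
+ * become free. Returns the first entry number (offset by KSB_START),
+ * or 0 if the wait was interrupted.
+ */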
+static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
+{
+	int start;
+	struct ccp_device *ccp = cmd_q->ccp;
+
+	for (;;) {
+		mutex_lock(&ccp->sb_mutex);
+
+		start = (u32)bitmap_find_next_zero_area(ccp->sb,
+							ccp->sb_count,
+							ccp->sb_start,
+							count, 0);
+		if (start < ccp->sb_count) {
+			bitmap_set(ccp->sb, start, count);
+
+			mutex_unlock(&ccp->sb_mutex);
+			break;
+		}
+
+		ccp->sb_avail = 0;
+
+		mutex_unlock(&ccp->sb_mutex);
+
+		/* Wait for KSB entries to become available */
+		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
+			return 0;
+	}
+
+	return KSB_START + start;
+}
+
+static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
+			 unsigned int count)
+{
+	struct ccp_device *ccp = cmd_q->ccp;
+
+	if (!start)
+		return;
+
+	mutex_lock(&ccp->sb_mutex);
+
+	bitmap_clear(ccp->sb, start - KSB_START, count);
+
+	ccp->sb_avail = 1;
+
+	mutex_unlock(&ccp->sb_mutex);
+
+	wake_up_interruptible_all(&ccp->sb_queue);
+}
+
+static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)
+{
+	return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+}
+
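+/* Submit one operation to a v3 CCP. The caller provides the contents
+ * of the CMD_REQ1..CMD_REQn registers in cr[]; these are written
+ * first, and the job is started by writing CMD_REQ0 last. If an
+ * interrupt on completion was requested, wait for it and report any
+ * queue error.
+ */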
+static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
+{
+	struct ccp_cmd_queue *cmd_q = op->cmd_q;
+	struct ccp_device *ccp = cmd_q->ccp;
+	void __iomem *cr_addr;
+	u32 cr0, cmd;
+	unsigned int i;
+	int ret = 0;
+
+	/* We could read a status register to see how many free slots
+	 * are actually available, but reading that register resets it
+	 * and you could lose some error information.
+	 */
+	cmd_q->free_slots--;
+
+	cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
+	      | (op->jobid << REQ0_JOBID_SHIFT)
+	      | REQ0_WAIT_FOR_WRITE;
+
+	if (op->soc)
+		cr0 |= REQ0_STOP_ON_COMPLETE
+		       | REQ0_INT_ON_COMPLETE;
+
+	if (op->ioc || !cmd_q->free_slots)
+		cr0 |= REQ0_INT_ON_COMPLETE;
+
+	/* Start at CMD_REQ1 */
+	cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;
+
+	mutex_lock(&ccp->req_mutex);
+
+	/* Write CMD_REQ1 through CMD_REQx first */
+	for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
+		iowrite32(*(cr + i), cr_addr);
+
+	/* Tell the CCP to start */
+	wmb();
+	iowrite32(cr0, ccp->io_regs + CMD_REQ0);
+
+	mutex_unlock(&ccp->req_mutex);
+
+	if (cr0 & REQ0_INT_ON_COMPLETE) {
+		/* Wait for the job to complete */
+		ret = wait_event_interruptible(cmd_q->int_queue,
+					       cmd_q->int_rcvd);
+		if (ret || cmd_q->cmd_error) {
+			/* On error delete all related jobs from the queue */
+			cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
+			      | op->jobid;
+			if (cmd_q->cmd_error)
+				ccp_log_error(cmd_q->ccp,
+					      cmd_q->cmd_error);
+
+			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+
+			if (!ret)
+				ret = -EIO;
+		} else if (op->soc) {
+			/* Delete just head job from the queue on SoC */
+			cmd = DEL_Q_ACTIVE
+			      | (cmd_q->id << DEL_Q_ID_SHIFT)
+			      | op->jobid;
+
+			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+		}
+
+		cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);
+
+		cmd_q->int_rcvd = 0;
+	}
+
+	return ret;
+}
+
+static int ccp_perform_aes(struct ccp_op *op)
+{
+	u32 cr[6];
+
+	/* Fill out the register contents for REQ1 through REQ6 */
+	cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
+		| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
+		| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
+		| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
+		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
+	cr[1] = op->src.u.dma.length - 1;
+	cr[2] = ccp_addr_lo(&op->src.u.dma);
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
+		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+		| ccp_addr_hi(&op->src.u.dma);
+	cr[4] = ccp_addr_lo(&op->dst.u.dma);
+	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+		| ccp_addr_hi(&op->dst.u.dma);
+
+	if (op->u.aes.mode == CCP_AES_MODE_CFB)
+		cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);
+
+	if (op->eom)
+		cr[0] |= REQ1_EOM;
+
+	if (op->init)
+		cr[0] |= REQ1_INIT;
+
+	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_xts_aes(struct ccp_op *op)
+{
+	u32 cr[6];
+
+	/* Fill out the register contents for REQ1 through REQ6 */
+	cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
+		| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
+		| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
+		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
+	cr[1] = op->src.u.dma.length - 1;
+	cr[2] = ccp_addr_lo(&op->src.u.dma);
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
+		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+		| ccp_addr_hi(&op->src.u.dma);
+	cr[4] = ccp_addr_lo(&op->dst.u.dma);
+	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+		| ccp_addr_hi(&op->dst.u.dma);
+
+	if (op->eom)
+		cr[0] |= REQ1_EOM;
+
+	if (op->init)
+		cr[0] |= REQ1_INIT;
+
+	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_sha(struct ccp_op *op)
+{
+	u32 cr[6];
+
+	/* Fill out the register contents for REQ1 through REQ6 */
+	cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
+		| (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
+		| REQ1_INIT;
+	cr[1] = op->src.u.dma.length - 1;
+	cr[2] = ccp_addr_lo(&op->src.u.dma);
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
+		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+		| ccp_addr_hi(&op->src.u.dma);
+
+	if (op->eom) {
+		cr[0] |= REQ1_EOM;
+		cr[4] = lower_32_bits(op->u.sha.msg_bits);
+		cr[5] = upper_32_bits(op->u.sha.msg_bits);
+	} else {
+		cr[4] = 0;
+		cr[5] = 0;
+	}
+
+	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_rsa(struct ccp_op *op)
+{
+	u32 cr[6];
+
+	/* Fill out the register contents for REQ1 through REQ6 */
+	cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
+		| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
+		| (op->sb_key << REQ1_KEY_KSB_SHIFT)
+		| REQ1_EOM;
+	cr[1] = op->u.rsa.input_len - 1;
+	cr[2] = ccp_addr_lo(&op->src.u.dma);
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
+		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+		| ccp_addr_hi(&op->src.u.dma);
+	cr[4] = ccp_addr_lo(&op->dst.u.dma);
+	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+		| ccp_addr_hi(&op->dst.u.dma);
+
+	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_passthru(struct ccp_op *op)
+{
+	u32 cr[6];
+
+	/* Fill out the register contents for REQ1 through REQ6 */
+	cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
+		| (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
+		| (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);
+
+	if (op->src.type == CCP_MEMTYPE_SYSTEM)
+		cr[1] = op->src.u.dma.length - 1;
+	else
+		cr[1] = op->dst.u.dma.length - 1;
+
+	if (op->src.type == CCP_MEMTYPE_SYSTEM) {
+		cr[2] = ccp_addr_lo(&op->src.u.dma);
+		cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+			| ccp_addr_hi(&op->src.u.dma);
+
+		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+			cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
+	} else {
+		cr[2] = op->src.u.sb * CCP_SB_BYTES;
+		cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
+	}
+
+	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
+		cr[4] = ccp_addr_lo(&op->dst.u.dma);
+		cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+			| ccp_addr_hi(&op->dst.u.dma);
+	} else {
+		cr[4] = op->dst.u.sb * CCP_SB_BYTES;
+		cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
+	}
+
+	if (op->eom)
+		cr[0] |= REQ1_EOM;
+
+	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_ecc(struct ccp_op *op)
+{
+	u32 cr[6];
+
+	/* Fill out the register contents for REQ1 through REQ6 */
+	cr[0] = REQ1_ECC_AFFINE_CONVERT
+		| (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
+		| (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
+		| REQ1_EOM;
+	cr[1] = op->src.u.dma.length - 1;
+	cr[2] = ccp_addr_lo(&op->src.u.dma);
+	cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+		| ccp_addr_hi(&op->src.u.dma);
+	cr[4] = ccp_addr_lo(&op->dst.u.dma);
+	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+		| ccp_addr_hi(&op->dst.u.dma);
+
+	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
+{
+	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
+{
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
+}
+
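+/* Interrupt bottom half: each queue owns a pair of bits in the shared
+ * interrupt status register (int_ok/int_err). For every queue with a
+ * bit set, latch the status registers, record the first error seen,
+ * acknowledge the interrupt and wake the queue's kthread.
+ */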
+static void ccp_irq_bh(unsigned long data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+	struct ccp_cmd_queue *cmd_q;
+	u32 q_int, status;
+	unsigned int i;
+
+	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+
+		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+		if (q_int) {
+			cmd_q->int_status = status;
+			cmd_q->q_status = ioread32(cmd_q->reg_status);
+			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+			/* On error, only save the first error value */
+			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+			cmd_q->int_rcvd = 1;
+
+			/* Acknowledge the interrupt and wake the kthread */
+			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+			wake_up_interruptible(&cmd_q->int_queue);
+		}
+	}
+	ccp_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+
+	ccp_disable_queue_interrupts(ccp);
+	if (ccp->use_tasklet)
+		tasklet_schedule(&ccp->irq_tasklet);
+	else
+		ccp_irq_bh((unsigned long)ccp);
+
+	return IRQ_HANDLED;
+}
+
+static int ccp_init(struct ccp_device *ccp)
+{
+	struct device *dev = ccp->dev;
+	struct ccp_cmd_queue *cmd_q;
+	struct dma_pool *dma_pool;
+	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+	unsigned int qmr, i;
+	int ret;
+
+	/* Find available queues */
+	ccp->qim = 0;
+	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+	for (i = 0; i < MAX_HW_QUEUES; i++) {
+		if (!(qmr & (1 << i)))
+			continue;
+
+		/* Allocate a dma pool for this queue */
+		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
+			 ccp->name, i);
+		dma_pool = dma_pool_create(dma_pool_name, dev,
+					   CCP_DMAPOOL_MAX_SIZE,
+					   CCP_DMAPOOL_ALIGN, 0);
+		if (!dma_pool) {
+			dev_err(dev, "unable to allocate dma pool\n");
+			ret = -ENOMEM;
+			goto e_pool;
+		}
+
+		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
+		ccp->cmd_q_count++;
+
+		cmd_q->ccp = ccp;
+		cmd_q->id = i;
+		cmd_q->dma_pool = dma_pool;
+
+		/* Reserve 2 KSB regions for the queue */
+		cmd_q->sb_key = KSB_START + ccp->sb_start++;
+		cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
+		ccp->sb_count -= 2;
+
+		/* Preset some register values and masks that are queue
+		 * number dependent
+		 */
+		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
+				    (CMD_Q_STATUS_INCR * i);
+		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
+					(CMD_Q_STATUS_INCR * i);
+		cmd_q->int_ok = 1 << (i * 2);
+		cmd_q->int_err = 1 << ((i * 2) + 1);
+
+		cmd_q->free_slots = ccp_get_free_slots(cmd_q);
+
+		init_waitqueue_head(&cmd_q->int_queue);
+
+		/* Build queue interrupt mask (two interrupts per queue) */
+		ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
+
+#ifdef CONFIG_ARM64
+		/* For arm64 set the recommended queue cache settings */
+		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
+			  (CMD_Q_CACHE_INC * i));
+#endif
+
+		dev_dbg(dev, "queue #%u available\n", i);
+	}
+	if (ccp->cmd_q_count == 0) {
+		dev_notice(dev, "no command queues available\n");
+		ret = -EIO;
+		goto e_pool;
+	}
+	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
+
+	/* Disable and clear interrupts until ready */
+	ccp_disable_queue_interrupts(ccp);
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+
+		ioread32(cmd_q->reg_int_status);
+		ioread32(cmd_q->reg_status);
+	}
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
+
+	/* Request an irq */
+	ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp);
+	if (ret) {
+		dev_err(dev, "unable to allocate an IRQ\n");
+		goto e_pool;
+	}
+
+	/* Initialize the ISR tasklet */
+	if (ccp->use_tasklet)
+		tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
+			     (unsigned long)ccp);
+
+	dev_dbg(dev, "Starting threads...\n");
+	/* Create a kthread for each queue */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct task_struct *kthread;
+
+		cmd_q = &ccp->cmd_q[i];
+
+		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
+					 "%s-q%u", ccp->name, cmd_q->id);
+		if (IS_ERR(kthread)) {
+			dev_err(dev, "error creating queue thread (%ld)\n",
+				PTR_ERR(kthread));
+			ret = PTR_ERR(kthread);
+			goto e_kthread;
+		}
+
+		cmd_q->kthread = kthread;
+		wake_up_process(kthread);
+	}
+
+	dev_dbg(dev, "Enabling interrupts...\n");
+	/* Enable interrupts */
+	ccp_enable_queue_interrupts(ccp);
+
+	dev_dbg(dev, "Registering device...\n");
+	ccp_add_device(ccp);
+
+	ret = ccp_register_rng(ccp);
+	if (ret)
+		goto e_kthread;
+
+	/* Register the DMA engine support */
+	ret = ccp_dmaengine_register(ccp);
+	if (ret)
+		goto e_hwrng;
+
+	return 0;
+
+e_hwrng:
+	ccp_unregister_rng(ccp);
+
+e_kthread:
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		if (ccp->cmd_q[i].kthread)
+			kthread_stop(ccp->cmd_q[i].kthread);
+
+	sp_free_ccp_irq(ccp->sp, ccp);
+
+e_pool:
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+	return ret;
+}
+
+static void ccp_destroy(struct ccp_device *ccp)
+{
+	struct ccp_cmd_queue *cmd_q;
+	struct ccp_cmd *cmd;
+	unsigned int i;
+
+	/* Unregister the DMA engine */
+	ccp_dmaengine_unregister(ccp);
+
+	/* Unregister the RNG */
+	ccp_unregister_rng(ccp);
+
+	/* Remove this device from the list of available units */
+	ccp_del_device(ccp);
+
+	/* Disable and clear interrupts */
+	ccp_disable_queue_interrupts(ccp);
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+
+		ioread32(cmd_q->reg_int_status);
+		ioread32(cmd_q->reg_status);
+	}
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
+
+	/* Stop the queue kthreads */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		if (ccp->cmd_q[i].kthread)
+			kthread_stop(ccp->cmd_q[i].kthread);
+
+	sp_free_ccp_irq(ccp->sp, ccp);
+
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+	/* Flush the cmd and backlog queue */
+	while (!list_empty(&ccp->cmd)) {
+		/* Invoke the callback directly with an error code */
+		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
+		list_del(&cmd->entry);
+		cmd->callback(cmd->data, -ENODEV);
+	}
+	while (!list_empty(&ccp->backlog)) {
+		/* Invoke the callback directly with an error code */
+		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
+		list_del(&cmd->entry);
+		cmd->callback(cmd->data, -ENODEV);
+	}
+}
+
+static const struct ccp_actions ccp3_actions = {
+	.aes = ccp_perform_aes,
+	.xts_aes = ccp_perform_xts_aes,
+	.des3 = NULL,
+	.sha = ccp_perform_sha,
+	.rsa = ccp_perform_rsa,
+	.passthru = ccp_perform_passthru,
+	.ecc = ccp_perform_ecc,
+	.sballoc = ccp_alloc_ksb,
+	.sbfree = ccp_free_ksb,
+	.init = ccp_init,
+	.destroy = ccp_destroy,
+	.get_free_slots = ccp_get_free_slots,
+	.irqhandler = ccp_irq_handler,
+};
+
+const struct ccp_vdata ccpv3_platform = {
+	.version = CCP_VERSION(3, 0),
+	.setup = NULL,
+	.perform = &ccp3_actions,
+	.offset = 0,
+};
+
+const struct ccp_vdata ccpv3 = {
+	.version = CCP_VERSION(3, 0),
+	.setup = NULL,
+	.perform = &ccp3_actions,
+	.offset = 0x20000,
+	.rsamax = CCP_RSA_MAX_WIDTH,
+};
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
new file mode 100644
index 0000000..44a4d27
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -0,0 +1,1126 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+/* Allocate the requested number of contiguous LSB slots
+ * from the LSB bitmap. Look in the private range for this
+ * queue first; failing that, check the public area.
+ * If no space is available, wait around.
+ * Return: first slot number
+ */
+static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
+{
+	struct ccp_device *ccp;
+	int start;
+
+	/* First look at the map for the queue */
+	if (cmd_q->lsb >= 0) {
+		start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap,
+							LSB_SIZE,
+							0, count, 0);
+		if (start < LSB_SIZE) {
+			bitmap_set(cmd_q->lsbmap, start, count);
+			return start + cmd_q->lsb * LSB_SIZE;
+		}
+	}
+
+	/* No joy; try to get an entry from the shared blocks */
+	ccp = cmd_q->ccp;
+	for (;;) {
+		mutex_lock(&ccp->sb_mutex);
+
+		start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
+							MAX_LSB_CNT * LSB_SIZE,
+							0,
+							count, 0);
+		if (start < MAX_LSB_CNT * LSB_SIZE) {
+			bitmap_set(ccp->lsbmap, start, count);
+
+			mutex_unlock(&ccp->sb_mutex);
+			return start;
+		}
+
+		ccp->sb_avail = 0;
+
+		mutex_unlock(&ccp->sb_mutex);
+
+		/* Wait for KSB entries to become available */
+		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
+			return 0;
+	}
+}
+
+/* Free a number of LSB slots from the bitmap, starting at
+ * the indicated starting slot number.
+ */
+static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
+			 unsigned int count)
+{
+	if (!start)
+		return;
+
+	if (cmd_q->lsb == start) {
+		/* An entry from the private LSB */
+		bitmap_clear(cmd_q->lsbmap, start, count);
+	} else {
+		/* From the shared LSBs */
+		struct ccp_device *ccp = cmd_q->ccp;
+
+		mutex_lock(&ccp->sb_mutex);
+		bitmap_clear(ccp->lsbmap, start, count);
+		ccp->sb_avail = 1;
+		mutex_unlock(&ccp->sb_mutex);
+		wake_up_interruptible_all(&ccp->sb_queue);
+	}
+}
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+	struct {
+		u16 size:7;
+		u16 encrypt:1;
+		u16 mode:5;
+		u16 type:2;
+	} aes;
+	struct {
+		u16 size:7;
+		u16 encrypt:1;
+		u16 rsvd:5;
+		u16 type:2;
+	} aes_xts;
+	struct {
+		u16 size:7;
+		u16 encrypt:1;
+		u16 mode:5;
+		u16 type:2;
+	} des3;
+	struct {
+		u16 rsvd1:10;
+		u16 type:4;
+		u16 rsvd2:1;
+	} sha;
+	struct {
+		u16 mode:3;
+		u16 size:12;
+	} rsa;
+	struct {
+		u16 byteswap:2;
+		u16 bitwise:3;
+		u16 reflect:2;
+		u16 rsvd:8;
+	} pt;
+	struct  {
+		u16 rsvd:13;
+	} zlib;
+	struct {
+		u16 size:10;
+		u16 type:2;
+		u16 mode:3;
+	} ecc;
+	u16 raw;
+};
+
+#define	CCP_AES_SIZE(p)		((p)->aes.size)
+#define	CCP_AES_ENCRYPT(p)	((p)->aes.encrypt)
+#define	CCP_AES_MODE(p)		((p)->aes.mode)
+#define	CCP_AES_TYPE(p)		((p)->aes.type)
+#define	CCP_XTS_SIZE(p)		((p)->aes_xts.size)
+#define	CCP_XTS_TYPE(p)		((p)->aes_xts.type)
+#define	CCP_XTS_ENCRYPT(p)	((p)->aes_xts.encrypt)
+#define	CCP_DES3_SIZE(p)	((p)->des3.size)
+#define	CCP_DES3_ENCRYPT(p)	((p)->des3.encrypt)
+#define	CCP_DES3_MODE(p)	((p)->des3.mode)
+#define	CCP_DES3_TYPE(p)	((p)->des3.type)
+#define	CCP_SHA_TYPE(p)		((p)->sha.type)
+#define	CCP_RSA_SIZE(p)		((p)->rsa.size)
+#define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
+#define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
+#define	CCP_ECC_MODE(p)		((p)->ecc.mode)
+#define	CCP_ECC_AFFINE(p)	((p)->ecc.one)
+
+/* Word 0 */
+#define CCP5_CMD_DW0(p)		((p)->dw0)
+#define CCP5_CMD_SOC(p)		(CCP5_CMD_DW0(p).soc)
+#define CCP5_CMD_IOC(p)		(CCP5_CMD_DW0(p).ioc)
+#define CCP5_CMD_INIT(p)	(CCP5_CMD_DW0(p).init)
+#define CCP5_CMD_EOM(p)		(CCP5_CMD_DW0(p).eom)
+#define CCP5_CMD_FUNCTION(p)	(CCP5_CMD_DW0(p).function)
+#define CCP5_CMD_ENGINE(p)	(CCP5_CMD_DW0(p).engine)
+#define CCP5_CMD_PROT(p)	(CCP5_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP5_CMD_DW1(p)		((p)->length)
+#define CCP5_CMD_LEN(p)		(CCP5_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP5_CMD_DW2(p)		((p)->src_lo)
+#define CCP5_CMD_SRC_LO(p)	(CCP5_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP5_CMD_DW3(p)		((p)->dw3)
+#define CCP5_CMD_SRC_MEM(p)	((p)->dw3.src_mem)
+#define CCP5_CMD_SRC_HI(p)	((p)->dw3.src_hi)
+#define CCP5_CMD_LSB_ID(p)	((p)->dw3.lsb_cxt_id)
+#define CCP5_CMD_FIX_SRC(p)	((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP5_CMD_DW4(p)		((p)->dw4)
+#define CCP5_CMD_DST_LO(p)	(CCP5_CMD_DW4(p).dst_lo)
+#define CCP5_CMD_DW5(p)		((p)->dw5.fields.dst_hi)
+#define CCP5_CMD_DST_HI(p)	(CCP5_CMD_DW5(p))
+#define CCP5_CMD_DST_MEM(p)	((p)->dw5.fields.dst_mem)
+#define CCP5_CMD_FIX_DST(p)	((p)->dw5.fields.fixed)
+#define CCP5_CMD_SHA_LO(p)	((p)->dw4.sha_len_lo)
+#define CCP5_CMD_SHA_HI(p)	((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP5_CMD_DW6(p)		((p)->key_lo)
+#define CCP5_CMD_KEY_LO(p)	(CCP5_CMD_DW6(p))
+#define CCP5_CMD_DW7(p)		((p)->dw7)
+#define CCP5_CMD_KEY_HI(p)	((p)->dw7.key_hi)
+#define CCP5_CMD_KEY_MEM(p)	((p)->dw7.key_mem)
+
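+/* The queue base/head/tail registers hold only the low 32 bits of a
+ * DMA address; the upper bits are programmed separately (the high
+ * 16 bits are folded into the queue control register), so addresses
+ * are split with the helpers below.
+ */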
+static inline u32 low_address(unsigned long addr)
+{
+	return (u64)addr & 0x0ffffffff;
+}
+
+static inline u32 high_address(unsigned long addr)
+{
+	return ((u64)addr >> 32) & 0x00000ffff;
+}
+
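+/* A v5 queue is a circular buffer of COMMANDS_PER_QUEUE descriptors.
+ * Free space is the distance from the software tail (qidx) back to
+ * the hardware head pointer, minus the one slot that is always left
+ * unused so a full ring can be told apart from an empty one.
+ */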
+static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
+{
+	unsigned int head_idx, n;
+	u32 head_lo, queue_start;
+
+	queue_start = low_address(cmd_q->qdma_tail);
+	head_lo = ioread32(cmd_q->reg_head_lo);
+	head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc);
+
+	n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1;
+
+	return n % COMMANDS_PER_QUEUE; /* Always one unused spot */
+}
+
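+/* Submit one descriptor to a v5 command queue: copy the eight dwords
+ * of the descriptor into the ring at qidx, advance qidx modulo
+ * COMMANDS_PER_QUEUE, publish the new tail address and set the RUN
+ * bit. If an interrupt on completion was requested, wait for it; on
+ * error, log it and move the head pointer to flush the queue.
+ */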
+static int ccp5_do_cmd(struct ccp5_desc *desc,
+		       struct ccp_cmd_queue *cmd_q)
+{
+	__le32 *mP;
+	u32 *dP;
+	u32 tail;
+	int	i;
+	int ret = 0;
+
+	cmd_q->total_ops++;
+
+	if (CCP5_CMD_SOC(desc)) {
+		CCP5_CMD_IOC(desc) = 1;
+		CCP5_CMD_SOC(desc) = 0;
+	}
+	mutex_lock(&cmd_q->q_mutex);
+
+	mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx];
+	dP = (u32 *)desc;
+	for (i = 0; i < 8; i++)
+		mP[i] = cpu_to_le32(dP[i]); /* handle endianness */
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	/* The data used by this command must be flushed to memory */
+	wmb();
+
+	/* Write the new tail address back to the queue register */
+	tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+	iowrite32(tail, cmd_q->reg_tail_lo);
+
+	/* Turn the queue back on using our cached control register */
+	iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control);
+	mutex_unlock(&cmd_q->q_mutex);
+
+	if (CCP5_CMD_IOC(desc)) {
+		/* Wait for the job to complete */
+		ret = wait_event_interruptible(cmd_q->int_queue,
+					       cmd_q->int_rcvd);
+		if (ret || cmd_q->cmd_error) {
+			/* Log the error and flush the queue by
+			 * moving the head pointer
+			 */
+			if (cmd_q->cmd_error)
+				ccp_log_error(cmd_q->ccp,
+					      cmd_q->cmd_error);
+			iowrite32(tail, cmd_q->reg_head_lo);
+			if (!ret)
+				ret = -EIO;
+		}
+		cmd_q->int_rcvd = 0;
+	}
+
+	return ret;
+}
+
+static int ccp5_perform_aes(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+	u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
+
+	op->cmd_q->total_aes_ops++;
+
+	/* Zero out all the fields of the command desc */
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES;
+
+	CCP5_CMD_SOC(&desc) = op->soc;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = op->init;
+	CCP5_CMD_EOM(&desc) = op->eom;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = op->u.aes.action;
+	CCP_AES_MODE(&function) = op->u.aes.mode;
+	CCP_AES_TYPE(&function) = op->u.aes.type;
+	CCP_AES_SIZE(&function) = op->u.aes.size;
+
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
+	CCP5_CMD_KEY_HI(&desc) = 0;
+	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+	CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_xts_aes(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+	u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
+
+	op->cmd_q->total_xts_aes_ops++;
+
+	/* Zero out all the fields of the command desc */
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128;
+
+	CCP5_CMD_SOC(&desc) = op->soc;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = op->init;
+	CCP5_CMD_EOM(&desc) = op->eom;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	CCP_XTS_TYPE(&function) = op->u.xts.type;
+	CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
+	CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
+	CCP5_CMD_KEY_HI(&desc) =  0;
+	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+	CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_sha(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+
+	op->cmd_q->total_sha_ops++;
+
+	/* Zero out all the fields of the command desc */
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA;
+
+	CCP5_CMD_SOC(&desc) = op->soc;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = 1;
+	CCP5_CMD_EOM(&desc) = op->eom;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = op->u.sha.type;
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
+
+	if (op->eom) {
+		CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits);
+		CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits);
+	} else {
+		CCP5_CMD_SHA_LO(&desc) = 0;
+		CCP5_CMD_SHA_HI(&desc) = 0;
+	}
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_des3(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+	u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
+
+	op->cmd_q->total_3des_ops++;
+
+	/* Zero out all the fields of the command desc */
+	memset(&desc, 0, sizeof(struct ccp5_desc));
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;
+
+	CCP5_CMD_SOC(&desc) = op->soc;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = op->init;
+	CCP5_CMD_EOM(&desc) = op->eom;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
+	CCP_DES3_MODE(&function) = op->u.des3.mode;
+	CCP_DES3_TYPE(&function) = op->u.des3.type;
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
+	CCP5_CMD_KEY_HI(&desc) = 0;
+	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+	CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_rsa(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+
+	op->cmd_q->total_rsa_ops++;
+
+	/* Zero out all the fields of the command desc */
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA;
+
+	CCP5_CMD_SOC(&desc) = op->soc;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = 0;
+	CCP5_CMD_EOM(&desc) = 1;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
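+	/* Round the modulus size up to whole bytes for the function field */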
+	CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
+
+	/* Source is from external memory */
+	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	/* Destination is in external memory */
+	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	/* Key (Exponent) is in external memory */
+	CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
+	CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
+	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_passthru(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+	struct ccp_dma_info *saddr = &op->src.u.dma;
+	struct ccp_dma_info *daddr = &op->dst.u.dma;
+
+
+	op->cmd_q->total_pt_ops++;
+
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;
+
+	CCP5_CMD_SOC(&desc) = 0;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = 0;
+	CCP5_CMD_EOM(&desc) = op->eom;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap;
+	CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod;
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	/* Length is taken from whichever side resides in system memory */
+	if (op->src.type == CCP_MEMTYPE_SYSTEM)
+		CCP5_CMD_LEN(&desc) = saddr->length;
+	else
+		CCP5_CMD_LEN(&desc) = daddr->length;
+
+	if (op->src.type == CCP_MEMTYPE_SYSTEM) {
+		CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+		CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+		CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+			CCP5_CMD_LSB_ID(&desc) = op->sb_key;
+	} else {
+		u32 key_addr = op->src.u.sb * CCP_SB_BYTES;
+
+		CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr);
+		CCP5_CMD_SRC_HI(&desc) = 0;
+		CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB;
+	}
+
+	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
+		CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+		CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+		CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+	} else {
+		u32 key_addr = op->dst.u.sb * CCP_SB_BYTES;
+
+		CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr);
+		CCP5_CMD_DST_HI(&desc) = 0;
+		CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB;
+	}
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_ecc(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+
+	op->cmd_q->total_ecc_ops++;
+
+	/* Zero out all the fields of the command desc */
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC;
+
+	CCP5_CMD_SOC(&desc) = 0;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = 0;
+	CCP5_CMD_EOM(&desc) = 1;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	function.ecc.mode = op->u.ecc.function;
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
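+/* The combined LSB mask value packs one LSB_REGION_WIDTH-bit field
+ * per LSB region; within each field, bit <queue id> says whether the
+ * queue may use that region. Record the regions accessible to this
+ * queue in its lsbmask bitmap.
+ */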
+static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
+{
+	int q_mask = 1 << cmd_q->id;
+	int queues = 0;
+	int j;
+
+	/* Build a bit mask to know which LSBs this queue has access to.
+	 * Don't bother with segment 0 as it has special privileges.
+	 */
+	for (j = 1; j < MAX_LSB_CNT; j++) {
+		if (status & q_mask)
+			bitmap_set(cmd_q->lsbmask, j, 1);
+		status >>= LSB_REGION_WIDTH;
+	}
+	queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
+	dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
+		 cmd_q->id, queues);
+
+	return queues ? 0 : -EINVAL;
+}
+
+static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+					int lsb_cnt, int n_lsbs,
+					unsigned long *lsb_pub)
+{
+	DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
+	int bitno;
+	int qlsb_wgt;
+	int i;
+
+	/* For each queue:
+	 * If the count of potential LSBs available to a queue matches the
+	 * ordinal given to us in lsb_cnt:
+	 * Copy the mask of possible LSBs for this queue into "qlsb";
+	 * For each bit in qlsb, see if the corresponding bit in the
+	 * aggregation mask is set; if so, we have a match.
+	 *     If we have a match, clear the bit in the aggregation to
+	 *     mark it as no longer available.
+	 *     If there is no match, clear the bit in qlsb and keep looking.
+	 */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+
+		qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
+
+		if (qlsb_wgt == lsb_cnt) {
+			bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);
+
+			bitno = find_first_bit(qlsb, MAX_LSB_CNT);
+			while (bitno < MAX_LSB_CNT) {
+				if (test_bit(bitno, lsb_pub)) {
+					/* We found an available LSB
+					 * that this queue can access
+					 */
+					cmd_q->lsb = bitno;
+					bitmap_clear(lsb_pub, bitno, 1);
+					dev_dbg(ccp->dev,
+						 "Queue %d gets LSB %d\n",
+						 i, bitno);
+					break;
+				}
+				bitmap_clear(qlsb, bitno, 1);
+				bitno = find_first_bit(qlsb, MAX_LSB_CNT);
+			}
+			if (bitno >= MAX_LSB_CNT)
+				return -EINVAL;
+			n_lsbs--;
+		}
+	}
+	return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared resources.
+ */
+static int ccp_assign_lsbs(struct ccp_device *ccp)
+{
+	DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
+	DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
+	int n_lsbs = 0;
+	int bitno;
+	int i, lsb_cnt;
+	int rc = 0;
+
+	bitmap_zero(lsb_pub, MAX_LSB_CNT);
+
+	/* Create an aggregate bitmap to get a total count of available LSBs */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		bitmap_or(lsb_pub,
+			  lsb_pub, ccp->cmd_q[i].lsbmask,
+			  MAX_LSB_CNT);
+
+	n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);
+
+	if (n_lsbs >= ccp->cmd_q_count) {
+		/* We have enough LSBs to give every queue a private LSB.
+		 * Brute force search to start with the queues that are more
+		 * constrained in LSB choice. When an LSB is privately
+		 * assigned, it is removed from the public mask.
+		 * This is an ugly N squared algorithm with some optimization.
+		 */
+		for (lsb_cnt = 1;
+		     n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+		     lsb_cnt++) {
+			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+							  lsb_pub);
+			if (rc < 0)
+				return -EINVAL;
+			n_lsbs = rc;
+		}
+	}
+
+	rc = 0;
+	/* What's left of the LSBs, according to the public mask, now become
+	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
+	 * that can't be used as a shared resource, so mark the LSB slots for
+	 * them as "in use".
+	 */
+	bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);
+
+	bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
+	while (bitno < MAX_LSB_CNT) {
+		bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+		bitmap_set(qlsb, bitno, 1);
+		bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
+	}
+
+	return rc;
+}
+
+static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_irq_bh(unsigned long data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+	u32 status;
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+
+		status = ioread32(cmd_q->reg_interrupt_status);
+
+		if (status) {
+			cmd_q->int_status = status;
+			cmd_q->q_status = ioread32(cmd_q->reg_status);
+			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+			/* On error, only save the first error value */
+			if ((status & INT_ERROR) && !cmd_q->cmd_error)
+				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+			cmd_q->int_rcvd = 1;
+
+			/* Acknowledge the interrupt and wake the kthread */
+			iowrite32(status, cmd_q->reg_interrupt_status);
+			wake_up_interruptible(&cmd_q->int_queue);
+		}
+	}
+	ccp5_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp5_irq_handler(int irq, void *data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+
+	ccp5_disable_queue_interrupts(ccp);
+	ccp->total_interrupts++;
+	if (ccp->use_tasklet)
+		tasklet_schedule(&ccp->irq_tasklet);
+	else
+		ccp5_irq_bh((unsigned long)ccp);
+	return IRQ_HANDLED;
+}
+
+static int ccp5_init(struct ccp_device *ccp)
+{
+	struct device *dev = ccp->dev;
+	struct ccp_cmd_queue *cmd_q;
+	struct dma_pool *dma_pool;
+	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+	unsigned int qmr, i;
+	u64 status;
+	u32 status_lo, status_hi;
+	int ret;
+
+	/* Find available queues */
+	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+	for (i = 0; i < MAX_HW_QUEUES; i++) {
+
+		if (!(qmr & (1 << i)))
+			continue;
+
+		/* Allocate a dma pool for this queue */
+		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
+			 ccp->name, i);
+		dma_pool = dma_pool_create(dma_pool_name, dev,
+					   CCP_DMAPOOL_MAX_SIZE,
+					   CCP_DMAPOOL_ALIGN, 0);
+		if (!dma_pool) {
+			dev_err(dev, "unable to allocate dma pool\n");
+			ret = -ENOMEM;
+			goto e_pool;
+		}
+
+		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
+		ccp->cmd_q_count++;
+
+		cmd_q->ccp = ccp;
+		cmd_q->id = i;
+		cmd_q->dma_pool = dma_pool;
+		mutex_init(&cmd_q->q_mutex);
+
+		/* Page alignment satisfies our needs for N <= 128 */
+		BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
+		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+		cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
+						   &cmd_q->qbase_dma,
+						   GFP_KERNEL);
+		if (!cmd_q->qbase) {
+			dev_err(dev, "unable to allocate command queue\n");
+			ret = -ENOMEM;
+			goto e_pool;
+		}
+
+		cmd_q->qidx = 0;
+		/* Preset some register values and masks that are queue
+		 * number dependent
+		 */
+		cmd_q->reg_control = ccp->io_regs +
+				     CMD5_Q_STATUS_INCR * (i + 1);
+		cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
+		cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
+		cmd_q->reg_int_enable = cmd_q->reg_control +
+					CMD5_Q_INT_ENABLE_BASE;
+		cmd_q->reg_interrupt_status = cmd_q->reg_control +
+					      CMD5_Q_INTERRUPT_STATUS_BASE;
+		cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
+		cmd_q->reg_int_status = cmd_q->reg_control +
+					CMD5_Q_INT_STATUS_BASE;
+		cmd_q->reg_dma_status = cmd_q->reg_control +
+					CMD5_Q_DMA_STATUS_BASE;
+		cmd_q->reg_dma_read_status = cmd_q->reg_control +
+					     CMD5_Q_DMA_READ_STATUS_BASE;
+		cmd_q->reg_dma_write_status = cmd_q->reg_control +
+					      CMD5_Q_DMA_WRITE_STATUS_BASE;
+
+		init_waitqueue_head(&cmd_q->int_queue);
+
+		dev_dbg(dev, "queue #%u available\n", i);
+	}
+
+	if (ccp->cmd_q_count == 0) {
+		dev_notice(dev, "no command queues available\n");
+		ret = -EIO;
+		goto e_pool;
+	}
+
+	/* Turn off the queues and disable interrupts until ready */
+	ccp5_disable_queue_interrupts(ccp);
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+
+		cmd_q->qcontrol = 0; /* Start with nothing */
+		iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+		ioread32(cmd_q->reg_int_status);
+		ioread32(cmd_q->reg_status);
+
+		/* Clear the interrupt status */
+		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
+	}
+
+	dev_dbg(dev, "Requesting an IRQ...\n");
+	/* Request an irq */
+	ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
+	if (ret) {
+		dev_err(dev, "unable to allocate an IRQ\n");
+		goto e_pool;
+	}
+	/* Initialize the ISR tasklet */
+	if (ccp->use_tasklet)
+		tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
+			     (unsigned long)ccp);
+
+	dev_dbg(dev, "Loading LSB map...\n");
+	/* Copy the private LSB mask to the public registers */
+	status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
+	status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
+	iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
+	iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
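+	/* The LO mask register carries 30 mask bits (cf. the 0x3FFFFFFF
+	 * private-mask value written in ccp5other_config()), so the HI
+	 * half is shifted up by 30 when the halves are combined.
+	 */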
+	status = ((u64)status_hi << 30) | (u64)status_lo;
+
+	dev_dbg(dev, "Configuring virtual queues...\n");
+	/* Configure size of each virtual queue accessible to host */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		u32 dma_addr_lo;
+		u32 dma_addr_hi;
+
+		cmd_q = &ccp->cmd_q[i];
+
+		cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
+		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;
+
+		cmd_q->qdma_tail = cmd_q->qbase_dma;
+		dma_addr_lo = low_address(cmd_q->qdma_tail);
+		iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
+		iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);
+
+		dma_addr_hi = high_address(cmd_q->qdma_tail);
+		cmd_q->qcontrol |= (dma_addr_hi << 16);
+		iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+		/* Find the LSB regions accessible to the queue */
+		ccp_find_lsb_regions(cmd_q, status);
+		cmd_q->lsb = -1; /* Unassigned value */
+	}
+
+	dev_dbg(dev, "Assigning LSBs...\n");
+	ret = ccp_assign_lsbs(ccp);
+	if (ret) {
+		dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
+		goto e_irq;
+	}
+
+	/* Optimization: pre-allocate LSB slots for each queue */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
+		ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
+	}
+
+	dev_dbg(dev, "Starting threads...\n");
+	/* Create a kthread for each queue */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct task_struct *kthread;
+
+		cmd_q = &ccp->cmd_q[i];
+
+		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
+					 "%s-q%u", ccp->name, cmd_q->id);
+		if (IS_ERR(kthread)) {
+			dev_err(dev, "error creating queue thread (%ld)\n",
+				PTR_ERR(kthread));
+			ret = PTR_ERR(kthread);
+			goto e_kthread;
+		}
+
+		cmd_q->kthread = kthread;
+		wake_up_process(kthread);
+	}
+
+	dev_dbg(dev, "Enabling interrupts...\n");
+	ccp5_enable_queue_interrupts(ccp);
+
+	dev_dbg(dev, "Registering device...\n");
+	/* Put this on the unit list to make it available */
+	ccp_add_device(ccp);
+
+	ret = ccp_register_rng(ccp);
+	if (ret)
+		goto e_kthread;
+
+	/* Register the DMA engine support */
+	ret = ccp_dmaengine_register(ccp);
+	if (ret)
+		goto e_hwrng;
+
+	/* Set up debugfs entries */
+	ccp5_debugfs_setup(ccp);
+
+	return 0;
+
+e_hwrng:
+	ccp_unregister_rng(ccp);
+
+e_kthread:
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		if (ccp->cmd_q[i].kthread)
+			kthread_stop(ccp->cmd_q[i].kthread);
+
+e_irq:
+	sp_free_ccp_irq(ccp->sp, ccp);
+
+e_pool:
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+	return ret;
+}
+
+static void ccp5_destroy(struct ccp_device *ccp)
+{
+	struct device *dev = ccp->dev;
+	struct ccp_cmd_queue *cmd_q;
+	struct ccp_cmd *cmd;
+	unsigned int i;
+
+	/* Unregister the DMA engine */
+	ccp_dmaengine_unregister(ccp);
+
+	/* Unregister the RNG */
+	ccp_unregister_rng(ccp);
+
+	/* Remove this device from the list of available units first */
+	ccp_del_device(ccp);
+
+	/* We're in the process of tearing down the entire driver;
+	 * when all the devices are gone clean up debugfs
+	 */
+	if (ccp_present())
+		ccp5_debugfs_destroy();
+
+	/* Disable and clear interrupts */
+	ccp5_disable_queue_interrupts(ccp);
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+
+		/* Turn off the run bit */
+		iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
+
+		/* Clear the interrupt status */
+		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
+		ioread32(cmd_q->reg_int_status);
+		ioread32(cmd_q->reg_status);
+	}
+
+	/* Stop the queue kthreads */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		if (ccp->cmd_q[i].kthread)
+			kthread_stop(ccp->cmd_q[i].kthread);
+
+	sp_free_ccp_irq(ccp->sp, ccp);
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+		dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
+				  cmd_q->qbase_dma);
+	}
+
+	/* Flush the cmd and backlog queue */
+	while (!list_empty(&ccp->cmd)) {
+		/* Invoke the callback directly with an error code */
+		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
+		list_del(&cmd->entry);
+		cmd->callback(cmd->data, -ENODEV);
+	}
+	while (!list_empty(&ccp->backlog)) {
+		/* Invoke the callback directly with an error code */
+		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
+		list_del(&cmd->entry);
+		cmd->callback(cmd->data, -ENODEV);
+	}
+}
+
+static void ccp5_config(struct ccp_device *ccp)
+{
+	/* Public side */
+	iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+}
+
+static void ccp5other_config(struct ccp_device *ccp)
+{
+	int i;
+	u32 rnd;
+
+	/* We own all of the queues on the NTB CCP */
+
+	iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
+	iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
+	for (i = 0; i < 12; i++) {
+		rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
+		iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
+	}
+
+	iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
+	iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
+	iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);
+
+	iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
+	iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
+
+	iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);
+
+	ccp5_config(ccp);
+}
+
+/* Version 5 adds some function, but is essentially the same as v3 */
+static const struct ccp_actions ccp5_actions = {
+	.aes = ccp5_perform_aes,
+	.xts_aes = ccp5_perform_xts_aes,
+	.sha = ccp5_perform_sha,
+	.des3 = ccp5_perform_des3,
+	.rsa = ccp5_perform_rsa,
+	.passthru = ccp5_perform_passthru,
+	.ecc = ccp5_perform_ecc,
+	.sballoc = ccp_lsb_alloc,
+	.sbfree = ccp_lsb_free,
+	.init = ccp5_init,
+	.destroy = ccp5_destroy,
+	.get_free_slots = ccp5_get_free_slots,
+};
+
+const struct ccp_vdata ccpv5a = {
+	.version = CCP_VERSION(5, 0),
+	.setup = ccp5_config,
+	.perform = &ccp5_actions,
+	.offset = 0x0,
+	.rsamax = CCP5_RSA_MAX_WIDTH,
+};
+
+const struct ccp_vdata ccpv5b = {
+	.version = CCP_VERSION(5, 0),
+	.dma_chan_attr = DMA_PRIVATE,
+	.setup = ccp5other_config,
+	.perform = &ccp5_actions,
+	.offset = 0x0,
+	.rsamax = CCP5_RSA_MAX_WIDTH,
+};
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
new file mode 100644
index 0000000..1b5035d
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -0,0 +1,629 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/cpu.h>
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#endif
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+struct ccp_tasklet_data {
+	struct completion completion;
+	struct ccp_cmd *cmd;
+};
+
+/* Human-readable error strings */
+static char *ccp_error_codes[] = {
+	"",
+	"ERR 01: ILLEGAL_ENGINE",
+	"ERR 02: ILLEGAL_KEY_ID",
+	"ERR 03: ILLEGAL_FUNCTION_TYPE",
+	"ERR 04: ILLEGAL_FUNCTION_MODE",
+	"ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
+	"ERR 06: ILLEGAL_FUNCTION_SIZE",
+	"ERR 07: Zlib_MISSING_INIT_EOM",
+	"ERR 08: ILLEGAL_FUNCTION_RSVD",
+	"ERR 09: ILLEGAL_BUFFER_LENGTH",
+	"ERR 10: VLSB_FAULT",
+	"ERR 11: ILLEGAL_MEM_ADDR",
+	"ERR 12: ILLEGAL_MEM_SEL",
+	"ERR 13: ILLEGAL_CONTEXT_ID",
+	"ERR 14: ILLEGAL_KEY_ADDR",
+	"ERR 15: 0xF Reserved",
+	"ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
+	"ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
+	"ERR 18: CMD_TIMEOUT",
+	"ERR 19: IDMA0_AXI_SLVERR",
+	"ERR 20: IDMA0_AXI_DECERR",
+	"ERR 21: 0x15 Reserved",
+	"ERR 22: IDMA1_AXI_SLAVE_FAULT",
+	"ERR 23: IDMA1_AIXI_DECERR",
+	"ERR 24: 0x18 Reserved",
+	"ERR 25: ZLIBVHB_AXI_SLVERR",
+	"ERR 26: ZLIBVHB_AXI_DECERR",
+	"ERR 27: 0x1B Reserved",
+	"ERR 27: ZLIB_UNEXPECTED_EOM",
+	"ERR 27: ZLIB_EXTRA_DATA",
+	"ERR 30: ZLIB_BTYPE",
+	"ERR 31: ZLIB_UNDEFINED_SYMBOL",
+	"ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
+	"ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
+	"ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
+	"ERR 35: ZLIB_UNCOMPRESSED_LEN",
+	"ERR 36: ZLIB_LIMIT_REACHED",
+	"ERR 37: ZLIB_CHECKSUM_MISMATCH0",
+	"ERR 38: ODMA0_AXI_SLVERR",
+	"ERR 39: ODMA0_AXI_DECERR",
+	"ERR 40: 0x28 Reserved",
+	"ERR 41: ODMA1_AXI_SLVERR",
+	"ERR 42: ODMA1_AXI_DECERR",
+	"ERR 43: LSB_PARITY_ERR",
+};
+
+void ccp_log_error(struct ccp_device *d, int e)
+{
+	/* Guard against error codes beyond the table above */
+	if (e < 0 || e >= ARRAY_SIZE(ccp_error_codes))
+		dev_err(d->dev, "CCP error: unknown error code (0x%x)\n", e);
+	else
+		dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
+}
+
+/* List of CCPs, CCP count, read-write access lock, and access functions
+ *
+ * Lock structure: get ccp_unit_lock for reading whenever we need to
+ * examine the CCP list. While holding it for reading we can acquire
+ * the RR lock to update the round-robin next-CCP pointer. The unit lock
+ * must be acquired before the RR lock.
+ *
+ * If the unit-lock is acquired for writing, we have total control over
+ * the list, so there's no value in getting the RR lock.
+ */
+static DEFINE_RWLOCK(ccp_unit_lock);
+static LIST_HEAD(ccp_units);
+
+/* Round-robin counter */
+static DEFINE_SPINLOCK(ccp_rr_lock);
+static struct ccp_device *ccp_rr;
+
+/**
+ * ccp_add_device - add a CCP device to the list
+ *
+ * @ccp: ccp_device struct pointer
+ *
+ * Put this CCP on the unit list, which makes it available
+ * for use.
+ */
+void ccp_add_device(struct ccp_device *ccp)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&ccp_unit_lock, flags);
+	list_add_tail(&ccp->entry, &ccp_units);
+	if (!ccp_rr)
+		/* We already have the list lock (we're first) so this
+		 * pointer can't change on us. Set its initial value.
+		 */
+		ccp_rr = ccp;
+	write_unlock_irqrestore(&ccp_unit_lock, flags);
+}
+
+/**
+ * ccp_del_device - remove a CCP device from the list
+ *
+ * @ccp: ccp_device struct pointer
+ *
+ * Remove this unit from the list of devices. If the next device
+ * up for use is this one, adjust the pointer. If this is the last
+ * device, NULL the pointer.
+ */
+void ccp_del_device(struct ccp_device *ccp)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&ccp_unit_lock, flags);
+	if (ccp_rr == ccp) {
+		/* ccp_unit_lock is read/write; any read access
+		 * will be suspended while we make changes to the
+		 * list and RR pointer.
+		 */
+		if (list_is_last(&ccp_rr->entry, &ccp_units))
+			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
+						  entry);
+		else
+			ccp_rr = list_next_entry(ccp_rr, entry);
+	}
+	list_del(&ccp->entry);
+	if (list_empty(&ccp_units))
+		ccp_rr = NULL;
+	write_unlock_irqrestore(&ccp_unit_lock, flags);
+}
+
+
+
+int ccp_register_rng(struct ccp_device *ccp)
+{
+	int ret = 0;
+
+	dev_dbg(ccp->dev, "Registering RNG...\n");
+	/* Register an RNG */
+	ccp->hwrng.name = ccp->rngname;
+	ccp->hwrng.read = ccp_trng_read;
+	ret = hwrng_register(&ccp->hwrng);
+	if (ret)
+		dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);
+
+	return ret;
+}
+
+void ccp_unregister_rng(struct ccp_device *ccp)
+{
+	if (ccp->hwrng.name)
+		hwrng_unregister(&ccp->hwrng);
+}
+
+static struct ccp_device *ccp_get_device(void)
+{
+	unsigned long flags;
+	struct ccp_device *dp = NULL;
+
+	/* We round-robin through the unit list.
+	 * The (ccp_rr) pointer refers to the next unit to use.
+	 */
+	read_lock_irqsave(&ccp_unit_lock, flags);
+	if (!list_empty(&ccp_units)) {
+		spin_lock(&ccp_rr_lock);
+		dp = ccp_rr;
+		if (list_is_last(&ccp_rr->entry, &ccp_units))
+			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
+						  entry);
+		else
+			ccp_rr = list_next_entry(ccp_rr, entry);
+		spin_unlock(&ccp_rr_lock);
+	}
+	read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+	return dp;
+}
+
+/**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void)
+{
+	unsigned long flags;
+	int ret;
+
+	read_lock_irqsave(&ccp_unit_lock, flags);
+	ret = list_empty(&ccp_units);
+	read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+	return ret ? -ENODEV : 0;
+}
+EXPORT_SYMBOL_GPL(ccp_present);
+
+/**
+ * ccp_version - get the version of the CCP device
+ *
+ * Returns the version from the first unit on the list;
+ * otherwise zero if no CCP device is present.
+ */
+unsigned int ccp_version(void)
+{
+	struct ccp_device *dp;
+	unsigned long flags;
+	int ret = 0;
+
+	read_lock_irqsave(&ccp_unit_lock, flags);
+	if (!list_empty(&ccp_units)) {
+		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
+		ret = dp->vdata->version;
+	}
+	read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ccp_version);
+
+/**
+ * ccp_enqueue_cmd - queue an operation for processing by the CCP
+ *
+ * @cmd: ccp_cmd struct to be processed
+ *
+ * Queue a cmd to be processed by the CCP. If queueing the cmd would
+ * exceed the defined length of the cmd queue, the cmd is accepted only
+ * when the CCP_CMD_MAY_BACKLOG flag is set; in that case it is placed
+ * on the backlog and -EBUSY is returned.
+ *
+ * The callback routine specified in the ccp_cmd struct will be
+ * called to notify the caller of completion (if the cmd was not
+ * backlogged) or advancement out of the backlog. If the cmd has
+ * advanced out of the backlog the "err" value of the callback
+ * will be -EINPROGRESS. Any other "err" value during callback is
+ * the result of the operation.
+ *
+ * The cmd has been successfully queued if:
+ *   the return code is -EINPROGRESS or
+ *   the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
+ */
+int ccp_enqueue_cmd(struct ccp_cmd *cmd)
+{
+	struct ccp_device *ccp;
+	unsigned long flags;
+	unsigned int i;
+	int ret;
+
+	/* Some commands might need to be sent to a specific device */
+	ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
+
+	if (!ccp)
+		return -ENODEV;
+
+	/* Caller must supply a callback routine */
+	if (!cmd->callback)
+		return -EINVAL;
+
+	cmd->ccp = ccp;
+
+	spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+	i = ccp->cmd_q_count;
+
+	if (ccp->cmd_count >= MAX_CMD_QLEN) {
+		if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
+			ret = -EBUSY;
+			list_add_tail(&cmd->entry, &ccp->backlog);
+		} else {
+			ret = -ENOSPC;
+		}
+	} else {
+		ret = -EINPROGRESS;
+		ccp->cmd_count++;
+		list_add_tail(&cmd->entry, &ccp->cmd);
+
+		/* Find an idle queue */
+		if (!ccp->suspending) {
+			for (i = 0; i < ccp->cmd_q_count; i++) {
+				if (ccp->cmd_q[i].active)
+					continue;
+
+				break;
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+	/* If we found an idle queue, wake it up */
+	if (i < ccp->cmd_q_count)
+		wake_up_process(ccp->cmd_q[i].kthread);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
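+
+/* Caller-side usage sketch (illustrative only; my_callback and my_ctx are
+ * hypothetical caller-provided names): fill in a struct ccp_cmd, supply a
+ * completion callback, and submit it. A return of -EINPROGRESS, or -EBUSY
+ * with CCP_CMD_MAY_BACKLOG set, means the cmd was accepted.
+ *
+ *	cmd.engine   = CCP_ENGINE_PASSTHRU;
+ *	cmd.flags    = CCP_CMD_MAY_BACKLOG;
+ *	cmd.callback = my_callback;
+ *	cmd.data     = my_ctx;
+ *	ret = ccp_enqueue_cmd(&cmd);
+ */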
+
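+/* Work handler that moves a backlogged cmd onto the run list; the
+ * -EINPROGRESS callback below is the "advanced out of the backlog"
+ * notification described in the ccp_enqueue_cmd() kernel-doc.
+ */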
+static void ccp_do_cmd_backlog(struct work_struct *work)
+{
+	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
+	struct ccp_device *ccp = cmd->ccp;
+	unsigned long flags;
+	unsigned int i;
+
+	cmd->callback(cmd->data, -EINPROGRESS);
+
+	spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+	ccp->cmd_count++;
+	list_add_tail(&cmd->entry, &ccp->cmd);
+
+	/* Find an idle queue */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		if (ccp->cmd_q[i].active)
+			continue;
+
+		break;
+	}
+
+	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+	/* If we found an idle queue, wake it up */
+	if (i < ccp->cmd_q_count)
+		wake_up_process(ccp->cmd_q[i].kthread);
+}
+
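+/* Fetch the next cmd for this queue. Returns NULL when the device is
+ * suspending (the queue is marked suspended) or when no work is pending.
+ * Any backlogged cmd is handed to a work item so that its -EINPROGRESS
+ * notification runs outside of cmd_lock.
+ */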
+static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
+{
+	struct ccp_device *ccp = cmd_q->ccp;
+	struct ccp_cmd *cmd = NULL;
+	struct ccp_cmd *backlog = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+	cmd_q->active = 0;
+
+	if (ccp->suspending) {
+		cmd_q->suspended = 1;
+
+		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+		wake_up_interruptible(&ccp->suspend_queue);
+
+		return NULL;
+	}
+
+	if (ccp->cmd_count) {
+		cmd_q->active = 1;
+
+		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
+		list_del(&cmd->entry);
+
+		ccp->cmd_count--;
+	}
+
+	if (!list_empty(&ccp->backlog)) {
+		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
+					   entry);
+		list_del(&backlog->entry);
+	}
+
+	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+	if (backlog) {
+		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
+		schedule_work(&backlog->work);
+	}
+
+	return cmd;
+}
+
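+/* Tasklet body: invoke the cmd's completion callback in softirq context;
+ * the queue kthread blocks on tdata->completion until it has run.
+ */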
+static void ccp_do_cmd_complete(unsigned long data)
+{
+	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
+	struct ccp_cmd *cmd = tdata->cmd;
+
+	cmd->callback(cmd->data, cmd->ret);
+
+	complete(&tdata->completion);
+}
+
+/**
+ * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue
+ *
+ * @data: thread-specific data
+ */
+int ccp_cmd_queue_thread(void *data)
+{
+	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
+	struct ccp_cmd *cmd;
+	struct ccp_tasklet_data tdata;
+	struct tasklet_struct tasklet;
+
+	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		schedule();
+
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		cmd = ccp_dequeue_cmd(cmd_q);
+		if (!cmd)
+			continue;
+
+		__set_current_state(TASK_RUNNING);
+
+		/* Execute the command */
+		cmd->ret = ccp_run_cmd(cmd_q, cmd);
+
+		/* Schedule the completion callback */
+		tdata.cmd = cmd;
+		init_completion(&tdata.completion);
+		tasklet_schedule(&tasklet);
+		wait_for_completion(&tdata.completion);
+	}
+
+	__set_current_state(TASK_RUNNING);
+
+	return 0;
+}
+
+/**
+ * ccp_alloc_struct - allocate and initialize the ccp_device struct
+ *
+ * @sp: sp_device struct of the CCP
+ */
+struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
+{
+	struct device *dev = sp->dev;
+	struct ccp_device *ccp;
+
+	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
+	if (!ccp)
+		return NULL;
+	ccp->dev = dev;
+	ccp->sp = sp;
+	ccp->axcache = sp->axcache;
+
+	INIT_LIST_HEAD(&ccp->cmd);
+	INIT_LIST_HEAD(&ccp->backlog);
+
+	spin_lock_init(&ccp->cmd_lock);
+	mutex_init(&ccp->req_mutex);
+	mutex_init(&ccp->sb_mutex);
+	ccp->sb_count = KSB_COUNT;
+	ccp->sb_start = 0;
+
+	/* Initialize the wait queues */
+	init_waitqueue_head(&ccp->sb_queue);
+	init_waitqueue_head(&ccp->suspend_queue);
+
+	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
+	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);
+
+	return ccp;
+}
+
+int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
+	u32 trng_value;
+	int len = min_t(int, sizeof(trng_value), max);
+
+	/* Locking is provided by the caller so we can update device
+	 * hwrng-related fields safely
+	 */
+	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
+	if (!trng_value) {
+		/* Zero is returned if no data is available or if a
+		 * bad-entropy error is present. Assume an error if
+		 * we exceed TRNG_RETRIES reads of zero.
+		 */
+		if (ccp->hwrng_retries++ > TRNG_RETRIES)
+			return -EIO;
+
+		return 0;
+	}
+
+	/* Reset the counter and save the rng value */
+	ccp->hwrng_retries = 0;
+	memcpy(data, &trng_value, len);
+
+	return len;
+}
+
+#ifdef CONFIG_PM
+bool ccp_queues_suspended(struct ccp_device *ccp)
+{
+	unsigned int suspended = 0;
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		if (ccp->cmd_q[i].suspended)
+			suspended++;
+
+	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+	return ccp->cmd_q_count == suspended;
+}
+
+int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
+{
+	struct ccp_device *ccp = sp->ccp_data;
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+	ccp->suspending = 1;
+
+	/* Wake all the queue kthreads to prepare for suspend */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		wake_up_process(ccp->cmd_q[i].kthread);
+
+	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+	/* Wait for all queue kthreads to say they're done */
+	while (!ccp_queues_suspended(ccp))
+		wait_event_interruptible(ccp->suspend_queue,
+					 ccp_queues_suspended(ccp));
+
+	return 0;
+}
+
+int ccp_dev_resume(struct sp_device *sp)
+{
+	struct ccp_device *ccp = sp->ccp_data;
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+	ccp->suspending = 0;
+
+	/* Wake up all the kthreads */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		ccp->cmd_q[i].suspended = 0;
+		wake_up_process(ccp->cmd_q[i].kthread);
+	}
+
+	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+	return 0;
+}
+#endif
+
+int ccp_dev_init(struct sp_device *sp)
+{
+	struct device *dev = sp->dev;
+	struct ccp_device *ccp;
+	int ret;
+
+	ret = -ENOMEM;
+	ccp = ccp_alloc_struct(sp);
+	if (!ccp)
+		goto e_err;
+	sp->ccp_data = ccp;
+
+	ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
+	if (!ccp->vdata || !ccp->vdata->version) {
+		ret = -ENODEV;
+		dev_err(dev, "missing driver data\n");
+		goto e_err;
+	}
+
+	ccp->use_tasklet = sp->use_tasklet;
+
+	ccp->io_regs = sp->io_map + ccp->vdata->offset;
+	if (ccp->vdata->setup)
+		ccp->vdata->setup(ccp);
+
+	ret = ccp->vdata->perform->init(ccp);
+	if (ret)
+		goto e_err;
+
+	dev_notice(dev, "ccp enabled\n");
+
+	return 0;
+
+e_err:
+	sp->ccp_data = NULL;
+
+	dev_notice(dev, "ccp initialization failed\n");
+
+	return ret;
+}
+
+void ccp_dev_destroy(struct sp_device *sp)
+{
+	struct ccp_device *ccp = sp->ccp_data;
+
+	if (!ccp)
+		return;
+
+	ccp->vdata->perform->destroy(ccp);
+}
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
new file mode 100644
index 0000000..6810b65
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -0,0 +1,674 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CCP_DEV_H__
+#define __CCP_DEV_H__
+
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/dmaengine.h>
+
+#include "sp-dev.h"
+
+#define MAX_CCP_NAME_LEN		16
+#define MAX_DMAPOOL_NAME_LEN		32
+
+#define MAX_HW_QUEUES			5
+#define MAX_CMD_QLEN			100
+
+#define TRNG_RETRIES			10
+
+#define CACHE_NONE			0x00
+#define CACHE_WB_NO_ALLOC		0xb7
+
+/****** Register Mappings ******/
+#define Q_MASK_REG			0x000
+#define TRNG_OUT_REG			0x00c
+#define IRQ_MASK_REG			0x040
+#define IRQ_STATUS_REG			0x200
+
+#define DEL_CMD_Q_JOB			0x124
+#define DEL_Q_ACTIVE			0x00000200
+#define DEL_Q_ID_SHIFT			6
+
+#define CMD_REQ0			0x180
+#define CMD_REQ_INCR			0x04
+
+#define CMD_Q_STATUS_BASE		0x210
+#define CMD_Q_INT_STATUS_BASE		0x214
+#define CMD_Q_STATUS_INCR		0x20
+
+#define CMD_Q_CACHE_BASE		0x228
+#define CMD_Q_CACHE_INC			0x20
+
+#define CMD_Q_ERROR(__qs)		((__qs) & 0x0000003f)
+#define CMD_Q_DEPTH(__qs)		(((__qs) >> 12) & 0x0000000f)
+
+/* ------------------------ CCP Version 5 Specifics ------------------------ */
+#define CMD5_QUEUE_MASK_OFFSET		0x00
+#define	CMD5_QUEUE_PRIO_OFFSET		0x04
+#define CMD5_REQID_CONFIG_OFFSET	0x08
+#define	CMD5_CMD_TIMEOUT_OFFSET		0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET	0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET	0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET	0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET	0x24
+#define CMD5_PSP_CCP_VERSION		0x100
+
+#define CMD5_Q_CONTROL_BASE		0x0000
+#define CMD5_Q_TAIL_LO_BASE		0x0004
+#define CMD5_Q_HEAD_LO_BASE		0x0008
+#define CMD5_Q_INT_ENABLE_BASE		0x000C
+#define CMD5_Q_INTERRUPT_STATUS_BASE	0x0010
+
+#define CMD5_Q_STATUS_BASE		0x0100
+#define CMD5_Q_INT_STATUS_BASE		0x0104
+#define CMD5_Q_DMA_STATUS_BASE		0x0108
+#define CMD5_Q_DMA_READ_STATUS_BASE	0x010C
+#define CMD5_Q_DMA_WRITE_STATUS_BASE	0x0110
+#define CMD5_Q_ABORT_BASE		0x0114
+#define CMD5_Q_AX_CACHE_BASE		0x0118
+
+#define	CMD5_CONFIG_0_OFFSET		0x6000
+#define	CMD5_TRNG_CTL_OFFSET		0x6008
+#define	CMD5_AES_MASK_OFFSET		0x6010
+#define	CMD5_CLK_GATE_CTL_OFFSET	0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD5_Q_STATUS_INCR		0x1000
+
+/* Bit masks */
+#define CMD5_Q_RUN			0x1
+#define CMD5_Q_HALT			0x2
+#define CMD5_Q_MEM_LOCATION		0x4
+#define CMD5_Q_SIZE			0x1F
+#define CMD5_Q_SHIFT			3
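+/* With COMMANDS_PER_QUEUE == 16, ffs(16) == 5, so QUEUE_SIZE_VAL == 3 and
+ * Q_SIZE(Q_DESC_SIZE) is the byte size of a 16-entry descriptor ring.
+ */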
+#define COMMANDS_PER_QUEUE		16
+#define QUEUE_SIZE_VAL			((ffs(COMMANDS_PER_QUEUE) - 2) & \
+					  CMD5_Q_SIZE)
+#define Q_PTR_MASK			(2 << (QUEUE_SIZE_VAL + 5) - 1)
+#define Q_DESC_SIZE			sizeof(struct ccp5_desc)
+#define Q_SIZE(n)			(COMMANDS_PER_QUEUE*(n))
+
+#define INT_COMPLETION			0x1
+#define INT_ERROR			0x2
+#define INT_QUEUE_STOPPED		0x4
+#define	INT_EMPTY_QUEUE			0x8
+#define SUPPORTED_INTERRUPTS		(INT_COMPLETION | INT_ERROR)
+
+#define LSB_REGION_WIDTH		5
+#define MAX_LSB_CNT			8
+
+#define LSB_SIZE			16
+#define LSB_ITEM_SIZE			32
+#define PLSB_MAP_SIZE			(LSB_SIZE)
+#define SLSB_MAP_SIZE			(MAX_LSB_CNT * LSB_SIZE)
+
+#define LSB_ENTRY_NUMBER(LSB_ADDR)	(LSB_ADDR / LSB_ITEM_SIZE)
+
+/* ------------------------ CCP Version 3 Specifics ------------------------ */
+#define REQ0_WAIT_FOR_WRITE		0x00000004
+#define REQ0_INT_ON_COMPLETE		0x00000002
+#define REQ0_STOP_ON_COMPLETE		0x00000001
+
+#define REQ0_CMD_Q_SHIFT		9
+#define REQ0_JOBID_SHIFT		3
+
+/****** REQ1 Related Values ******/
+#define REQ1_PROTECT_SHIFT		27
+#define REQ1_ENGINE_SHIFT		23
+#define REQ1_KEY_KSB_SHIFT		2
+
+#define REQ1_EOM			0x00000002
+#define REQ1_INIT			0x00000001
+
+/* AES Related Values */
+#define REQ1_AES_TYPE_SHIFT		21
+#define REQ1_AES_MODE_SHIFT		18
+#define REQ1_AES_ACTION_SHIFT		17
+#define REQ1_AES_CFB_SIZE_SHIFT		10
+
+/* XTS-AES Related Values */
+#define REQ1_XTS_AES_SIZE_SHIFT		10
+
+/* SHA Related Values */
+#define REQ1_SHA_TYPE_SHIFT		21
+
+/* RSA Related Values */
+#define REQ1_RSA_MOD_SIZE_SHIFT		10
+
+/* Pass-Through Related Values */
+#define REQ1_PT_BW_SHIFT		12
+#define REQ1_PT_BS_SHIFT		10
+
+/* ECC Related Values */
+#define REQ1_ECC_AFFINE_CONVERT		0x00200000
+#define REQ1_ECC_FUNCTION_SHIFT		18
+
+/****** REQ4 Related Values ******/
+#define REQ4_KSB_SHIFT			18
+#define REQ4_MEMTYPE_SHIFT		16
+
+/****** REQ6 Related Values ******/
+#define REQ6_MEMTYPE_SHIFT		16
+
+/****** Key Storage Block ******/
+#define KSB_START			77
+#define KSB_END				127
+#define KSB_COUNT			(KSB_END - KSB_START + 1)
+#define CCP_SB_BITS			256
+
+#define CCP_JOBID_MASK			0x0000003f
+
+/* ------------------------ General CCP Defines ------------------------ */
+
+#define	CCP_DMA_DFLT			0x0
+#define	CCP_DMA_PRIV			0x1
+#define	CCP_DMA_PUB			0x2
+
+#define CCP_DMAPOOL_MAX_SIZE		64
+#define CCP_DMAPOOL_ALIGN		BIT(5)
+
+#define CCP_REVERSE_BUF_SIZE		64
+
+#define CCP_AES_KEY_SB_COUNT		1
+#define CCP_AES_CTX_SB_COUNT		1
+
+#define CCP_XTS_AES_KEY_SB_COUNT	1
+#define CCP5_XTS_AES_KEY_SB_COUNT	2
+#define CCP_XTS_AES_CTX_SB_COUNT	1
+
+#define CCP_DES3_KEY_SB_COUNT		1
+#define CCP_DES3_CTX_SB_COUNT		1
+
+#define CCP_SHA_SB_COUNT		1
+
+#define CCP_RSA_MAX_WIDTH		4096
+#define CCP5_RSA_MAX_WIDTH		16384
+
+#define CCP_PASSTHRU_BLOCKSIZE		256
+#define CCP_PASSTHRU_MASKSIZE		32
+#define CCP_PASSTHRU_SB_COUNT		1
+
+#define CCP_ECC_MODULUS_BYTES		48      /* 384-bits */
+#define CCP_ECC_MAX_OPERANDS		6
+#define CCP_ECC_MAX_OUTPUTS		3
+#define CCP_ECC_SRC_BUF_SIZE		448
+#define CCP_ECC_DST_BUF_SIZE		192
+#define CCP_ECC_OPERAND_SIZE		64
+#define CCP_ECC_OUTPUT_SIZE		64
+#define CCP_ECC_RESULT_OFFSET		60
+#define CCP_ECC_RESULT_SUCCESS		0x0001
+
+#define CCP_SB_BYTES			32
+
+struct ccp_op;
+struct ccp_device;
+struct ccp_cmd;
+struct ccp_fns;
+
+struct ccp_dma_cmd {
+	struct list_head entry;
+
+	struct ccp_cmd ccp_cmd;
+};
+
+struct ccp_dma_desc {
+	struct list_head entry;
+
+	struct ccp_device *ccp;
+
+	struct list_head pending;
+	struct list_head active;
+
+	enum dma_status status;
+	struct dma_async_tx_descriptor tx_desc;
+	size_t len;
+};
+
+struct ccp_dma_chan {
+	struct ccp_device *ccp;
+
+	spinlock_t lock;
+	struct list_head created;
+	struct list_head pending;
+	struct list_head active;
+	struct list_head complete;
+
+	struct tasklet_struct cleanup_tasklet;
+
+	enum dma_status status;
+	struct dma_chan dma_chan;
+};
+
+struct ccp_cmd_queue {
+	struct ccp_device *ccp;
+
+	/* Queue identifier */
+	u32 id;
+
+	/* Queue dma pool */
+	struct dma_pool *dma_pool;
+
+	/* Queue base address (not necessarily aligned) */
+	struct ccp5_desc *qbase;
+
+	/* Aligned queue start address (per requirement) */
+	struct mutex q_mutex ____cacheline_aligned;
+	unsigned int qidx;
+
+	/* Version 5 has different requirements for queue memory */
+	unsigned int qsize;
+	dma_addr_t qbase_dma;
+	dma_addr_t qdma_tail;
+
+	/* Per-queue reserved storage block(s) */
+	u32 sb_key;
+	u32 sb_ctx;
+
+	/* Bitmap of LSBs that can be accessed by this queue */
+	DECLARE_BITMAP(lsbmask, MAX_LSB_CNT);
+	/* Private LSB that is assigned to this queue, or -1 if none.
+	 * lsbmap tracks entries within that private LSB and is unused
+	 * if the queue has no private LSB.
+	 */
+	int lsb;
+	DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE);
+
+	/* Queue processing thread */
+	struct task_struct *kthread;
+	unsigned int active;
+	unsigned int suspended;
+
+	/* Number of free command slots available */
+	unsigned int free_slots;
+
+	/* Interrupt masks */
+	u32 int_ok;
+	u32 int_err;
+
+	/* Register addresses for queue */
+	void __iomem *reg_control;
+	void __iomem *reg_tail_lo;
+	void __iomem *reg_head_lo;
+	void __iomem *reg_int_enable;
+	void __iomem *reg_interrupt_status;
+	void __iomem *reg_status;
+	void __iomem *reg_int_status;
+	void __iomem *reg_dma_status;
+	void __iomem *reg_dma_read_status;
+	void __iomem *reg_dma_write_status;
+	u32 qcontrol; /* Cached control register */
+
+	/* Status values from job */
+	u32 int_status;
+	u32 q_status;
+	u32 q_int_status;
+	u32 cmd_error;
+
+	/* Interrupt wait queue */
+	wait_queue_head_t int_queue;
+	unsigned int int_rcvd;
+
+	/* Per-queue Statistics */
+	unsigned long total_ops;
+	unsigned long total_aes_ops;
+	unsigned long total_xts_aes_ops;
+	unsigned long total_3des_ops;
+	unsigned long total_sha_ops;
+	unsigned long total_rsa_ops;
+	unsigned long total_pt_ops;
+	unsigned long total_ecc_ops;
+} ____cacheline_aligned;
+
+struct ccp_device {
+	struct list_head entry;
+
+	struct ccp_vdata *vdata;
+	unsigned int ord;
+	char name[MAX_CCP_NAME_LEN];
+	char rngname[MAX_CCP_NAME_LEN];
+
+	struct device *dev;
+	struct sp_device *sp;
+
+	/* Bus specific device information
+	 */
+	void *dev_specific;
+	unsigned int qim;
+	unsigned int irq;
+	bool use_tasklet;
+	struct tasklet_struct irq_tasklet;
+
+	/* I/O area used for device communication. The register mapping
+	 * starts at an offset into the mapped bar.
+	 *   The CMD_REQx registers and the Delete_Cmd_Queue_Job register
+	 *   need to be protected while a command queue thread is accessing
+	 *   them.
+	 */
+	struct mutex req_mutex ____cacheline_aligned;
+	void __iomem *io_regs;
+
+	/* Master lists that all cmds are queued on. Because there can be
+	 * more than one CCP command queue that can process a cmd a separate
+	 * backlog list is needed so that the backlog completion call
+	 * completes before the cmd is available for execution.
+	 */
+	spinlock_t cmd_lock ____cacheline_aligned;
+	unsigned int cmd_count;
+	struct list_head cmd;
+	struct list_head backlog;
+
+	/* The command queues. These represent the queues available on the
+	 * CCP that are available for processing cmds
+	 */
+	struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES];
+	unsigned int cmd_q_count;
+
+	/* Support for the CCP True RNG
+	 */
+	struct hwrng hwrng;
+	unsigned int hwrng_retries;
+
+	/* Support for the CCP DMA capabilities
+	 */
+	struct dma_device dma_dev;
+	struct ccp_dma_chan *ccp_dma_chan;
+	struct kmem_cache *dma_cmd_cache;
+	struct kmem_cache *dma_desc_cache;
+
+	/* A counter used to generate job-ids for cmds submitted to the CCP
+	 */
+	atomic_t current_id ____cacheline_aligned;
+
+	/* The v3 CCP uses key storage blocks (SB) to maintain context for
+	 * certain operations. To prevent multiple cmds from using the same
+	 * SB range a command queue reserves an SB range for the duration of
+	 * the cmd. Each queue will, however, reserve 2 SB blocks for
+	 * operations that only require single SB entries (eg. AES context/iv
+	 * and key) in order to avoid allocation contention.  This will reserve
+	 * at most 10 SB entries, leaving 40 SB entries available for dynamic
+	 * allocation.
+	 *
+	 * The v5 CCP Local Storage Block (LSB) is broken up into 8
+	 * memory ranges, each of which can be enabled for access by one
+	 * or more queues. Device initialization takes this into account,
+	 * and attempts to assign one region for exclusive use by each
+	 * available queue; the rest are then aggregated as "public" use.
+	 * If there are fewer regions than queues, all regions are shared
+	 * amongst all queues.
+	 */
+	struct mutex sb_mutex ____cacheline_aligned;
+	DECLARE_BITMAP(sb, KSB_COUNT);
+	wait_queue_head_t sb_queue;
+	unsigned int sb_avail;
+	unsigned int sb_count;
+	u32 sb_start;
+
+	/* Bitmap of shared LSBs, if any */
+	DECLARE_BITMAP(lsbmap, SLSB_MAP_SIZE);
+
+	/* Suspend support */
+	unsigned int suspending;
+	wait_queue_head_t suspend_queue;
+
+	/* DMA caching attribute support */
+	unsigned int axcache;
+
+	/* Device Statistics */
+	unsigned long total_interrupts;
+
+	/* DebugFS info */
+	struct dentry *debugfs_instance;
+};
+
+enum ccp_memtype {
+	CCP_MEMTYPE_SYSTEM = 0,
+	CCP_MEMTYPE_SB,
+	CCP_MEMTYPE_LOCAL,
+	CCP_MEMTYPE__LAST,
+};
+#define	CCP_MEMTYPE_LSB	CCP_MEMTYPE_SB
+
+
+struct ccp_dma_info {
+	dma_addr_t address;
+	unsigned int offset;
+	unsigned int length;
+	enum dma_data_direction dir;
+} __packed __aligned(4);
+
+struct ccp_dm_workarea {
+	struct device *dev;
+	struct dma_pool *dma_pool;
+
+	u8 *address;
+	struct ccp_dma_info dma;
+	unsigned int length;
+};
+
+struct ccp_sg_workarea {
+	struct scatterlist *sg;
+	int nents;
+	unsigned int sg_used;
+
+	struct scatterlist *dma_sg;
+	struct device *dma_dev;
+	unsigned int dma_count;
+	enum dma_data_direction dma_dir;
+
+	u64 bytes_left;
+};
+
+struct ccp_data {
+	struct ccp_sg_workarea sg_wa;
+	struct ccp_dm_workarea dm_wa;
+};
+
+struct ccp_mem {
+	enum ccp_memtype type;
+	union {
+		struct ccp_dma_info dma;
+		u32 sb;
+	} u;
+};
+
+struct ccp_aes_op {
+	enum ccp_aes_type type;
+	enum ccp_aes_mode mode;
+	enum ccp_aes_action action;
+	unsigned int size;
+};
+
+struct ccp_xts_aes_op {
+	enum ccp_aes_type type;
+	enum ccp_aes_action action;
+	enum ccp_xts_aes_unit_size unit_size;
+};
+
+struct ccp_des3_op {
+	enum ccp_des3_type type;
+	enum ccp_des3_mode mode;
+	enum ccp_des3_action action;
+};
+
+struct ccp_sha_op {
+	enum ccp_sha_type type;
+	u64 msg_bits;
+};
+
+struct ccp_rsa_op {
+	u32 mod_size;
+	u32 input_len;
+};
+
+struct ccp_passthru_op {
+	enum ccp_passthru_bitwise bit_mod;
+	enum ccp_passthru_byteswap byte_swap;
+};
+
+struct ccp_ecc_op {
+	enum ccp_ecc_function function;
+};
+
+struct ccp_op {
+	struct ccp_cmd_queue *cmd_q;
+
+	u32 jobid;
+	u32 ioc;
+	u32 soc;
+	u32 sb_key;
+	u32 sb_ctx;
+	u32 init;
+	u32 eom;
+
+	struct ccp_mem src;
+	struct ccp_mem dst;
+	struct ccp_mem exp;
+
+	union {
+		struct ccp_aes_op aes;
+		struct ccp_xts_aes_op xts;
+		struct ccp_des3_op des3;
+		struct ccp_sha_op sha;
+		struct ccp_rsa_op rsa;
+		struct ccp_passthru_op passthru;
+		struct ccp_ecc_op ecc;
+	} u;
+};
+
+static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
+{
+	return lower_32_bits(info->address + info->offset);
+}
+
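+/* CCP5 descriptors carry only 48-bit addresses: a 32-bit low word plus a
+ * 16-bit high half-word (see the dword3/dword5/dword7 layouts below), hence
+ * the 0x0000ffff mask.
+ */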
+static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
+{
+	return upper_32_bits(info->address + info->offset) & 0x0000ffff;
+}
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+	unsigned int soc:1;
+	unsigned int ioc:1;
+	unsigned int rsvd1:1;
+	unsigned int init:1;
+	unsigned int eom:1;		/* AES/SHA only */
+	unsigned int function:15;
+	unsigned int engine:4;
+	unsigned int prot:1;
+	unsigned int rsvd2:7;
+};
+
+struct dword3 {
+	unsigned int  src_hi:16;
+	unsigned int  src_mem:2;
+	unsigned int  lsb_cxt_id:8;
+	unsigned int  rsvd1:5;
+	unsigned int  fixed:1;
+};
+
+union dword4 {
+	__le32 dst_lo;		/* NON-SHA	*/
+	__le32 sha_len_lo;	/* SHA		*/
+};
+
+union dword5 {
+	struct {
+		unsigned int  dst_hi:16;
+		unsigned int  dst_mem:2;
+		unsigned int  rsvd1:13;
+		unsigned int  fixed:1;
+	} fields;
+	__le32 sha_len_hi;
+};
+
+struct dword7 {
+	unsigned int  key_hi:16;
+	unsigned int  key_mem:2;
+	unsigned int  rsvd1:14;
+};
+
+struct ccp5_desc {
+	struct dword0 dw0;
+	__le32 length;
+	__le32 src_lo;
+	struct dword3 dw3;
+	union dword4 dw4;
+	union dword5 dw5;
+	__le32 key_lo;
+	struct dword7 dw7;
+};
+
+void ccp_add_device(struct ccp_device *ccp);
+void ccp_del_device(struct ccp_device *ccp);
+
+extern void ccp_log_error(struct ccp_device *, int);
+
+struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
+bool ccp_queues_suspended(struct ccp_device *ccp);
+int ccp_cmd_queue_thread(void *data);
+int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait);
+
+int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
+
+int ccp_register_rng(struct ccp_device *ccp);
+void ccp_unregister_rng(struct ccp_device *ccp);
+int ccp_dmaengine_register(struct ccp_device *ccp);
+void ccp_dmaengine_unregister(struct ccp_device *ccp);
+
+void ccp5_debugfs_setup(struct ccp_device *ccp);
+void ccp5_debugfs_destroy(void);
+
+/* Structure for computation functions that are device-specific */
+struct ccp_actions {
+	int (*aes)(struct ccp_op *);
+	int (*xts_aes)(struct ccp_op *);
+	int (*des3)(struct ccp_op *);
+	int (*sha)(struct ccp_op *);
+	int (*rsa)(struct ccp_op *);
+	int (*passthru)(struct ccp_op *);
+	int (*ecc)(struct ccp_op *);
+	u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int);
+	void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int);
+	unsigned int (*get_free_slots)(struct ccp_cmd_queue *);
+	int (*init)(struct ccp_device *);
+	void (*destroy)(struct ccp_device *);
+	irqreturn_t (*irqhandler)(int, void *);
+};
+
+extern const struct ccp_vdata ccpv3_platform;
+extern const struct ccp_vdata ccpv3;
+extern const struct ccp_vdata ccpv5a;
+extern const struct ccp_vdata ccpv5b;
+
+#endif
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
new file mode 100644
index 0000000..67155cb
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -0,0 +1,750 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/dmaengine.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+#include "../../dma/dmaengine.h"
+
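+/* Derive an address-width value from a DMA mask: an all-ones 64-bit mask
+ * maps to 64, anything narrower to fls64(mask + 1).
+ */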
+#define CCP_DMA_WIDTH(_mask)		\
+({					\
+	u64 mask = _mask + 1;		\
+	(mask == 0) ? 64 : fls64(mask);	\
+})
+
+/* The CCP as a DMA provider can be configured for public or private
+ * channels. Default is specified in the vdata for the device (PCI ID).
+ * This module parameter overrides the vdata setting for all channels on
+ * all devices:
+ *   dma_chan_attr = 0x2 to force all channels public
+ *                 = 0x1 to force all channels private
+ *                 = 0x0 to defer to the vdata setting
+ *                 = any other value: warning, revert to 0x0
+ */
+static unsigned int dma_chan_attr = CCP_DMA_DFLT;
+module_param(dma_chan_attr, uint, 0444);
+MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");
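+/* For example (assuming the driver is loaded as the ccp module), passing
+ * "dma_chan_attr=0x1" on the module command line forces every channel
+ * private regardless of the per-device vdata default.
+ */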
+
+static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
+{
+	switch (dma_chan_attr) {
+	case CCP_DMA_DFLT:
+		return ccp->vdata->dma_chan_attr;
+
+	case CCP_DMA_PRIV:
+		return DMA_PRIVATE;
+
+	case CCP_DMA_PUB:
+		return 0;
+
+	default:
+		dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
+			      dma_chan_attr);
+		return ccp->vdata->dma_chan_attr;
+	}
+}
+
+static void ccp_free_cmd_resources(struct ccp_device *ccp,
+				   struct list_head *list)
+{
+	struct ccp_dma_cmd *cmd, *ctmp;
+
+	list_for_each_entry_safe(cmd, ctmp, list, entry) {
+		list_del(&cmd->entry);
+		kmem_cache_free(ccp->dma_cmd_cache, cmd);
+	}
+}
+
+static void ccp_free_desc_resources(struct ccp_device *ccp,
+				    struct list_head *list)
+{
+	struct ccp_dma_desc *desc, *dtmp;
+
+	list_for_each_entry_safe(desc, dtmp, list, entry) {
+		ccp_free_cmd_resources(ccp, &desc->active);
+		ccp_free_cmd_resources(ccp, &desc->pending);
+
+		list_del(&desc->entry);
+		kmem_cache_free(ccp->dma_desc_cache, desc);
+	}
+}
+
+static void ccp_free_chan_resources(struct dma_chan *dma_chan)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	unsigned long flags;
+
+	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	ccp_free_desc_resources(chan->ccp, &chan->complete);
+	ccp_free_desc_resources(chan->ccp, &chan->active);
+	ccp_free_desc_resources(chan->ccp, &chan->pending);
+	ccp_free_desc_resources(chan->ccp, &chan->created);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
+				       struct list_head *list)
+{
+	struct ccp_dma_desc *desc, *dtmp;
+
+	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
+		if (!async_tx_test_ack(&desc->tx_desc))
+			continue;
+
+		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);
+
+		ccp_free_cmd_resources(ccp, &desc->active);
+		ccp_free_cmd_resources(ccp, &desc->pending);
+
+		list_del(&desc->entry);
+		kmem_cache_free(ccp->dma_desc_cache, desc);
+	}
+}
+
+static void ccp_do_cleanup(unsigned long data)
+{
+	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
+	unsigned long flags;
+
+	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
+		dma_chan_name(&chan->dma_chan));
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
+{
+	struct ccp_dma_cmd *cmd;
+	int ret;
+
+	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
+	list_move(&cmd->entry, &desc->active);
+
+	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
+		desc->tx_desc.cookie, cmd);
+
+	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
+	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
+		return 0;
+
+	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
+		ret, desc->tx_desc.cookie, cmd);
+
+	return ret;
+}
+
+static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
+{
+	struct ccp_dma_cmd *cmd;
+
+	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
+				       entry);
+	if (!cmd)
+		return;
+
+	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
+		__func__, desc->tx_desc.cookie, cmd);
+
+	list_del(&cmd->entry);
+	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
+}
+
+static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
+						struct ccp_dma_desc *desc)
+{
+	/* Move current DMA descriptor to the complete list */
+	if (desc)
+		list_move(&desc->entry, &chan->complete);
+
+	/* Get the next DMA descriptor on the active list */
+	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
+					entry);
+
+	return desc;
+}
+
+static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
+						   struct ccp_dma_desc *desc)
+{
+	struct dma_async_tx_descriptor *tx_desc;
+	unsigned long flags;
+
+	/* Loop over descriptors until one is found with commands */
+	do {
+		if (desc) {
+			/* Remove the DMA command from the list and free it */
+			ccp_free_active_cmd(desc);
+
+			if (!list_empty(&desc->pending)) {
+				/* No errors, keep going */
+				if (desc->status != DMA_ERROR)
+					return desc;
+
+				/* Error, free remaining commands and move on */
+				ccp_free_cmd_resources(desc->ccp,
+						       &desc->pending);
+			}
+
+			tx_desc = &desc->tx_desc;
+		} else {
+			tx_desc = NULL;
+		}
+
+		spin_lock_irqsave(&chan->lock, flags);
+
+		if (desc) {
+			if (desc->status != DMA_ERROR)
+				desc->status = DMA_COMPLETE;
+
+			dev_dbg(desc->ccp->dev,
+				"%s - tx %d complete, status=%u\n", __func__,
+				desc->tx_desc.cookie, desc->status);
+
+			dma_cookie_complete(tx_desc);
+			dma_descriptor_unmap(tx_desc);
+		}
+
+		desc = __ccp_next_dma_desc(chan, desc);
+
+		spin_unlock_irqrestore(&chan->lock, flags);
+
+		if (tx_desc) {
+			dmaengine_desc_get_callback_invoke(tx_desc, NULL);
+
+			dma_run_dependencies(tx_desc);
+		}
+	} while (desc);
+
+	return NULL;
+}
+
+static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
+{
+	struct ccp_dma_desc *desc;
+
+	if (list_empty(&chan->pending))
+		return NULL;
+
+	desc = list_empty(&chan->active)
+		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
+		: NULL;
+
+	list_splice_tail_init(&chan->pending, &chan->active);
+
+	return desc;
+}
+
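+/* Completion callback shared by every cmd of a DMA descriptor: each time a
+ * cmd finishes, the next cmd of the descriptor (or of the next active
+ * descriptor once this one completes) is submitted, so work chains through
+ * the hardware without further intervention from the submitter.
+ */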
+static void ccp_cmd_callback(void *data, int err)
+{
+	struct ccp_dma_desc *desc = data;
+	struct ccp_dma_chan *chan;
+	int ret;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
+			    dma_chan);
+
+	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
+		__func__, desc->tx_desc.cookie, err);
+
+	if (err)
+		desc->status = DMA_ERROR;
+
+	while (true) {
+		/* Check for DMA descriptor completion */
+		desc = ccp_handle_active_desc(chan, desc);
+
+		/* Don't submit cmd if no descriptor or DMA is paused */
+		if (!desc || (chan->status == DMA_PAUSED))
+			break;
+
+		ret = ccp_issue_next_cmd(desc);
+		if (!ret)
+			break;
+
+		desc->status = DMA_ERROR;
+	}
+
+	tasklet_schedule(&chan->cleanup_tasklet);
+}
+
+static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
+{
+	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
+						 tx_desc);
+	struct ccp_dma_chan *chan;
+	dma_cookie_t cookie;
+	unsigned long flags;
+
+	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	cookie = dma_cookie_assign(tx_desc);
+	list_del(&desc->entry);
+	list_add_tail(&desc->entry, &chan->pending);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
+		__func__, cookie);
+
+	return cookie;
+}
+
+static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
+{
+	struct ccp_dma_cmd *cmd;
+
+	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
+	if (cmd)
+		memset(cmd, 0, sizeof(*cmd));
+
+	return cmd;
+}
+
+static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
+					       unsigned long flags)
+{
+	struct ccp_dma_desc *desc;
+
+	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
+	if (!desc)
+		return NULL;
+
+	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
+	desc->tx_desc.flags = flags;
+	desc->tx_desc.tx_submit = ccp_tx_submit;
+	desc->ccp = chan->ccp;
+	INIT_LIST_HEAD(&desc->pending);
+	INIT_LIST_HEAD(&desc->active);
+	desc->status = DMA_IN_PROGRESS;
+
+	return desc;
+}
+
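+/* Build a DMA descriptor by walking the source and destination scatterlists
+ * in parallel: each iteration emits one passthrough cmd covering
+ * min(src_len, dst_len) bytes; the cmds accumulate on the descriptor's
+ * pending list until either scatterlist is exhausted.
+ */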
+static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
+					    struct scatterlist *dst_sg,
+					    unsigned int dst_nents,
+					    struct scatterlist *src_sg,
+					    unsigned int src_nents,
+					    unsigned long flags)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	struct ccp_device *ccp = chan->ccp;
+	struct ccp_dma_desc *desc;
+	struct ccp_dma_cmd *cmd;
+	struct ccp_cmd *ccp_cmd;
+	struct ccp_passthru_nomap_engine *ccp_pt;
+	unsigned int src_offset, src_len;
+	unsigned int dst_offset, dst_len;
+	unsigned int len;
+	unsigned long sflags;
+	size_t total_len;
+
+	if (!dst_sg || !src_sg)
+		return NULL;
+
+	if (!dst_nents || !src_nents)
+		return NULL;
+
+	desc = ccp_alloc_dma_desc(chan, flags);
+	if (!desc)
+		return NULL;
+
+	total_len = 0;
+
+	src_len = sg_dma_len(src_sg);
+	src_offset = 0;
+
+	dst_len = sg_dma_len(dst_sg);
+	dst_offset = 0;
+
+	while (true) {
+		if (!src_len) {
+			src_nents--;
+			if (!src_nents)
+				break;
+
+			src_sg = sg_next(src_sg);
+			if (!src_sg)
+				break;
+
+			src_len = sg_dma_len(src_sg);
+			src_offset = 0;
+			continue;
+		}
+
+		if (!dst_len) {
+			dst_nents--;
+			if (!dst_nents)
+				break;
+
+			dst_sg = sg_next(dst_sg);
+			if (!dst_sg)
+				break;
+
+			dst_len = sg_dma_len(dst_sg);
+			dst_offset = 0;
+			continue;
+		}
+
+		len = min(dst_len, src_len);
+
+		cmd = ccp_alloc_dma_cmd(chan);
+		if (!cmd)
+			goto err;
+
+		ccp_cmd = &cmd->ccp_cmd;
+		ccp_cmd->ccp = chan->ccp;
+		ccp_pt = &ccp_cmd->u.passthru_nomap;
+		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
+		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
+		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
+		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
+		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
+		ccp_pt->src_len = len;
+		ccp_pt->final = 1;
+		ccp_cmd->callback = ccp_cmd_callback;
+		ccp_cmd->data = desc;
+
+		list_add_tail(&cmd->entry, &desc->pending);
+
+		dev_dbg(ccp->dev,
+			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
+			cmd, &ccp_pt->src_dma,
+			&ccp_pt->dst_dma, ccp_pt->src_len);
+
+		total_len += len;
+
+		src_len -= len;
+		src_offset += len;
+
+		dst_len -= len;
+		dst_offset += len;
+	}
+
+	desc->len = total_len;
+
+	if (list_empty(&desc->pending))
+		goto err;
+
+	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);
+
+	spin_lock_irqsave(&chan->lock, sflags);
+
+	list_add_tail(&desc->entry, &chan->created);
+
+	spin_unlock_irqrestore(&chan->lock, sflags);
+
+	return desc;
+
+err:
+	ccp_free_cmd_resources(ccp, &desc->pending);
+	kmem_cache_free(ccp->dma_desc_cache, desc);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
+	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
+	unsigned long flags)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	struct ccp_dma_desc *desc;
+	struct scatterlist dst_sg, src_sg;
+
+	dev_dbg(chan->ccp->dev,
+		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
+		__func__, &src, &dst, len, flags);
+
+	sg_init_table(&dst_sg, 1);
+	sg_dma_address(&dst_sg) = dst;
+	sg_dma_len(&dst_sg) = len;
+
+	sg_init_table(&src_sg, 1);
+	sg_dma_address(&src_sg) = src;
+	sg_dma_len(&src_sg) = len;
+
+	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
+	if (!desc)
+		return NULL;
+
+	return &desc->tx_desc;
+}
+
+static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
+	struct dma_chan *dma_chan, unsigned long flags)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	struct ccp_dma_desc *desc;
+
+	desc = ccp_alloc_dma_desc(chan, flags);
+	if (!desc)
+		return NULL;
+
+	return &desc->tx_desc;
+}
+
+static void ccp_issue_pending(struct dma_chan *dma_chan)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	struct ccp_dma_desc *desc;
+	unsigned long flags;
+
+	dev_dbg(chan->ccp->dev, "%s\n", __func__);
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	desc = __ccp_pending_to_active(chan);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	/* If there was nothing active, start processing */
+	if (desc)
+		ccp_cmd_callback(desc, 0);
+}
+
+static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
+				     dma_cookie_t cookie,
+				     struct dma_tx_state *state)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	struct ccp_dma_desc *desc;
+	enum dma_status ret;
+	unsigned long flags;
+
+	if (chan->status == DMA_PAUSED) {
+		ret = DMA_PAUSED;
+		goto out;
+	}
+
+	ret = dma_cookie_status(dma_chan, cookie, state);
+	if (ret == DMA_COMPLETE) {
+		spin_lock_irqsave(&chan->lock, flags);
+
+		/* Get status from complete chain, if still there */
+		list_for_each_entry(desc, &chan->complete, entry) {
+			if (desc->tx_desc.cookie != cookie)
+				continue;
+
+			ret = desc->status;
+			break;
+		}
+
+		spin_unlock_irqrestore(&chan->lock, flags);
+	}
+
+out:
+	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);
+
+	return ret;
+}
+
+static int ccp_pause(struct dma_chan *dma_chan)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+
+	chan->status = DMA_PAUSED;
+
+	/* TODO: Wait for active DMA to complete before returning? */
+
+	return 0;
+}
+
+static int ccp_resume(struct dma_chan *dma_chan)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	struct ccp_dma_desc *desc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
+					entry);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	/* Indicate the channel is running again */
+	chan->status = DMA_IN_PROGRESS;
+
+	/* If there was something active, re-start */
+	if (desc)
+		ccp_cmd_callback(desc, 0);
+
+	return 0;
+}
+
+static int ccp_terminate_all(struct dma_chan *dma_chan)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	unsigned long flags;
+
+	dev_dbg(chan->ccp->dev, "%s\n", __func__);
+
+	/* TODO: Wait for active DMA to complete before continuing */
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	/* TODO: Purge the complete list? */
+	ccp_free_desc_resources(chan->ccp, &chan->active);
+	ccp_free_desc_resources(chan->ccp, &chan->pending);
+	ccp_free_desc_resources(chan->ccp, &chan->created);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return 0;
+}
+
+int ccp_dmaengine_register(struct ccp_device *ccp)
+{
+	struct ccp_dma_chan *chan;
+	struct dma_device *dma_dev = &ccp->dma_dev;
+	struct dma_chan *dma_chan;
+	char *dma_cmd_cache_name;
+	char *dma_desc_cache_name;
+	unsigned int i;
+	int ret;
+
+	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
+					 sizeof(*(ccp->ccp_dma_chan)),
+					 GFP_KERNEL);
+	if (!ccp->ccp_dma_chan)
+		return -ENOMEM;
+
+	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
+					    "%s-dmaengine-cmd-cache",
+					    ccp->name);
+	if (!dma_cmd_cache_name)
+		return -ENOMEM;
+
+	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
+					       sizeof(struct ccp_dma_cmd),
+					       sizeof(void *),
+					       SLAB_HWCACHE_ALIGN, NULL);
+	if (!ccp->dma_cmd_cache)
+		return -ENOMEM;
+
+	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
+					     "%s-dmaengine-desc-cache",
+					     ccp->name);
+	if (!dma_desc_cache_name) {
+		ret = -ENOMEM;
+		goto err_cache;
+	}
+
+	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
+						sizeof(struct ccp_dma_desc),
+						sizeof(void *),
+						SLAB_HWCACHE_ALIGN, NULL);
+	if (!ccp->dma_desc_cache) {
+		ret = -ENOMEM;
+		goto err_cache;
+	}
+
+	dma_dev->dev = ccp->dev;
+	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
+	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
+	dma_dev->directions = DMA_MEM_TO_MEM;
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+
+	/* The DMA channels for this device can be set to public or private,
+	 * and overridden by the module parameter dma_chan_attr.
+	 * Default: according to the value in vdata (dma_chan_attr=0)
+	 * dma_chan_attr=0x1: all channels private (override vdata)
+	 * dma_chan_attr=0x2: all channels public (override vdata)
+	 */
+	if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
+		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
+	INIT_LIST_HEAD(&dma_dev->channels);
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		chan = ccp->ccp_dma_chan + i;
+		dma_chan = &chan->dma_chan;
+
+		chan->ccp = ccp;
+
+		spin_lock_init(&chan->lock);
+		INIT_LIST_HEAD(&chan->created);
+		INIT_LIST_HEAD(&chan->pending);
+		INIT_LIST_HEAD(&chan->active);
+		INIT_LIST_HEAD(&chan->complete);
+
+		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
+			     (unsigned long)chan);
+
+		dma_chan->device = dma_dev;
+		dma_cookie_init(dma_chan);
+
+		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
+	}
+
+	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
+	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
+	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
+	dma_dev->device_issue_pending = ccp_issue_pending;
+	dma_dev->device_tx_status = ccp_tx_status;
+	dma_dev->device_pause = ccp_pause;
+	dma_dev->device_resume = ccp_resume;
+	dma_dev->device_terminate_all = ccp_terminate_all;
+
+	ret = dma_async_device_register(dma_dev);
+	if (ret)
+		goto err_reg;
+
+	return 0;
+
+err_reg:
+	kmem_cache_destroy(ccp->dma_desc_cache);
+
+err_cache:
+	kmem_cache_destroy(ccp->dma_cmd_cache);
+
+	return ret;
+}
+
+void ccp_dmaengine_unregister(struct ccp_device *ccp)
+{
+	struct dma_device *dma_dev = &ccp->dma_dev;
+
+	dma_async_device_unregister(dma_dev);
+
+	kmem_cache_destroy(ccp->dma_desc_cache);
+	kmem_cache_destroy(ccp->dma_cmd_cache);
+}
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
new file mode 100644
index 0000000..0ea43cd
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -0,0 +1,2473 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/des.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+/* SHA initial context values */
+static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
+	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
+	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
+	cpu_to_be32(SHA1_H4),
+};
+
+static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
+	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
+	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
+	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
+	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
+};
+
+static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
+	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
+	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
+	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
+	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
+};
+
+static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
+	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
+	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
+	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
+	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
+};
+
+static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
+	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
+	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
+	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
+	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
+};
+
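+/* CCP_NEW_JOBID() only generates a fresh job-id on v3 devices; on later
+ * versions it yields zero.
+ */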
+#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
+					ccp_gen_jobid(ccp) : 0)
+
+static u32 ccp_gen_jobid(struct ccp_device *ccp)
+{
+	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
+}
+
+static void ccp_sg_free(struct ccp_sg_workarea *wa)
+{
+	if (wa->dma_count)
+		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
+
+	wa->dma_count = 0;
+}
+
+static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
+				struct scatterlist *sg, u64 len,
+				enum dma_data_direction dma_dir)
+{
+	memset(wa, 0, sizeof(*wa));
+
+	wa->sg = sg;
+	if (!sg)
+		return 0;
+
+	wa->nents = sg_nents_for_len(sg, len);
+	if (wa->nents < 0)
+		return wa->nents;
+
+	wa->bytes_left = len;
+	wa->sg_used = 0;
+
+	if (len == 0)
+		return 0;
+
+	if (dma_dir == DMA_NONE)
+		return 0;
+
+	wa->dma_sg = sg;
+	wa->dma_dev = dev;
+	wa->dma_dir = dma_dir;
+	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
+	if (!wa->dma_count)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
+{
+	unsigned int nbytes = min_t(u64, len, wa->bytes_left);
+
+	if (!wa->sg)
+		return;
+
+	wa->sg_used += nbytes;
+	wa->bytes_left -= nbytes;
+	if (wa->sg_used == wa->sg->length) {
+		wa->sg = sg_next(wa->sg);
+		wa->sg_used = 0;
+	}
+}
+
+static void ccp_dm_free(struct ccp_dm_workarea *wa)
+{
+	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
+		if (wa->address)
+			dma_pool_free(wa->dma_pool, wa->address,
+				      wa->dma.address);
+	} else {
+		if (wa->dma.address)
+			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
+					 wa->dma.dir);
+		kfree(wa->address);
+	}
+
+	wa->address = NULL;
+	wa->dma.address = 0;
+}
+
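+/* Allocate a DMA-able work buffer: requests up to CCP_DMAPOOL_MAX_SIZE bytes
+ * are served from the per-queue dma_pool; larger ones are kzalloc'd and
+ * streaming-mapped with dma_map_single().
+ */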
+static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
+				struct ccp_cmd_queue *cmd_q,
+				unsigned int len,
+				enum dma_data_direction dir)
+{
+	memset(wa, 0, sizeof(*wa));
+
+	if (!len)
+		return 0;
+
+	wa->dev = cmd_q->ccp->dev;
+	wa->length = len;
+
+	if (len <= CCP_DMAPOOL_MAX_SIZE) {
+		wa->dma_pool = cmd_q->dma_pool;
+
+		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
+					     &wa->dma.address);
+		if (!wa->address)
+			return -ENOMEM;
+
+		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;
+
+		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
+	} else {
+		wa->address = kzalloc(len, GFP_KERNEL);
+		if (!wa->address)
+			return -ENOMEM;
+
+		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
+						 dir);
+		if (dma_mapping_error(wa->dev, wa->dma.address))
+			return -ENOMEM;
+
+		wa->dma.length = len;
+	}
+	wa->dma.dir = dir;
+
+	return 0;
+}
+
+static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+			   struct scatterlist *sg, unsigned int sg_offset,
+			   unsigned int len)
+{
+	WARN_ON(!wa->address);
+
+	if (len > (wa->length - wa_offset))
+		return -EINVAL;
+
+	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
+				 0);
+	return 0;
+}
+
+static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+			    struct scatterlist *sg, unsigned int sg_offset,
+			    unsigned int len)
+{
+	WARN_ON(!wa->address);
+
+	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
+				 1);
+}
+
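+/* Copy @len bytes from the scatterlist into the work area and reverse them
+ * in place (the XOR swap below), converting a big endian operand into the
+ * little endian form used for RSA/ECC operands.
+ */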
+static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
+				   unsigned int wa_offset,
+				   struct scatterlist *sg,
+				   unsigned int sg_offset,
+				   unsigned int len)
+{
+	u8 *p, *q;
+	int rc;
+
+	rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
+	if (rc)
+		return rc;
+
+	p = wa->address + wa_offset;
+	q = p + len - 1;
+	while (p < q) {
+		*p = *p ^ *q;
+		*q = *p ^ *q;
+		*p = *p ^ *q;
+		p++;
+		q--;
+	}
+	return 0;
+}
+
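+/* Reverse the bytes held in the work area and copy them out to the
+ * scatterlist, converting a little endian result back to big endian.
+ */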
+static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
+				    unsigned int wa_offset,
+				    struct scatterlist *sg,
+				    unsigned int sg_offset,
+				    unsigned int len)
+{
+	u8 *p, *q;
+
+	p = wa->address + wa_offset;
+	q = p + len - 1;
+	while (p < q) {
+		*p = *p ^ *q;
+		*q = *p ^ *q;
+		*p = *p ^ *q;
+		p++;
+		q--;
+	}
+
+	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
+}
+
+static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
+{
+	ccp_dm_free(&data->dm_wa);
+	ccp_sg_free(&data->sg_wa);
+}
+
+static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
+			 struct scatterlist *sg, u64 sg_len,
+			 unsigned int dm_len,
+			 enum dma_data_direction dir)
+{
+	int ret;
+
+	memset(data, 0, sizeof(*data));
+
+	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
+				   dir);
+	if (ret)
+		goto e_err;
+
+	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
+	if (ret)
+		goto e_err;
+
+	return 0;
+
+e_err:
+	ccp_free_data(data, cmd_q);
+
+	return ret;
+}
+
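+/* Stage up to one work-buffer's worth of data between the scatterlist and
+ * the DMA work area; @from selects the direction of the copy. Returns the
+ * number of bytes accounted for.
+ */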
+static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
+{
+	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
+	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
+	unsigned int buf_count, nbytes;
+
+	/* Clear the buffer if setting it */
+	if (!from)
+		memset(dm_wa->address, 0, dm_wa->length);
+
+	if (!sg_wa->sg)
+		return 0;
+
+	/* Perform the copy operation
+	 *   nbytes will always be <= UINT_MAX because dm_wa->length is
+	 *   an unsigned int
+	 */
+	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
+	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
+				 nbytes, from);
+
+	/* Update the structures and generate the count */
+	buf_count = 0;
+	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
+		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
+			     dm_wa->length - buf_count);
+		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
+
+		buf_count += nbytes;
+		ccp_update_sg_workarea(sg_wa, nbytes);
+	}
+
+	return buf_count;
+}
+
+static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
+{
+	return ccp_queue_buf(data, 0);
+}
+
+static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
+{
+	return ccp_queue_buf(data, 1);
+}
+
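+/* Set up the source (and optional destination) DMA parameters for the next
+ * chunk of an operation, falling back to the bounce buffers whenever a
+ * scatterlist entry holds less than block_size bytes.
+ */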
+static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
+			     struct ccp_op *op, unsigned int block_size,
+			     bool blocksize_op)
+{
+	unsigned int sg_src_len, sg_dst_len, op_len;
+
+	/* The CCP can only DMA from/to one source and one destination
+	 * address per operation, so the operation length is limited to the
+	 * smaller of the contiguous DMA lengths available in the source and
+	 * the destination. The resulting len values will always be
+	 * <= UINT_MAX because the dma length is an unsigned int.
+	 */
+	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
+	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
+
+	if (dst) {
+		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
+		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
+		op_len = min(sg_src_len, sg_dst_len);
+	} else {
+		op_len = sg_src_len;
+	}
+
+	/* The data operation length is the smaller of the sg room remaining
+	 * for the source and the destination, but never less than block_size
+	 */
+	op_len = max(op_len, block_size);
+
+	/* Unless we have to buffer data, there's no reason to wait */
+	op->soc = 0;
+
+	if (sg_src_len < block_size) {
+		/* Not enough data in the sg element, so it
+		 * needs to be buffered into a blocksize chunk
+		 */
+		int cp_len = ccp_fill_queue_buf(src);
+
+		op->soc = 1;
+		op->src.u.dma.address = src->dm_wa.dma.address;
+		op->src.u.dma.offset = 0;
+		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
+	} else {
+		/* Enough data in the sg element, but we need to
+		 * adjust for any previously copied data
+		 */
+		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
+		op->src.u.dma.offset = src->sg_wa.sg_used;
+		op->src.u.dma.length = op_len & ~(block_size - 1);
+
+		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
+	}
+
+	if (dst) {
+		if (sg_dst_len < block_size) {
+			/* Not enough room in the sg element or we're on the
+			 * last piece of data (when using padding), so the
+			 * output needs to be buffered into a blocksize chunk
+			 */
+			op->soc = 1;
+			op->dst.u.dma.address = dst->dm_wa.dma.address;
+			op->dst.u.dma.offset = 0;
+			op->dst.u.dma.length = op->src.u.dma.length;
+		} else {
+			/* Enough room in the sg element, but we need to
+			 * adjust for any previously used area
+			 */
+			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
+			op->dst.u.dma.offset = dst->sg_wa.sg_used;
+			op->dst.u.dma.length = op->src.u.dma.length;
+		}
+	}
+}
+
+static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
+			     struct ccp_op *op)
+{
+	op->init = 0;
+
+	if (dst) {
+		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
+			ccp_empty_queue_buf(dst);
+		else
+			ccp_update_sg_workarea(&dst->sg_wa,
+					       op->dst.u.dma.length);
+	}
+}
+
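+/* Copy a work area to or from a storage block (SB) slot using a pass-through
+ * operation, applying the requested byte swap on the way.
+ */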
+static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
+			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
+			       u32 byte_swap, bool from)
+{
+	struct ccp_op op;
+
+	memset(&op, 0, sizeof(op));
+
+	op.cmd_q = cmd_q;
+	op.jobid = jobid;
+	op.eom = 1;
+
+	if (from) {
+		op.soc = 1;
+		op.src.type = CCP_MEMTYPE_SB;
+		op.src.u.sb = sb;
+		op.dst.type = CCP_MEMTYPE_SYSTEM;
+		op.dst.u.dma.address = wa->dma.address;
+		op.dst.u.dma.length = wa->length;
+	} else {
+		op.src.type = CCP_MEMTYPE_SYSTEM;
+		op.src.u.dma.address = wa->dma.address;
+		op.src.u.dma.length = wa->length;
+		op.dst.type = CCP_MEMTYPE_SB;
+		op.dst.u.sb = sb;
+	}
+
+	op.u.passthru.byte_swap = byte_swap;
+
+	return cmd_q->ccp->vdata->perform->passthru(&op);
+}
+
+static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
+			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
+			  u32 byte_swap)
+{
+	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
+}
+
+static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
+			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
+			    u32 byte_swap)
+{
+	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
+}
+
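+/* Handle an AES CMAC request: load the key and IV into storage blocks, feed
+ * the message through the AES engine one block-aligned pass at a time, and
+ * return the resulting MAC through the IV buffer.
+ */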
+static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
+				struct ccp_cmd *cmd)
+{
+	struct ccp_aes_engine *aes = &cmd->u.aes;
+	struct ccp_dm_workarea key, ctx;
+	struct ccp_data src;
+	struct ccp_op op;
+	unsigned int dm_offset;
+	int ret;
+
+	if (!((aes->key_len == AES_KEYSIZE_128) ||
+	      (aes->key_len == AES_KEYSIZE_192) ||
+	      (aes->key_len == AES_KEYSIZE_256)))
+		return -EINVAL;
+
+	if (aes->src_len & (AES_BLOCK_SIZE - 1))
+		return -EINVAL;
+
+	if (aes->iv_len != AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (!aes->key || !aes->iv || !aes->src)
+		return -EINVAL;
+
+	if (aes->cmac_final) {
+		if (aes->cmac_key_len != AES_BLOCK_SIZE)
+			return -EINVAL;
+
+		if (!aes->cmac_key)
+			return -EINVAL;
+	}
+
+	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
+	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);
+
+	ret = -EIO;
+	memset(&op, 0, sizeof(op));
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_key = cmd_q->sb_key;
+	op.sb_ctx = cmd_q->sb_ctx;
+	op.init = 1;
+	op.u.aes.type = aes->type;
+	op.u.aes.mode = aes->mode;
+	op.u.aes.action = aes->action;
+
+	/* All supported key sizes fit in a single (32-byte) SB entry
+	 * and must be in little endian format. Use the 256-bit byte
+	 * swap passthru option to convert from big endian to little
+	 * endian.
+	 */
+	ret = ccp_init_dm_workarea(&key, cmd_q,
+				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
+				   DMA_TO_DEVICE);
+	if (ret)
+		return ret;
+
+	dm_offset = CCP_SB_BYTES - aes->key_len;
+	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	if (ret)
+		goto e_key;
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_key;
+	}
+
+	/* The AES context fits in a single (32-byte) SB entry and
+	 * must be in little endian format. Use the 256-bit byte swap
+	 * passthru option to convert from big endian to little endian.
+	 */
+	ret = ccp_init_dm_workarea(&ctx, cmd_q,
+				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
+				   DMA_BIDIRECTIONAL);
+	if (ret)
+		goto e_key;
+
+	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
+	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	if (ret)
+		goto e_ctx;
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_ctx;
+	}
+
+	/* Send data to the CCP AES engine */
+	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
+			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
+	if (ret)
+		goto e_ctx;
+
+	while (src.sg_wa.bytes_left) {
+		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
+		if (aes->cmac_final && !src.sg_wa.bytes_left) {
+			op.eom = 1;
+
+			/* Push the K1/K2 key to the CCP now */
+			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
+					       op.sb_ctx,
+					       CCP_PASSTHRU_BYTESWAP_256BIT);
+			if (ret) {
+				cmd->engine_error = cmd_q->cmd_error;
+				goto e_src;
+			}
+
+			ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
+					      aes->cmac_key_len);
+			if (ret)
+				goto e_src;
+			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+					     CCP_PASSTHRU_BYTESWAP_256BIT);
+			if (ret) {
+				cmd->engine_error = cmd_q->cmd_error;
+				goto e_src;
+			}
+		}
+
+		ret = cmd_q->ccp->vdata->perform->aes(&op);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_src;
+		}
+
+		ccp_process_data(&src, NULL, &op);
+	}
+
+	/* Retrieve the AES context - convert from LE to BE using
+	 * 32-byte (256-bit) byteswapping
+	 */
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			       CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_src;
+	}
+
+	/* ...but we only need AES_BLOCK_SIZE bytes */
+	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
+	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+
+e_src:
+	ccp_free_data(&src, cmd_q);
+
+e_ctx:
+	ccp_dm_free(&ctx);
+
+e_key:
+	ccp_dm_free(&key);
+
+	return ret;
+}
+
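+/* Handle an AES GCM request: GHASH the additional authenticated data, GCTR
+ * the payload, then GHASH the length block and either emit the tag (encrypt)
+ * or compare it against the supplied one (decrypt).
+ */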
+static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+			       struct ccp_cmd *cmd)
+{
+	struct ccp_aes_engine *aes = &cmd->u.aes;
+	struct ccp_dm_workarea key, ctx, final_wa, tag;
+	struct ccp_data src, dst;
+	struct ccp_data aad;
+	struct ccp_op op;
+
+	__be64 *final;
+	unsigned int dm_offset;
+	unsigned int ilen;
+	bool in_place = true; /* Default value */
+	int ret;
+
+	struct scatterlist *p_inp, sg_inp[2];
+	struct scatterlist *p_tag, sg_tag[2];
+	struct scatterlist *p_outp, sg_outp[2];
+	struct scatterlist *p_aad;
+
+	if (!aes->iv)
+		return -EINVAL;
+
+	if (!((aes->key_len == AES_KEYSIZE_128) ||
+		(aes->key_len == AES_KEYSIZE_192) ||
+		(aes->key_len == AES_KEYSIZE_256)))
+		return -EINVAL;
+
+	if (!aes->key) /* Gotta have a key SGL */
+		return -EINVAL;
+
+	/* First, decompose the source buffer into AAD & PT,
+	 * and the destination buffer into AAD, CT & tag, or
+	 * the input into CT & tag.
+	 * It is expected that the input and output SGs will
+	 * be valid, even if the AAD and input lengths are 0.
+	 */
+	p_aad = aes->src;
+	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
+	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
+	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+		ilen = aes->src_len;
+		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
+	} else {
+		/* Input length for decryption includes tag */
+		ilen = aes->src_len - AES_BLOCK_SIZE;
+		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
+	}
+
+	memset(&op, 0, sizeof(op));
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+	op.init = 1;
+	op.u.aes.type = aes->type;
+
+	/* Copy the key to the LSB */
+	ret = ccp_init_dm_workarea(&key, cmd_q,
+				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
+				   DMA_TO_DEVICE);
+	if (ret)
+		return ret;
+
+	dm_offset = CCP_SB_BYTES - aes->key_len;
+	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	if (ret)
+		goto e_key;
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_key;
+	}
+
+	/* Copy the context (IV) to the LSB.
+	 * There is an assumption here that the IV is 96 bits in length, plus
+	 * a 32-bit counter filling out the 128-bit block.
+	 */
+	ret = ccp_init_dm_workarea(&ctx, cmd_q,
+				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
+				   DMA_BIDIRECTIONAL);
+	if (ret)
+		goto e_key;
+
+	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
+	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	if (ret)
+		goto e_ctx;
+
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_ctx;
+	}
+
+	op.init = 1;
+	if (aes->aad_len > 0) {
+		/* Step 1: Run a GHASH over the Additional Authenticated Data */
+		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
+				    AES_BLOCK_SIZE,
+				    DMA_TO_DEVICE);
+		if (ret)
+			goto e_ctx;
+
+		op.u.aes.mode = CCP_AES_MODE_GHASH;
+		op.u.aes.action = CCP_AES_GHASHAAD;
+
+		while (aad.sg_wa.bytes_left) {
+			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
+
+			ret = cmd_q->ccp->vdata->perform->aes(&op);
+			if (ret) {
+				cmd->engine_error = cmd_q->cmd_error;
+				goto e_aad;
+			}
+
+			ccp_process_data(&aad, NULL, &op);
+			op.init = 0;
+		}
+	}
+
+	op.u.aes.mode = CCP_AES_MODE_GCTR;
+	op.u.aes.action = aes->action;
+
+	if (ilen > 0) {
+		/* Step 2: Run a GCTR over the plaintext */
+		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
+
+		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
+				    AES_BLOCK_SIZE,
+				    in_place ? DMA_BIDIRECTIONAL
+					     : DMA_TO_DEVICE);
+		if (ret)
+			goto e_ctx;
+
+		if (in_place) {
+			dst = src;
+		} else {
+			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
+					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
+			if (ret)
+				goto e_src;
+		}
+
+		op.soc = 0;
+		op.eom = 0;
+		op.init = 1;
+		while (src.sg_wa.bytes_left) {
+			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
+			if (!src.sg_wa.bytes_left) {
+				unsigned int nbytes = aes->src_len
+						      % AES_BLOCK_SIZE;
+
+				if (nbytes) {
+					op.eom = 1;
+					op.u.aes.size = (nbytes * 8) - 1;
+				}
+			}
+
+			ret = cmd_q->ccp->vdata->perform->aes(&op);
+			if (ret) {
+				cmd->engine_error = cmd_q->cmd_error;
+				goto e_dst;
+			}
+
+			ccp_process_data(&src, &dst, &op);
+			op.init = 0;
+		}
+	}
+
+	/* Step 3: Update the IV portion of the context with the original IV */
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			       CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_dst;
+	}
+
+	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	if (ret)
+		goto e_dst;
+
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_dst;
+	}
+
+	/* Step 4: Concatenate the lengths of the AAD and source, and
+	 * hash that 16 byte buffer.
+	 */
+	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
+				   DMA_BIDIRECTIONAL);
+	if (ret)
+		goto e_dst;
+	final = (__be64 *)final_wa.address;
+	final[0] = cpu_to_be64(aes->aad_len * 8);
+	final[1] = cpu_to_be64(ilen * 8);
+
+	op.u.aes.mode = CCP_AES_MODE_GHASH;
+	op.u.aes.action = CCP_AES_GHASHFINAL;
+	op.src.type = CCP_MEMTYPE_SYSTEM;
+	op.src.u.dma.address = final_wa.dma.address;
+	op.src.u.dma.length = AES_BLOCK_SIZE;
+	op.dst.type = CCP_MEMTYPE_SYSTEM;
+	op.dst.u.dma.address = final_wa.dma.address;
+	op.dst.u.dma.length = AES_BLOCK_SIZE;
+	op.eom = 1;
+	op.u.aes.size = 0;
+	ret = cmd_q->ccp->vdata->perform->aes(&op);
+	if (ret)
+		goto e_dst;
+
+	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+		/* Put the ciphered tag after the ciphertext. */
+		ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
+	} else {
+		/* Does this ciphered tag match the input? */
+		ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
+					   DMA_BIDIRECTIONAL);
+		if (ret)
+			goto e_tag;
+		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+		if (ret) {
+			ccp_dm_free(&tag);
+			goto e_tag;
+		}
+
+		ret = memcmp(tag.address, final_wa.address,
+			     AES_BLOCK_SIZE) ? -EBADMSG : 0;
+		ccp_dm_free(&tag);
+	}
+
+e_tag:
+	ccp_dm_free(&final_wa);
+
+e_dst:
+	if (ilen > 0 && !in_place)
+		ccp_free_data(&dst, cmd_q);
+
+e_src:
+	if (ilen > 0)
+		ccp_free_data(&src, cmd_q);
+
+e_aad:
+	if (aes->aad_len)
+		ccp_free_data(&aad, cmd_q);
+
+e_ctx:
+	ccp_dm_free(&ctx);
+
+e_key:
+	ccp_dm_free(&key);
+
+	return ret;
+}
+
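+/* Handle a basic AES request, dispatching CMAC and GCM requests to the
+ * dedicated handlers above.
+ */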
+static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+	struct ccp_aes_engine *aes = &cmd->u.aes;
+	struct ccp_dm_workarea key, ctx;
+	struct ccp_data src, dst;
+	struct ccp_op op;
+	unsigned int dm_offset;
+	bool in_place = false;
+	int ret;
+
+	if (aes->mode == CCP_AES_MODE_CMAC)
+		return ccp_run_aes_cmac_cmd(cmd_q, cmd);
+
+	if (aes->mode == CCP_AES_MODE_GCM)
+		return ccp_run_aes_gcm_cmd(cmd_q, cmd);
+
+	if (!((aes->key_len == AES_KEYSIZE_128) ||
+	      (aes->key_len == AES_KEYSIZE_192) ||
+	      (aes->key_len == AES_KEYSIZE_256)))
+		return -EINVAL;
+
+	if (((aes->mode == CCP_AES_MODE_ECB) ||
+	     (aes->mode == CCP_AES_MODE_CBC) ||
+	     (aes->mode == CCP_AES_MODE_CFB)) &&
+	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
+		return -EINVAL;
+
+	if (!aes->key || !aes->src || !aes->dst)
+		return -EINVAL;
+
+	if (aes->mode != CCP_AES_MODE_ECB) {
+		if (aes->iv_len != AES_BLOCK_SIZE)
+			return -EINVAL;
+
+		if (!aes->iv)
+			return -EINVAL;
+	}
+
+	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
+	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);
+
+	ret = -EIO;
+	memset(&op, 0, sizeof(op));
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_key = cmd_q->sb_key;
+	op.sb_ctx = cmd_q->sb_ctx;
+	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
+	op.u.aes.type = aes->type;
+	op.u.aes.mode = aes->mode;
+	op.u.aes.action = aes->action;
+
+	/* All supported key sizes fit in a single (32-byte) SB entry
+	 * and must be in little endian format. Use the 256-bit byte
+	 * swap passthru option to convert from big endian to little
+	 * endian.
+	 */
+	ret = ccp_init_dm_workarea(&key, cmd_q,
+				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
+				   DMA_TO_DEVICE);
+	if (ret)
+		return ret;
+
+	dm_offset = CCP_SB_BYTES - aes->key_len;
+	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	if (ret)
+		goto e_key;
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_key;
+	}
+
+	/* The AES context fits in a single (32-byte) SB entry and
+	 * must be in little endian format. Use the 256-bit byte swap
+	 * passthru option to convert from big endian to little endian.
+	 */
+	ret = ccp_init_dm_workarea(&ctx, cmd_q,
+				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
+				   DMA_BIDIRECTIONAL);
+	if (ret)
+		goto e_key;
+
+	if (aes->mode != CCP_AES_MODE_ECB) {
+		/* Load the AES context - convert to LE */
+		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
+		ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+		if (ret)
+			goto e_ctx;
+		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+				     CCP_PASSTHRU_BYTESWAP_256BIT);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_ctx;
+		}
+	}
+	switch (aes->mode) {
+	case CCP_AES_MODE_CFB: /* CFB128 only */
+	case CCP_AES_MODE_CTR:
+		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
+		break;
+	default:
+		op.u.aes.size = 0;
+	}
+
+	/* Prepare the input and output data workareas. For in-place
+	 * operations we need to set the dma direction to BIDIRECTIONAL
+	 * and copy the src workarea to the dst workarea.
+	 */
+	if (sg_virt(aes->src) == sg_virt(aes->dst))
+		in_place = true;
+
+	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
+			    AES_BLOCK_SIZE,
+			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	if (ret)
+		goto e_ctx;
+
+	if (in_place) {
+		dst = src;
+	} else {
+		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
+				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
+		if (ret)
+			goto e_src;
+	}
+
+	/* Send data to the CCP AES engine */
+	while (src.sg_wa.bytes_left) {
+		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
+		if (!src.sg_wa.bytes_left) {
+			op.eom = 1;
+
+			/* Since we don't retrieve the AES context in ECB
+			 * mode we have to wait for the operation to complete
+			 * on the last piece of data
+			 */
+			if (aes->mode == CCP_AES_MODE_ECB)
+				op.soc = 1;
+		}
+
+		ret = cmd_q->ccp->vdata->perform->aes(&op);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_dst;
+		}
+
+		ccp_process_data(&src, &dst, &op);
+	}
+
+	if (aes->mode != CCP_AES_MODE_ECB) {
+		/* Retrieve the AES context - convert from LE to BE using
+		 * 32-byte (256-bit) byteswapping
+		 */
+		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+				       CCP_PASSTHRU_BYTESWAP_256BIT);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_dst;
+		}
+
+		/* ...but we only need AES_BLOCK_SIZE bytes */
+		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
+		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	}
+
+e_dst:
+	if (!in_place)
+		ccp_free_data(&dst, cmd_q);
+
+e_src:
+	ccp_free_data(&src, cmd_q);
+
+e_ctx:
+	ccp_dm_free(&ctx);
+
+e_key:
+	ccp_dm_free(&key);
+
+	return ret;
+}
+
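+/* Handle an XTS-AES request; the two halves of the key are laid out
+ * differently for version 3 and version 5 devices (see below).
+ */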
+static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+			       struct ccp_cmd *cmd)
+{
+	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
+	struct ccp_dm_workarea key, ctx;
+	struct ccp_data src, dst;
+	struct ccp_op op;
+	unsigned int unit_size, dm_offset;
+	bool in_place = false;
+	unsigned int sb_count;
+	enum ccp_aes_type aestype;
+	int ret;
+
+	switch (xts->unit_size) {
+	case CCP_XTS_AES_UNIT_SIZE_16:
+		unit_size = 16;
+		break;
+	case CCP_XTS_AES_UNIT_SIZE_512:
+		unit_size = 512;
+		break;
+	case CCP_XTS_AES_UNIT_SIZE_1024:
+		unit_size = 1024;
+		break;
+	case CCP_XTS_AES_UNIT_SIZE_2048:
+		unit_size = 2048;
+		break;
+	case CCP_XTS_AES_UNIT_SIZE_4096:
+		unit_size = 4096;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (xts->key_len == AES_KEYSIZE_128)
+		aestype = CCP_AES_TYPE_128;
+	else if (xts->key_len == AES_KEYSIZE_256)
+		aestype = CCP_AES_TYPE_256;
+	else
+		return -EINVAL;
+
+	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
+		return -EINVAL;
+
+	if (xts->iv_len != AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
+		return -EINVAL;
+
+	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
+	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);
+
+	ret = -EIO;
+	memset(&op, 0, sizeof(op));
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_key = cmd_q->sb_key;
+	op.sb_ctx = cmd_q->sb_ctx;
+	op.init = 1;
+	op.u.xts.type = aestype;
+	op.u.xts.action = xts->action;
+	op.u.xts.unit_size = xts->unit_size;
+
+	/* A version 3 device only supports 128-bit keys, which fit into a
+	 * single SB entry. A version 5 device uses a 512-bit vector, so two
+	 * SB entries are needed.
+	 */
+	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+		sb_count = CCP_XTS_AES_KEY_SB_COUNT;
+	else
+		sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
+	ret = ccp_init_dm_workarea(&key, cmd_q,
+				   sb_count * CCP_SB_BYTES,
+				   DMA_TO_DEVICE);
+	if (ret)
+		return ret;
+
+	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
+		/* All supported key sizes must be in little endian format.
+		 * Use the 256-bit byte swap passthru option to convert from
+		 * big endian to little endian.
+		 */
+		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+		ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+		if (ret)
+			goto e_key;
+		ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+		if (ret)
+			goto e_key;
+	} else {
+		/* Version 5 CCPs use a 512-bit space for the key: each portion
+		 * occupies 256 bits, or one entire slot, and is zero-padded.
+		 */
+		unsigned int pad;
+
+		dm_offset = CCP_SB_BYTES;
+		pad = dm_offset - xts->key_len;
+		ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+		if (ret)
+			goto e_key;
+		ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
+				      xts->key_len, xts->key_len);
+		if (ret)
+			goto e_key;
+	}
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_key;
+	}
+
+	/* The AES context fits in a single (32-byte) SB entry and
+	 * for XTS is already in little endian format so no byte swapping
+	 * is needed.
+	 */
+	ret = ccp_init_dm_workarea(&ctx, cmd_q,
+				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
+				   DMA_BIDIRECTIONAL);
+	if (ret)
+		goto e_key;
+
+	ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
+	if (ret)
+		goto e_ctx;
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			     CCP_PASSTHRU_BYTESWAP_NOOP);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_ctx;
+	}
+
+	/* Prepare the input and output data workareas. For in-place
+	 * operations we need to set the dma direction to BIDIRECTIONAL
+	 * and copy the src workarea to the dst workarea.
+	 */
+	if (sg_virt(xts->src) == sg_virt(xts->dst))
+		in_place = true;
+
+	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
+			    unit_size,
+			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	if (ret)
+		goto e_ctx;
+
+	if (in_place) {
+		dst = src;
+	} else {
+		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
+				    unit_size, DMA_FROM_DEVICE);
+		if (ret)
+			goto e_src;
+	}
+
+	/* Send data to the CCP AES engine */
+	while (src.sg_wa.bytes_left) {
+		ccp_prepare_data(&src, &dst, &op, unit_size, true);
+		if (!src.sg_wa.bytes_left)
+			op.eom = 1;
+
+		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_dst;
+		}
+
+		ccp_process_data(&src, &dst, &op);
+	}
+
+	/* Retrieve the AES context - convert from LE to BE using
+	 * 32-byte (256-bit) byteswapping
+	 */
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			       CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_dst;
+	}
+
+	/* ...but we only need AES_BLOCK_SIZE bytes */
+	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
+	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);
+
+e_dst:
+	if (!in_place)
+		ccp_free_data(&dst, cmd_q);
+
+e_src:
+	ccp_free_data(&src, cmd_q);
+
+e_ctx:
+	ccp_dm_free(&ctx);
+
+e_key:
+	ccp_dm_free(&key);
+
+	return ret;
+}
+
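+/* Handle a 3DES request; the three parts of the key are loaded in reverse
+ * order, as the engine expects (see below).
+ */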
+static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+	struct ccp_des3_engine *des3 = &cmd->u.des3;
+
+	struct ccp_dm_workarea key, ctx;
+	struct ccp_data src, dst;
+	struct ccp_op op;
+	unsigned int dm_offset;
+	unsigned int len_singlekey;
+	bool in_place = false;
+	int ret;
+
+	/* Error checks */
+	if (!cmd_q->ccp->vdata->perform->des3)
+		return -EINVAL;
+
+	if (des3->key_len != DES3_EDE_KEY_SIZE)
+		return -EINVAL;
+
+	if (((des3->mode == CCP_DES3_MODE_ECB) ||
+		(des3->mode == CCP_DES3_MODE_CBC)) &&
+		(des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
+		return -EINVAL;
+
+	if (!des3->key || !des3->src || !des3->dst)
+		return -EINVAL;
+
+	if (des3->mode != CCP_DES3_MODE_ECB) {
+		if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
+			return -EINVAL;
+
+		if (!des3->iv)
+			return -EINVAL;
+	}
+
+	ret = -EIO;
+	/* Zero out all the fields of the command desc */
+	memset(&op, 0, sizeof(op));
+
+	/* Set up the Function field */
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_key = cmd_q->sb_key;
+
+	op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
+	op.u.des3.type = des3->type;
+	op.u.des3.mode = des3->mode;
+	op.u.des3.action = des3->action;
+
+	/*
+	 * All supported key sizes fit in a single (32-byte) KSB entry and
+	 * (like AES) must be in little endian format. Use the 256-bit byte
+	 * swap passthru option to convert from big endian to little endian.
+	 */
+	ret = ccp_init_dm_workarea(&key, cmd_q,
+				   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
+				   DMA_TO_DEVICE);
+	if (ret)
+		return ret;
+
+	/*
+	 * The contents of the key triplet are in the reverse order of what
+	 * is required by the engine. Copy the 3 pieces individually to put
+	 * them where they belong.
+	 */
+	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
+
+	len_singlekey = des3->key_len / 3;
+	ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
+			      des3->key, 0, len_singlekey);
+	if (ret)
+		goto e_key;
+	ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
+			      des3->key, len_singlekey, len_singlekey);
+	if (ret)
+		goto e_key;
+	ret = ccp_set_dm_area(&key, dm_offset,
+			      des3->key, 2 * len_singlekey, len_singlekey);
+	if (ret)
+		goto e_key;
+
+	/* Copy the key to the SB */
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_key;
+	}
+
+	/*
+	 * The DES3 context fits in a single (32-byte) KSB entry and
+	 * must be in little endian format. Use the 256-bit byte swap
+	 * passthru option to convert from big endian to little endian.
+	 */
+	if (des3->mode != CCP_DES3_MODE_ECB) {
+		u32 load_mode;
+
+		op.sb_ctx = cmd_q->sb_ctx;
+
+		ret = ccp_init_dm_workarea(&ctx, cmd_q,
+					   CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
+					   DMA_BIDIRECTIONAL);
+		if (ret)
+			goto e_key;
+
+		/* Load the context into the LSB */
+		dm_offset = CCP_SB_BYTES - des3->iv_len;
+		ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
+				      des3->iv_len);
+		if (ret)
+			goto e_ctx;
+
+		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+			load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
+		else
+			load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+				     load_mode);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_ctx;
+		}
+	}
+
+	/*
+	 * Prepare the input and output data workareas. For in-place
+	 * operations we need to set the dma direction to BIDIRECTIONAL
+	 * and copy the src workarea to the dst workarea.
+	 */
+	if (sg_virt(des3->src) == sg_virt(des3->dst))
+		in_place = true;
+
+	ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
+			DES3_EDE_BLOCK_SIZE,
+			in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	if (ret)
+		goto e_ctx;
+
+	if (in_place)
+		dst = src;
+	else {
+		ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
+				DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
+		if (ret)
+			goto e_src;
+	}
+
+	/* Send data to the CCP DES3 engine */
+	while (src.sg_wa.bytes_left) {
+		ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
+		if (!src.sg_wa.bytes_left) {
+			op.eom = 1;
+
+			/* Note that, unlike the AES ECB case above, the
+			 * stop-on-completion flag is left cleared here even
+			 * on the last piece of data.
+			 */
+			op.soc = 0;
+		}
+
+		ret = cmd_q->ccp->vdata->perform->des3(&op);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_dst;
+		}
+
+		ccp_process_data(&src, &dst, &op);
+	}
+
+	if (des3->mode != CCP_DES3_MODE_ECB) {
+		/* Retrieve the context and make BE */
+		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+				       CCP_PASSTHRU_BYTESWAP_256BIT);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_dst;
+		}
+
+		/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
+		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+			dm_offset = CCP_SB_BYTES - des3->iv_len;
+		else
+			dm_offset = 0;
+		ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
+				DES3_EDE_BLOCK_SIZE);
+	}
+e_dst:
+	if (!in_place)
+		ccp_free_data(&dst, cmd_q);
+
+e_src:
+	ccp_free_data(&src, cmd_q);
+
+e_ctx:
+	if (des3->mode != CCP_DES3_MODE_ECB)
+		ccp_dm_free(&ctx);
+
+e_key:
+	ccp_dm_free(&key);
+
+	return ret;
+}
+
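+/* Handle a SHA request; when an opad is supplied with the final block, a
+ * second SHA pass is run recursively over opad || digest to complete the
+ * HMAC computation.
+ */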
+static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+	struct ccp_sha_engine *sha = &cmd->u.sha;
+	struct ccp_dm_workarea ctx;
+	struct ccp_data src;
+	struct ccp_op op;
+	unsigned int ioffset, ooffset;
+	unsigned int digest_size;
+	int sb_count;
+	const void *init;
+	u64 block_size;
+	int ctx_size;
+	int ret;
+
+	switch (sha->type) {
+	case CCP_SHA_TYPE_1:
+		if (sha->ctx_len < SHA1_DIGEST_SIZE)
+			return -EINVAL;
+		block_size = SHA1_BLOCK_SIZE;
+		break;
+	case CCP_SHA_TYPE_224:
+		if (sha->ctx_len < SHA224_DIGEST_SIZE)
+			return -EINVAL;
+		block_size = SHA224_BLOCK_SIZE;
+		break;
+	case CCP_SHA_TYPE_256:
+		if (sha->ctx_len < SHA256_DIGEST_SIZE)
+			return -EINVAL;
+		block_size = SHA256_BLOCK_SIZE;
+		break;
+	case CCP_SHA_TYPE_384:
+		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
+		    || sha->ctx_len < SHA384_DIGEST_SIZE)
+			return -EINVAL;
+		block_size = SHA384_BLOCK_SIZE;
+		break;
+	case CCP_SHA_TYPE_512:
+		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
+		    || sha->ctx_len < SHA512_DIGEST_SIZE)
+			return -EINVAL;
+		block_size = SHA512_BLOCK_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!sha->ctx)
+		return -EINVAL;
+
+	if (!sha->final && (sha->src_len & (block_size - 1)))
+		return -EINVAL;
+
+	/* The version 3 device can't handle zero-length input */
+	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
+
+		if (!sha->src_len) {
+			unsigned int digest_len;
+			const u8 *sha_zero;
+
+			/* Not final, just return */
+			if (!sha->final)
+				return 0;
+
+			/* The CCP cannot perform zero-length sha operations,
+			 * so the caller is required to buffer data for the
+			 * final operation; a non-zero bit count with no data
+			 * is therefore invalid.
+			 */
+			if (sha->msg_bits)
+				return -EINVAL;
+
+			/* A sha operation on a message with a total length
+			 * of zero is valid, however, so supply the well-known
+			 * result values directly.
+			 */
+			switch (sha->type) {
+			case CCP_SHA_TYPE_1:
+				sha_zero = sha1_zero_message_hash;
+				digest_len = SHA1_DIGEST_SIZE;
+				break;
+			case CCP_SHA_TYPE_224:
+				sha_zero = sha224_zero_message_hash;
+				digest_len = SHA224_DIGEST_SIZE;
+				break;
+			case CCP_SHA_TYPE_256:
+				sha_zero = sha256_zero_message_hash;
+				digest_len = SHA256_DIGEST_SIZE;
+				break;
+			default:
+				return -EINVAL;
+			}
+
+			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
+						 digest_len, 1);
+
+			return 0;
+		}
+	}
+
+	/* Set variables used throughout */
+	switch (sha->type) {
+	case CCP_SHA_TYPE_1:
+		digest_size = SHA1_DIGEST_SIZE;
+		init = (void *) ccp_sha1_init;
+		ctx_size = SHA1_DIGEST_SIZE;
+		sb_count = 1;
+		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
+			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		else
+			ooffset = ioffset = 0;
+		break;
+	case CCP_SHA_TYPE_224:
+		digest_size = SHA224_DIGEST_SIZE;
+		init = (void *) ccp_sha224_init;
+		ctx_size = SHA256_DIGEST_SIZE;
+		sb_count = 1;
+		ioffset = 0;
+		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
+			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		else
+			ooffset = 0;
+		break;
+	case CCP_SHA_TYPE_256:
+		digest_size = SHA256_DIGEST_SIZE;
+		init = (void *) ccp_sha256_init;
+		ctx_size = SHA256_DIGEST_SIZE;
+		sb_count = 1;
+		ooffset = ioffset = 0;
+		break;
+	case CCP_SHA_TYPE_384:
+		digest_size = SHA384_DIGEST_SIZE;
+		init = (void *) ccp_sha384_init;
+		ctx_size = SHA512_DIGEST_SIZE;
+		sb_count = 2;
+		ioffset = 0;
+		ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
+		break;
+	case CCP_SHA_TYPE_512:
+		digest_size = SHA512_DIGEST_SIZE;
+		init = (void *) ccp_sha512_init;
+		ctx_size = SHA512_DIGEST_SIZE;
+		sb_count = 2;
+		ooffset = ioffset = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		goto e_data;
+	}
+
+	/* For zero-length plaintext the src pointer is ignored; otherwise
+	 * a non-zero src_len requires a valid src scatterlist
+	 */
+	if (sha->src_len && !sha->src)
+		return -EINVAL;
+
+	memset(&op, 0, sizeof(op));
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+	op.u.sha.type = sha->type;
+	op.u.sha.msg_bits = sha->msg_bits;
+
+	/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
+	 * SHA384/512 require 2 adjacent SB slots, with the right half in the
+	 * first slot, and the left half in the second. Each portion must then
+	 * be in little endian format: use the 256-bit byte swap option.
+	 */
+	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
+				   DMA_BIDIRECTIONAL);
+	if (ret)
+		return ret;
+	if (sha->first) {
+		switch (sha->type) {
+		case CCP_SHA_TYPE_1:
+		case CCP_SHA_TYPE_224:
+		case CCP_SHA_TYPE_256:
+			memcpy(ctx.address + ioffset, init, ctx_size);
+			break;
+		case CCP_SHA_TYPE_384:
+		case CCP_SHA_TYPE_512:
+			memcpy(ctx.address + ctx_size / 2, init,
+			       ctx_size / 2);
+			memcpy(ctx.address, init + ctx_size / 2,
+			       ctx_size / 2);
+			break;
+		default:
+			ret = -EINVAL;
+			goto e_ctx;
+		}
+	} else {
+		/* Restore the context */
+		ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
+				      sb_count * CCP_SB_BYTES);
+		if (ret)
+			goto e_ctx;
+	}
+
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_ctx;
+	}
+
+	if (sha->src) {
+		/* Send data to the CCP SHA engine; block_size is set above */
+		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
+				    block_size, DMA_TO_DEVICE);
+		if (ret)
+			goto e_ctx;
+
+		while (src.sg_wa.bytes_left) {
+			ccp_prepare_data(&src, NULL, &op, block_size, false);
+			if (sha->final && !src.sg_wa.bytes_left)
+				op.eom = 1;
+
+			ret = cmd_q->ccp->vdata->perform->sha(&op);
+			if (ret) {
+				cmd->engine_error = cmd_q->cmd_error;
+				goto e_data;
+			}
+
+			ccp_process_data(&src, NULL, &op);
+		}
+	} else {
+		op.eom = 1;
+		ret = cmd_q->ccp->vdata->perform->sha(&op);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_data;
+		}
+	}
+
+	/* Retrieve the SHA context - convert from LE to BE using
+	 * 32-byte (256-bit) byteswapping
+	 */
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			       CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_data;
+	}
+
+	if (sha->final) {
+		/* Finishing up, so get the digest */
+		switch (sha->type) {
+		case CCP_SHA_TYPE_1:
+		case CCP_SHA_TYPE_224:
+		case CCP_SHA_TYPE_256:
+			ccp_get_dm_area(&ctx, ooffset,
+					sha->ctx, 0,
+					digest_size);
+			break;
+		case CCP_SHA_TYPE_384:
+		case CCP_SHA_TYPE_512:
+			ccp_get_dm_area(&ctx, 0,
+					sha->ctx, LSB_ITEM_SIZE - ooffset,
+					LSB_ITEM_SIZE);
+			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
+					sha->ctx, 0,
+					LSB_ITEM_SIZE - ooffset);
+			break;
+		default:
+			ret = -EINVAL;
+			goto e_ctx;
+		}
+	} else {
+		/* Stash the context */
+		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
+				sb_count * CCP_SB_BYTES);
+	}
+
+	if (sha->final && sha->opad) {
+		/* HMAC operation, recursively perform final SHA */
+		struct ccp_cmd hmac_cmd;
+		struct scatterlist sg;
+		u8 *hmac_buf;
+
+		if (sha->opad_len != block_size) {
+			ret = -EINVAL;
+			goto e_data;
+		}
+
+		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
+		if (!hmac_buf) {
+			ret = -ENOMEM;
+			goto e_data;
+		}
+		sg_init_one(&sg, hmac_buf, block_size + digest_size);
+
+		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
+		switch (sha->type) {
+		case CCP_SHA_TYPE_1:
+		case CCP_SHA_TYPE_224:
+		case CCP_SHA_TYPE_256:
+			memcpy(hmac_buf + block_size,
+			       ctx.address + ooffset,
+			       digest_size);
+			break;
+		case CCP_SHA_TYPE_384:
+		case CCP_SHA_TYPE_512:
+			memcpy(hmac_buf + block_size,
+			       ctx.address + LSB_ITEM_SIZE + ooffset,
+			       LSB_ITEM_SIZE);
+			memcpy(hmac_buf + block_size +
+			       (LSB_ITEM_SIZE - ooffset),
+			       ctx.address,
+			       LSB_ITEM_SIZE);
+			break;
+		default:
+			ret = -EINVAL;
+			goto e_ctx;
+		}
+
+		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
+		hmac_cmd.engine = CCP_ENGINE_SHA;
+		hmac_cmd.u.sha.type = sha->type;
+		hmac_cmd.u.sha.ctx = sha->ctx;
+		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
+		hmac_cmd.u.sha.src = &sg;
+		hmac_cmd.u.sha.src_len = block_size + digest_size;
+		hmac_cmd.u.sha.opad = NULL;
+		hmac_cmd.u.sha.opad_len = 0;
+		hmac_cmd.u.sha.first = 1;
+		hmac_cmd.u.sha.final = 1;
+		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;
+
+		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
+		if (ret)
+			cmd->engine_error = hmac_cmd.engine_error;
+
+		kfree(hmac_buf);
+	}
+
+e_data:
+	if (sha->src)
+		ccp_free_data(&src, cmd_q);
+
+e_ctx:
+	ccp_dm_free(&ctx);
+
+	return ret;
+}
+
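+/* Handle an RSA exponentiation: the exponent, modulus and message are
+ * byte-reversed into little endian form, with the modulus concatenated
+ * ahead of the message in the input buffer.
+ */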
+static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
+	struct ccp_dm_workarea exp, src, dst;
+	struct ccp_op op;
+	unsigned int sb_count, i_len, o_len;
+	int ret;
+
+	/* Check against the maximum allowable size, in bits */
+	if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
+		return -EINVAL;
+
+	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
+		return -EINVAL;
+
+	memset(&op, 0, sizeof(op));
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+
+	/* The RSA modulus must precede the message being acted upon, so
+	 * it must be copied to a DMA area where the message and the
+	 * modulus can be concatenated.  Therefore the input buffer
+	 * length required is twice the output buffer length, which
+	 * must be a multiple of 256 bits (32 bytes); compute o_len and
+	 * i_len in bytes, rounding up as required.
+	 */
+	o_len = 32 * ((rsa->key_size + 255) / 256);
+	i_len = o_len * 2;
+
+	sb_count = 0;
+	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
+		/* sb_count is the number of storage block slots required
+		 * for the modulus.
+		 */
+		sb_count = o_len / CCP_SB_BYTES;
+		op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
+								sb_count);
+		if (!op.sb_key)
+			return -EIO;
+	} else {
+		/* A version 5 device allows a modulus size that will not fit
+		 * in the LSB, so the command will transfer it from memory.
+		 * Set the sb key to the default, even though it's not used.
+		 */
+		op.sb_key = cmd_q->sb_key;
+	}
+
+	/* The RSA exponent must be in little endian format. Reverse its
+	 * byte order.
+	 */
+	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
+	if (ret)
+		goto e_sb;
+
+	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
+	if (ret)
+		goto e_exp;
+
+	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
+		/* Copy the exponent to the local storage block, using
+		 * as many 32-byte blocks as were allocated above. It's
+		 * already little endian, so no further change is required.
+		 */
+		ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
+				     CCP_PASSTHRU_BYTESWAP_NOOP);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_exp;
+		}
+	} else {
+		/* The exponent can be retrieved from memory via DMA. */
+		op.exp.u.dma.address = exp.dma.address;
+		op.exp.u.dma.offset = 0;
+	}
+
+	/* Concatenate the modulus and the message. Both the modulus and
+	 * the operands must be in little endian format.  Since the input
+	 * is in big endian format it must be converted.
+	 */
+	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
+	if (ret)
+		goto e_exp;
+
+	ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
+	if (ret)
+		goto e_src;
+	ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
+	if (ret)
+		goto e_src;
+
+	/* Prepare the output area for the operation */
+	ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
+	if (ret)
+		goto e_src;
+
+	op.soc = 1;
+	op.src.u.dma.address = src.dma.address;
+	op.src.u.dma.offset = 0;
+	op.src.u.dma.length = i_len;
+	op.dst.u.dma.address = dst.dma.address;
+	op.dst.u.dma.offset = 0;
+	op.dst.u.dma.length = o_len;
+
+	op.u.rsa.mod_size = rsa->key_size;
+	op.u.rsa.input_len = i_len;
+
+	ret = cmd_q->ccp->vdata->perform->rsa(&op);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_dst;
+	}
+
+	ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);
+
+e_dst:
+	ccp_dm_free(&dst);
+
+e_src:
+	ccp_dm_free(&src);
+
+e_exp:
+	ccp_dm_free(&exp);
+
+e_sb:
+	if (sb_count)
+		cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
+
+	return ret;
+}
+
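+/* Handle a pass-through request: optionally load a bit mask into a storage
+ * block, then copy each source scatterlist entry to the destination.
+ */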
+static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
+				struct ccp_cmd *cmd)
+{
+	struct ccp_passthru_engine *pt = &cmd->u.passthru;
+	struct ccp_dm_workarea mask;
+	struct ccp_data src, dst;
+	struct ccp_op op;
+	bool in_place = false;
+	unsigned int i;
+	int ret = 0;
+
+	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
+		return -EINVAL;
+
+	if (!pt->src || !pt->dst)
+		return -EINVAL;
+
+	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
+			return -EINVAL;
+		if (!pt->mask)
+			return -EINVAL;
+	}
+
+	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
+
+	memset(&op, 0, sizeof(op));
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+
+	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+		/* Load the mask */
+		op.sb_key = cmd_q->sb_key;
+
+		ret = ccp_init_dm_workarea(&mask, cmd_q,
+					   CCP_PASSTHRU_SB_COUNT *
+					   CCP_SB_BYTES,
+					   DMA_TO_DEVICE);
+		if (ret)
+			return ret;
+
+		ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
+		if (ret)
+			goto e_mask;
+		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
+				     CCP_PASSTHRU_BYTESWAP_NOOP);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_mask;
+		}
+	}
+
+	/* Prepare the input and output data workareas. For in-place
+	 * operations we need to set the dma direction to BIDIRECTIONAL
+	 * and copy the src workarea to the dst workarea.
+	 */
+	if (sg_virt(pt->src) == sg_virt(pt->dst))
+		in_place = true;
+
+	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
+			    CCP_PASSTHRU_MASKSIZE,
+			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	if (ret)
+		goto e_mask;
+
+	if (in_place) {
+		dst = src;
+	} else {
+		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
+				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
+		if (ret)
+			goto e_src;
+	}
+
+	/* Send data to the CCP Passthru engine
+	 *   Because the CCP engine works on a single source and destination
+	 *   dma address at a time, each entry in the source scatterlist
+	 *   (after the dma_map_sg call) must be less than or equal to the
+	 *   (remaining) length in the destination scatterlist entry and the
+	 *   length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
+	 */
+	dst.sg_wa.sg_used = 0;
+	for (i = 1; i <= src.sg_wa.dma_count; i++) {
+		if (!dst.sg_wa.sg ||
+		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
+			ret = -EINVAL;
+			goto e_dst;
+		}
+
+		if (i == src.sg_wa.dma_count) {
+			op.eom = 1;
+			op.soc = 1;
+		}
+
+		op.src.type = CCP_MEMTYPE_SYSTEM;
+		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
+		op.src.u.dma.offset = 0;
+		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);
+
+		op.dst.type = CCP_MEMTYPE_SYSTEM;
+		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
+		op.dst.u.dma.offset = dst.sg_wa.sg_used;
+		op.dst.u.dma.length = op.src.u.dma.length;
+
+		ret = cmd_q->ccp->vdata->perform->passthru(&op);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_dst;
+		}
+
+		dst.sg_wa.sg_used += src.sg_wa.sg->length;
+		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
+			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
+			dst.sg_wa.sg_used = 0;
+		}
+		src.sg_wa.sg = sg_next(src.sg_wa.sg);
+	}
+
+e_dst:
+	if (!in_place)
+		ccp_free_data(&dst, cmd_q);
+
+e_src:
+	ccp_free_data(&src, cmd_q);
+
+e_mask:
+	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+		ccp_dm_free(&mask);
+
+	return ret;
+}
+
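+/* Pass-through variant for callers whose buffers are already DMA-mapped;
+ * the source and destination addresses are used directly without remapping.
+ */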
+static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
+				      struct ccp_cmd *cmd)
+{
+	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
+	struct ccp_dm_workarea mask;
+	struct ccp_op op;
+	int ret;
+
+	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
+		return -EINVAL;
+
+	if (!pt->src_dma || !pt->dst_dma)
+		return -EINVAL;
+
+	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
+			return -EINVAL;
+		if (!pt->mask)
+			return -EINVAL;
+	}
+
+	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
+
+	memset(&op, 0, sizeof(op));
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+
+	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+		/* Load the mask */
+		op.sb_key = cmd_q->sb_key;
+
+		mask.length = pt->mask_len;
+		mask.dma.address = pt->mask;
+		mask.dma.length = pt->mask_len;
+
+		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
+				     CCP_PASSTHRU_BYTESWAP_NOOP);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			return ret;
+		}
+	}
+
+	/* Send data to the CCP Passthru engine */
+	op.eom = 1;
+	op.soc = 1;
+
+	op.src.type = CCP_MEMTYPE_SYSTEM;
+	op.src.u.dma.address = pt->src_dma;
+	op.src.u.dma.offset = 0;
+	op.src.u.dma.length = pt->src_len;
+
+	op.dst.type = CCP_MEMTYPE_SYSTEM;
+	op.dst.u.dma.address = pt->dst_dma;
+	op.dst.u.dma.offset = 0;
+	op.dst.u.dma.length = pt->src_len;
+
+	ret = cmd_q->ccp->vdata->perform->passthru(&op);
+	if (ret)
+		cmd->engine_error = cmd_q->cmd_error;
+
+	return ret;
+}
+
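+/* Handle an ECC modular math request (multiply, add or invert) on operands
+ * of up to CCP_ECC_MODULUS_BYTES bytes.
+ */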
+static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
+	struct ccp_dm_workarea src, dst;
+	struct ccp_op op;
+	int ret;
+	u8 *save;
+
+	if (!ecc->u.mm.operand_1 ||
+	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
+		return -EINVAL;
+
+	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
+		if (!ecc->u.mm.operand_2 ||
+		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
+			return -EINVAL;
+
+	if (!ecc->u.mm.result ||
+	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
+		return -EINVAL;
+
+	memset(&op, 0, sizeof(op));
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+
+	/* Concatenate the modulus and the operands. Both the modulus and
+	 * the operands must be in little endian format.  Since the input
+	 * is in big endian format it must be converted and placed in a
+	 * fixed length buffer.
+	 */
+	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
+				   DMA_TO_DEVICE);
+	if (ret)
+		return ret;
+
+	/* Save the workarea address since it is updated in order to perform
+	 * the concatenation
+	 */
+	save = src.address;
+
+	/* Copy the ECC modulus */
+	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
+	if (ret)
+		goto e_src;
+	src.address += CCP_ECC_OPERAND_SIZE;
+
+	/* Copy the first operand */
+	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
+				      ecc->u.mm.operand_1_len);
+	if (ret)
+		goto e_src;
+	src.address += CCP_ECC_OPERAND_SIZE;
+
+	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
+		/* Copy the second operand */
+		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
+					      ecc->u.mm.operand_2_len);
+		if (ret)
+			goto e_src;
+		src.address += CCP_ECC_OPERAND_SIZE;
+	}
+
+	/* Restore the workarea address */
+	src.address = save;
+
+	/* Prepare the output area for the operation */
+	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
+				   DMA_FROM_DEVICE);
+	if (ret)
+		goto e_src;
+
+	op.soc = 1;
+	op.src.u.dma.address = src.dma.address;
+	op.src.u.dma.offset = 0;
+	op.src.u.dma.length = src.length;
+	op.dst.u.dma.address = dst.dma.address;
+	op.dst.u.dma.offset = 0;
+	op.dst.u.dma.length = dst.length;
+
+	op.u.ecc.function = cmd->u.ecc.function;
+
+	ret = cmd_q->ccp->vdata->perform->ecc(&op);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_dst;
+	}
+
+	ecc->ecc_result = le16_to_cpup(
+		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
+	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
+		ret = -EIO;
+		goto e_dst;
+	}
+
+	/* Save the ECC result */
+	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
+				CCP_ECC_MODULUS_BYTES);
+
+e_dst:
+	ccp_dm_free(&dst);
+
+e_src:
+	ccp_dm_free(&src);
+
+	return ret;
+}
+
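+/* Handle an ECC point math request (point add, multiply or double); the Z
+ * coordinates of the input points are set to 1 here.
+ */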
+static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
+	struct ccp_dm_workarea src, dst;
+	struct ccp_op op;
+	int ret;
+	u8 *save;
+
+	if (!ecc->u.pm.point_1.x ||
+	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
+	    !ecc->u.pm.point_1.y ||
+	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
+		return -EINVAL;
+
+	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
+		if (!ecc->u.pm.point_2.x ||
+		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
+		    !ecc->u.pm.point_2.y ||
+		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
+			return -EINVAL;
+	} else {
+		if (!ecc->u.pm.domain_a ||
+		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
+			return -EINVAL;
+
+		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
+			if (!ecc->u.pm.scalar ||
+			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
+				return -EINVAL;
+	}
+
+	if (!ecc->u.pm.result.x ||
+	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
+	    !ecc->u.pm.result.y ||
+	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
+		return -EINVAL;
+
+	memset(&op, 0, sizeof(op));
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+
+	/* Concatenate the modulus and the operands. Both the modulus and
+	 * the operands must be in little endian format.  Since the input
+	 * is in big endian format it must be converted and placed in a
+	 * fixed length buffer.
+	 */
+	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
+				   DMA_TO_DEVICE);
+	if (ret)
+		return ret;
+
+	/* Save the workarea address since it is updated in order to perform
+	 * the concatenation
+	 */
+	save = src.address;
+
+	/* Copy the ECC modulus */
+	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
+	if (ret)
+		goto e_src;
+	src.address += CCP_ECC_OPERAND_SIZE;
+
+	/* Copy the first point X and Y coordinate */
+	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
+				      ecc->u.pm.point_1.x_len);
+	if (ret)
+		goto e_src;
+	src.address += CCP_ECC_OPERAND_SIZE;
+	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
+				      ecc->u.pm.point_1.y_len);
+	if (ret)
+		goto e_src;
+	src.address += CCP_ECC_OPERAND_SIZE;
+
+	/* Set the first point Z coordinate to 1 */
+	*src.address = 0x01;
+	src.address += CCP_ECC_OPERAND_SIZE;
+
+	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
+		/* Copy the second point X and Y coordinate */
+		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
+					      ecc->u.pm.point_2.x_len);
+		if (ret)
+			goto e_src;
+		src.address += CCP_ECC_OPERAND_SIZE;
+		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
+					      ecc->u.pm.point_2.y_len);
+		if (ret)
+			goto e_src;
+		src.address += CCP_ECC_OPERAND_SIZE;
+
+		/* Set the second point Z coordinate to 1 */
+		*src.address = 0x01;
+		src.address += CCP_ECC_OPERAND_SIZE;
+	} else {
+		/* Copy the Domain "a" parameter */
+		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
+					      ecc->u.pm.domain_a_len);
+		if (ret)
+			goto e_src;
+		src.address += CCP_ECC_OPERAND_SIZE;
+
+		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
+			/* Copy the scalar value */
+			ret = ccp_reverse_set_dm_area(&src, 0,
+						      ecc->u.pm.scalar, 0,
+						      ecc->u.pm.scalar_len);
+			if (ret)
+				goto e_src;
+			src.address += CCP_ECC_OPERAND_SIZE;
+		}
+	}
+
+	/* Restore the workarea address */
+	src.address = save;
+
+	/* Prepare the output area for the operation */
+	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
+				   DMA_FROM_DEVICE);
+	if (ret)
+		goto e_src;
+
+	op.soc = 1;
+	op.src.u.dma.address = src.dma.address;
+	op.src.u.dma.offset = 0;
+	op.src.u.dma.length = src.length;
+	op.dst.u.dma.address = dst.dma.address;
+	op.dst.u.dma.offset = 0;
+	op.dst.u.dma.length = dst.length;
+
+	op.u.ecc.function = cmd->u.ecc.function;
+
+	ret = cmd_q->ccp->vdata->perform->ecc(&op);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_dst;
+	}
+
+	ecc->ecc_result = le16_to_cpup(
+		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
+	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
+		ret = -EIO;
+		goto e_dst;
+	}
+
+	/* Save the workarea address since it is updated as we walk through
+	 * it to copy the point math results
+	 */
+	save = dst.address;
+
+	/* Save the ECC result X and Y coordinates */
+	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
+				CCP_ECC_MODULUS_BYTES);
+	dst.address += CCP_ECC_OUTPUT_SIZE;
+	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
+				CCP_ECC_MODULUS_BYTES);
+	dst.address += CCP_ECC_OUTPUT_SIZE;
+
+	/* Restore the workarea address */
+	dst.address = save;
+
+e_dst:
+	ccp_dm_free(&dst);
+
+e_src:
+	ccp_dm_free(&src);
+
+	return ret;
+}
+
+static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
+
+	ecc->ecc_result = 0;
+
+	if (!ecc->mod ||
+	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
+		return -EINVAL;
+
+	switch (ecc->function) {
+	case CCP_ECC_FUNCTION_MMUL_384BIT:
+	case CCP_ECC_FUNCTION_MADD_384BIT:
+	case CCP_ECC_FUNCTION_MINV_384BIT:
+		return ccp_run_ecc_mm_cmd(cmd_q, cmd);
+
+	case CCP_ECC_FUNCTION_PADD_384BIT:
+	case CCP_ECC_FUNCTION_PMUL_384BIT:
+	case CCP_ECC_FUNCTION_PDBL_384BIT:
+		return ccp_run_ecc_pm_cmd(cmd_q, cmd);
+
+	default:
+		return -EINVAL;
+	}
+}
+
+int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+	int ret;
+
+	cmd->engine_error = 0;
+	cmd_q->cmd_error = 0;
+	cmd_q->int_rcvd = 0;
+	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);
+
+	switch (cmd->engine) {
+	case CCP_ENGINE_AES:
+		ret = ccp_run_aes_cmd(cmd_q, cmd);
+		break;
+	case CCP_ENGINE_XTS_AES_128:
+		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
+		break;
+	case CCP_ENGINE_DES3:
+		ret = ccp_run_des3_cmd(cmd_q, cmd);
+		break;
+	case CCP_ENGINE_SHA:
+		ret = ccp_run_sha_cmd(cmd_q, cmd);
+		break;
+	case CCP_ENGINE_RSA:
+		ret = ccp_run_rsa_cmd(cmd_q, cmd);
+		break;
+	case CCP_ENGINE_PASSTHRU:
+		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
+			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
+		else
+			ret = ccp_run_passthru_cmd(cmd_q, cmd);
+		break;
+	case CCP_ENGINE_ECC:
+		ret = ccp_run_ecc_cmd(cmd_q, cmd);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
new file mode 100644
index 0000000..72790d8
--- /dev/null
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -0,0 +1,956 @@
+/*
+ * AMD Platform Security Processor (PSP) interface
+ *
+ * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/ccp.h>
+#include <linux/firmware.h>
+
+#include "sp-dev.h"
+#include "psp-dev.h"
+
+#define SEV_VERSION_GREATER_OR_EQUAL(_maj, _min)	\
+		((psp_master->api_major) >= _maj &&	\
+		 (psp_master->api_minor) >= _min)
+
+#define DEVICE_NAME	"sev"
+#define SEV_FW_FILE	"amd/sev.fw"
+
+static DEFINE_MUTEX(sev_cmd_mutex);
+static struct sev_misc_dev *misc_dev;
+static struct psp_device *psp_master;
+
+static int psp_cmd_timeout = 100;
+module_param(psp_cmd_timeout, int, 0644);
+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
+
+static int psp_probe_timeout = 5;
+module_param(psp_probe_timeout, int, 0644);
+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+
+static bool psp_dead;
+static int psp_timeout;
+
+static struct psp_device *psp_alloc_struct(struct sp_device *sp)
+{
+	struct device *dev = sp->dev;
+	struct psp_device *psp;
+
+	psp = devm_kzalloc(dev, sizeof(*psp), GFP_KERNEL);
+	if (!psp)
+		return NULL;
+
+	psp->dev = dev;
+	psp->sp = sp;
+
+	snprintf(psp->name, sizeof(psp->name), "psp-%u", sp->ord);
+
+	return psp;
+}
+
+static irqreturn_t psp_irq_handler(int irq, void *data)
+{
+	struct psp_device *psp = data;
+	unsigned int status;
+	int reg;
+
+	/* Read the interrupt status: */
+	status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
+
+	/* Check if it is command completion: */
+	if (!(status & PSP_CMD_COMPLETE))
+		goto done;
+
+	/* Check if it is SEV command completion: */
+	reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
+	if (reg & PSP_CMDRESP_RESP) {
+		psp->sev_int_rcvd = 1;
+		wake_up(&psp->sev_int_queue);
+	}
+
+done:
+	/* Clear the interrupt status by writing the same value we read. */
+	iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
+
+	return IRQ_HANDLED;
+}
+
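+/*
+ * Wait for the PSP to signal SEV command completion (see psp_irq_handler()
+ * above), then read back the command/response register into *reg.
+ */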
+static int sev_wait_cmd_ioc(struct psp_device *psp,
+			    unsigned int *reg, unsigned int timeout)
+{
+	int ret;
+
+	ret = wait_event_timeout(psp->sev_int_queue,
+			psp->sev_int_rcvd, timeout * HZ);
+	if (!ret)
+		return -ETIMEDOUT;
+
+	*reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
+
+	return 0;
+}
+
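+/*
+ * Return the size of the data buffer associated with a given SEV command,
+ * or 0 for commands that do not take a command buffer.
+ */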
+static int sev_cmd_buffer_len(int cmd)
+{
+	switch (cmd) {
+	case SEV_CMD_INIT:			return sizeof(struct sev_data_init);
+	case SEV_CMD_PLATFORM_STATUS:		return sizeof(struct sev_user_data_status);
+	case SEV_CMD_PEK_CSR:			return sizeof(struct sev_data_pek_csr);
+	case SEV_CMD_PEK_CERT_IMPORT:		return sizeof(struct sev_data_pek_cert_import);
+	case SEV_CMD_PDH_CERT_EXPORT:		return sizeof(struct sev_data_pdh_cert_export);
+	case SEV_CMD_LAUNCH_START:		return sizeof(struct sev_data_launch_start);
+	case SEV_CMD_LAUNCH_UPDATE_DATA:	return sizeof(struct sev_data_launch_update_data);
+	case SEV_CMD_LAUNCH_UPDATE_VMSA:	return sizeof(struct sev_data_launch_update_vmsa);
+	case SEV_CMD_LAUNCH_FINISH:		return sizeof(struct sev_data_launch_finish);
+	case SEV_CMD_LAUNCH_MEASURE:		return sizeof(struct sev_data_launch_measure);
+	case SEV_CMD_ACTIVATE:			return sizeof(struct sev_data_activate);
+	case SEV_CMD_DEACTIVATE:		return sizeof(struct sev_data_deactivate);
+	case SEV_CMD_DECOMMISSION:		return sizeof(struct sev_data_decommission);
+	case SEV_CMD_GUEST_STATUS:		return sizeof(struct sev_data_guest_status);
+	case SEV_CMD_DBG_DECRYPT:		return sizeof(struct sev_data_dbg);
+	case SEV_CMD_DBG_ENCRYPT:		return sizeof(struct sev_data_dbg);
+	case SEV_CMD_SEND_START:		return sizeof(struct sev_data_send_start);
+	case SEV_CMD_SEND_UPDATE_DATA:		return sizeof(struct sev_data_send_update_data);
+	case SEV_CMD_SEND_UPDATE_VMSA:		return sizeof(struct sev_data_send_update_vmsa);
+	case SEV_CMD_SEND_FINISH:		return sizeof(struct sev_data_send_finish);
+	case SEV_CMD_RECEIVE_START:		return sizeof(struct sev_data_receive_start);
+	case SEV_CMD_RECEIVE_FINISH:		return sizeof(struct sev_data_receive_finish);
+	case SEV_CMD_RECEIVE_UPDATE_DATA:	return sizeof(struct sev_data_receive_update_data);
+	case SEV_CMD_RECEIVE_UPDATE_VMSA:	return sizeof(struct sev_data_receive_update_vmsa);
+	case SEV_CMD_LAUNCH_UPDATE_SECRET:	return sizeof(struct sev_data_launch_secret);
+	case SEV_CMD_DOWNLOAD_FIRMWARE:		return sizeof(struct sev_data_download_firmware);
+	case SEV_CMD_GET_ID:			return sizeof(struct sev_data_get_id);
+	default:				return 0;
+	}
+
+	return 0;
+}
+
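+/*
+ * Issue a single SEV command to the PSP firmware and wait for its
+ * completion. Callers must hold sev_cmd_mutex.
+ */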
+static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+{
+	struct psp_device *psp = psp_master;
+	unsigned int phys_lsb, phys_msb;
+	unsigned int reg, ret = 0;
+
+	if (!psp)
+		return -ENODEV;
+
+	if (psp_dead)
+		return -EBUSY;
+
+	/* Get the physical address of the command buffer */
+	phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
+	phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
+
+	dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
+		cmd, phys_msb, phys_lsb, psp_timeout);
+
+	print_hex_dump_debug("(in):  ", DUMP_PREFIX_OFFSET, 16, 2, data,
+			     sev_cmd_buffer_len(cmd), false);
+
+	iowrite32(phys_lsb, psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg);
+	iowrite32(phys_msb, psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg);
+
+	psp->sev_int_rcvd = 0;
+
+	reg = cmd;
+	reg <<= PSP_CMDRESP_CMD_SHIFT;
+	reg |= PSP_CMDRESP_IOC;
+	iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
+
+	/* wait for command completion */
+	ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
+	if (ret) {
+		if (psp_ret)
+			*psp_ret = 0;
+
+		dev_err(psp->dev, "sev command %#x timed out, disabling PSP\n", cmd);
+		psp_dead = true;
+
+		return ret;
+	}
+
+	psp_timeout = psp_cmd_timeout;
+
+	if (psp_ret)
+		*psp_ret = reg & PSP_CMDRESP_ERR_MASK;
+
+	if (reg & PSP_CMDRESP_ERR_MASK) {
+		dev_dbg(psp->dev, "sev command %#x failed (%#010x)\n",
+			cmd, reg & PSP_CMDRESP_ERR_MASK);
+		ret = -EIO;
+	}
+
+	print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+			     sev_cmd_buffer_len(cmd), false);
+
+	return ret;
+}
+
+static int sev_do_cmd(int cmd, void *data, int *psp_ret)
+{
+	int rc;
+
+	mutex_lock(&sev_cmd_mutex);
+	rc = __sev_do_cmd_locked(cmd, data, psp_ret);
+	mutex_unlock(&sev_cmd_mutex);
+
+	return rc;
+}
+
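+/*
+ * Transition the SEV platform to the INIT state. Callers must hold
+ * sev_cmd_mutex.
+ */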
+static int __sev_platform_init_locked(int *error)
+{
+	struct psp_device *psp = psp_master;
+	int rc = 0;
+
+	if (!psp)
+		return -ENODEV;
+
+	if (psp->sev_state == SEV_STATE_INIT)
+		return 0;
+
+	rc = __sev_do_cmd_locked(SEV_CMD_INIT, &psp->init_cmd_buf, error);
+	if (rc)
+		return rc;
+
+	psp->sev_state = SEV_STATE_INIT;
+	dev_dbg(psp->dev, "SEV firmware initialized\n");
+
+	return rc;
+}
+
+int sev_platform_init(int *error)
+{
+	int rc;
+
+	mutex_lock(&sev_cmd_mutex);
+	rc = __sev_platform_init_locked(error);
+	mutex_unlock(&sev_cmd_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(sev_platform_init);
+
+static int __sev_platform_shutdown_locked(int *error)
+{
+	int ret;
+
+	ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
+	if (ret)
+		return ret;
+
+	psp_master->sev_state = SEV_STATE_UNINIT;
+	dev_dbg(psp_master->dev, "SEV firmware shutdown\n");
+
+	return ret;
+}
+
+static int sev_platform_shutdown(int *error)
+{
+	int rc;
+
+	mutex_lock(&sev_cmd_mutex);
+	rc = __sev_platform_shutdown_locked(NULL);
+	mutex_unlock(&sev_cmd_mutex);
+
+	return rc;
+}
+
+static int sev_get_platform_state(int *state, int *error)
+{
+	int rc;
+
+	rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
+				 &psp_master->status_cmd_buf, error);
+	if (rc)
+		return rc;
+
+	*state = psp_master->status_cmd_buf.state;
+	return rc;
+}
+
+static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
+{
+	int state, rc;
+
+	/*
+	 * The SEV spec requires that FACTORY_RESET be issued in the UNINIT
+	 * state. Before going further, check whether any guest is active.
+	 *
+	 * If the FW is in the WORKING state, deny the request; otherwise
+	 * issue a SHUTDOWN command to transition from INIT to UNINIT before
+	 * issuing the FACTORY_RESET.
+	 */
+	rc = sev_get_platform_state(&state, &argp->error);
+	if (rc)
+		return rc;
+
+	if (state == SEV_STATE_WORKING)
+		return -EBUSY;
+
+	if (state == SEV_STATE_INIT) {
+		rc = __sev_platform_shutdown_locked(&argp->error);
+		if (rc)
+			return rc;
+	}
+
+	return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
+{
+	struct sev_user_data_status *data = &psp_master->status_cmd_buf;
+	int ret;
+
+	ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
+	if (ret)
+		return ret;
+
+	if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
+		ret = -EFAULT;
+
+	return ret;
+}
+
+static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
+{
+	int rc;
+
+	if (psp_master->sev_state == SEV_STATE_UNINIT) {
+		rc = __sev_platform_init_locked(&argp->error);
+		if (rc)
+			return rc;
+	}
+
+	return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+}
+
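+/*
+ * SEV_PEK_CSR ioctl handler: generate a PEK certificate signing request.
+ * If userspace passes a NULL address or a zero length, only the required
+ * CSR length is reported back.
+ */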
+static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
+{
+	struct sev_user_data_pek_csr input;
+	struct sev_data_pek_csr *data;
+	void *blob = NULL;
+	int ret;
+
+	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+		return -EFAULT;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	/* userspace wants to query CSR length */
+	if (!input.address || !input.length)
+		goto cmd;
+
+	/* allocate a physically contiguous buffer to store the CSR blob */
+	if (!access_ok(VERIFY_WRITE, input.address, input.length) ||
+	    input.length > SEV_FW_BLOB_MAX_SIZE) {
+		ret = -EFAULT;
+		goto e_free;
+	}
+
+	blob = kmalloc(input.length, GFP_KERNEL);
+	if (!blob) {
+		ret = -ENOMEM;
+		goto e_free;
+	}
+
+	data->address = __psp_pa(blob);
+	data->len = input.length;
+
+cmd:
+	if (psp_master->sev_state == SEV_STATE_UNINIT) {
+		ret = __sev_platform_init_locked(&argp->error);
+		if (ret)
+			goto e_free_blob;
+	}
+
+	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
+
+	/* If we queried the CSR length, the FW has reported it in data->len. */
+	input.length = data->len;
+
+	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+		ret = -EFAULT;
+		goto e_free_blob;
+	}
+
+	if (blob) {
+		if (copy_to_user((void __user *)input.address, blob, input.length))
+			ret = -EFAULT;
+	}
+
+e_free_blob:
+	kfree(blob);
+e_free:
+	kfree(data);
+	return ret;
+}
+
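+/*
+ * Duplicate a userspace blob into kernel memory, rejecting blobs larger
+ * than SEV_FW_BLOB_MAX_SIZE. Returns an ERR_PTR() on failure.
+ */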
+void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+{
+	if (!uaddr || !len)
+		return ERR_PTR(-EINVAL);
+
+	/* verify that blob length does not exceed our limit */
+	if (len > SEV_FW_BLOB_MAX_SIZE)
+		return ERR_PTR(-EINVAL);
+
+	return memdup_user((void __user *)(uintptr_t)uaddr, len);
+}
+EXPORT_SYMBOL_GPL(psp_copy_user_blob);
+
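+/*
+ * Query PLATFORM_STATUS and cache the SEV API version and build number
+ * in psp_master. Returns 0 on success, 1 on failure.
+ */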
+static int sev_get_api_version(void)
+{
+	struct sev_user_data_status *status;
+	int error, ret;
+
+	status = &psp_master->status_cmd_buf;
+	ret = sev_platform_status(status, &error);
+	if (ret) {
+		dev_err(psp_master->dev,
+			"SEV: failed to get status. Error: %#x\n", error);
+		return 1;
+	}
+
+	psp_master->api_major = status->api_major;
+	psp_master->api_minor = status->api_minor;
+	psp_master->build = status->build;
+
+	return 0;
+}
+
+/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
+static int sev_update_firmware(struct device *dev)
+{
+	struct sev_data_download_firmware *data;
+	const struct firmware *firmware;
+	int ret, error, order;
+	struct page *p;
+	u64 data_size;
+
+	ret = request_firmware(&firmware, SEV_FW_FILE, dev);
+	if (ret < 0)
+		return -1;
+
+	/*
+	 * SEV FW expects the physical address given to it to be 32-byte
+	 * aligned. The allocated memory holds the data structure at the
+	 * beginning, followed by the firmware image that is passed to the
+	 * SEV FW. Allocate enough memory for the data structure + alignment
+	 * padding + SEV FW image.
+	 */
+	data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
+
+	order = get_order(firmware->size + data_size);
+	p = alloc_pages(GFP_KERNEL, order);
+	if (!p) {
+		ret = -1;
+		goto fw_err;
+	}
+
+	/*
+	 * Copy firmware data to a kernel allocated contiguous
+	 * memory region.
+	 */
+	data = page_address(p);
+	memcpy(page_address(p) + data_size, firmware->data, firmware->size);
+
+	data->address = __psp_pa(page_address(p) + data_size);
+	data->len = firmware->size;
+
+	ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+	if (ret)
+		dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
+	else
+		dev_info(dev, "SEV firmware update successful\n");
+
+	__free_pages(p, order);
+
+fw_err:
+	release_firmware(firmware);
+
+	return ret;
+}
+
+static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+{
+	struct sev_user_data_pek_cert_import input;
+	struct sev_data_pek_cert_import *data;
+	void *pek_blob, *oca_blob;
+	int ret;
+
+	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+		return -EFAULT;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	/* copy the PEK certificate blob from userspace */
+	pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
+	if (IS_ERR(pek_blob)) {
+		ret = PTR_ERR(pek_blob);
+		goto e_free;
+	}
+
+	data->pek_cert_address = __psp_pa(pek_blob);
+	data->pek_cert_len = input.pek_cert_len;
+
+	/* copy the OCA certificate blob from userspace */
+	oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
+	if (IS_ERR(oca_blob)) {
+		ret = PTR_ERR(oca_blob);
+		goto e_free_pek;
+	}
+
+	data->oca_cert_address = __psp_pa(oca_blob);
+	data->oca_cert_len = input.oca_cert_len;
+
+	/* If platform is not in INIT state then transition it to INIT */
+	if (psp_master->sev_state != SEV_STATE_INIT) {
+		ret = __sev_platform_init_locked(&argp->error);
+		if (ret)
+			goto e_free_oca;
+	}
+
+	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
+
+e_free_oca:
+	kfree(oca_blob);
+e_free_pek:
+	kfree(pek_blob);
+e_free:
+	kfree(data);
+	return ret;
+}
+
+static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
+{
+	struct sev_data_get_id *data;
+	u64 data_size, user_size;
+	void *id_blob, *mem;
+	int ret;
+
+	/* SEV GET_ID available from SEV API v0.16 and up */
+	if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16))
+		return -ENOTSUPP;
+
+	/* SEV FW expects the buffer it fills with the ID to be
+	 * 8-byte aligned. The allocated memory must be large enough to
+	 * hold the data structure + alignment padding + the memory
+	 * where the SEV FW writes the ID.
+	 */
+	data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
+	user_size = sizeof(struct sev_user_data_get_id);
+
+	mem = kzalloc(data_size + user_size, GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+
+	data = mem;
+	id_blob = mem + data_size;
+
+	data->address = __psp_pa(id_blob);
+	data->len = user_size;
+
+	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+	if (!ret) {
+		if (copy_to_user((void __user *)argp->data, id_blob, data->len))
+			ret = -EFAULT;
+	}
+
+	kfree(mem);
+
+	return ret;
+}
+
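+/*
+ * SEV_PDH_CERT_EXPORT ioctl handler: export the PDH certificate and its
+ * certificate chain. As with the CSR path, passing NULL addresses queries
+ * only the required buffer lengths.
+ */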
+static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+{
+	struct sev_user_data_pdh_cert_export input;
+	void *pdh_blob = NULL, *cert_blob = NULL;
+	struct sev_data_pdh_cert_export *data;
+	int ret;
+
+	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+		return -EFAULT;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	/* Userspace wants to query the certificate length. */
+	if (!input.pdh_cert_address ||
+	    !input.pdh_cert_len ||
+	    !input.cert_chain_address)
+		goto cmd;
+
+	/* Allocate a physically contiguous buffer to store the PDH blob. */
+	if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
+	    !access_ok(VERIFY_WRITE, input.pdh_cert_address, input.pdh_cert_len)) {
+		ret = -EFAULT;
+		goto e_free;
+	}
+
+	/* Allocate a physically contiguous buffer to store the cert chain blob. */
+	if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
+	    !access_ok(VERIFY_WRITE, input.cert_chain_address, input.cert_chain_len)) {
+		ret = -EFAULT;
+		goto e_free;
+	}
+
+	pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
+	if (!pdh_blob) {
+		ret = -ENOMEM;
+		goto e_free;
+	}
+
+	data->pdh_cert_address = __psp_pa(pdh_blob);
+	data->pdh_cert_len = input.pdh_cert_len;
+
+	cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
+	if (!cert_blob) {
+		ret = -ENOMEM;
+		goto e_free_pdh;
+	}
+
+	data->cert_chain_address = __psp_pa(cert_blob);
+	data->cert_chain_len = input.cert_chain_len;
+
+cmd:
+	/* If platform is not in INIT state then transition it to INIT. */
+	if (psp_master->sev_state != SEV_STATE_INIT) {
+		ret = __sev_platform_init_locked(&argp->error);
+		if (ret)
+			goto e_free_cert;
+	}
+
+	ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
+
+	/* If we queried the lengths, the FW has reported them in *data. */
+	input.cert_chain_len = data->cert_chain_len;
+	input.pdh_cert_len = data->pdh_cert_len;
+
+	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+		ret = -EFAULT;
+		goto e_free_cert;
+	}
+
+	if (pdh_blob) {
+		if (copy_to_user((void __user *)input.pdh_cert_address,
+				 pdh_blob, input.pdh_cert_len)) {
+			ret = -EFAULT;
+			goto e_free_cert;
+		}
+	}
+
+	if (cert_blob) {
+		if (copy_to_user((void __user *)input.cert_chain_address,
+				 cert_blob, input.cert_chain_len))
+			ret = -EFAULT;
+	}
+
+e_free_cert:
+	kfree(cert_blob);
+e_free_pdh:
+	kfree(pdh_blob);
+e_free:
+	kfree(data);
+	return ret;
+}
+
+static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct sev_issue_cmd input;
+	int ret = -EFAULT;
+
+	if (!psp_master)
+		return -ENODEV;
+
+	if (ioctl != SEV_ISSUE_CMD)
+		return -EINVAL;
+
+	if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
+		return -EFAULT;
+
+	if (input.cmd > SEV_MAX)
+		return -EINVAL;
+
+	mutex_lock(&sev_cmd_mutex);
+
+	switch (input.cmd) {
+
+	case SEV_FACTORY_RESET:
+		ret = sev_ioctl_do_reset(&input);
+		break;
+	case SEV_PLATFORM_STATUS:
+		ret = sev_ioctl_do_platform_status(&input);
+		break;
+	case SEV_PEK_GEN:
+		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
+		break;
+	case SEV_PDH_GEN:
+		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
+		break;
+	case SEV_PEK_CSR:
+		ret = sev_ioctl_do_pek_csr(&input);
+		break;
+	case SEV_PEK_CERT_IMPORT:
+		ret = sev_ioctl_do_pek_import(&input);
+		break;
+	case SEV_PDH_CERT_EXPORT:
+		ret = sev_ioctl_do_pdh_export(&input);
+		break;
+	case SEV_GET_ID:
+		ret = sev_ioctl_do_get_id(&input);
+		break;
+	default:
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
+		ret = -EFAULT;
+out:
+	mutex_unlock(&sev_cmd_mutex);
+
+	return ret;
+}
+
+static const struct file_operations sev_fops = {
+	.owner	= THIS_MODULE,
+	.unlocked_ioctl = sev_ioctl,
+};
+
+int sev_platform_status(struct sev_user_data_status *data, int *error)
+{
+	return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_platform_status);
+
+int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
+{
+	return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_deactivate);
+
+int sev_guest_activate(struct sev_data_activate *data, int *error)
+{
+	return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_activate);
+
+int sev_guest_decommission(struct sev_data_decommission *data, int *error)
+{
+	return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_decommission);
+
+int sev_guest_df_flush(int *error)
+{
+	return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_df_flush);
+
+static void sev_exit(struct kref *ref)
+{
+	struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
+
+	misc_deregister(&misc_dev->misc);
+}
+
+static int sev_misc_init(struct psp_device *psp)
+{
+	struct device *dev = psp->dev;
+	int ret;
+
+	/*
+	 * SEV feature support can be detected on multiple devices, but SEV
+	 * FW commands must be issued on the master. During probe we do not
+	 * yet know which device is the master, so /dev/sev is created on the
+	 * first device probe. sev_do_cmd() then routes commands to the
+	 * firmware through the master device.
+	 */
+	if (!misc_dev) {
+		struct miscdevice *misc;
+
+		misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
+		if (!misc_dev)
+			return -ENOMEM;
+
+		misc = &misc_dev->misc;
+		misc->minor = MISC_DYNAMIC_MINOR;
+		misc->name = DEVICE_NAME;
+		misc->fops = &sev_fops;
+
+		ret = misc_register(misc);
+		if (ret)
+			return ret;
+
+		kref_init(&misc_dev->refcount);
+	} else {
+		kref_get(&misc_dev->refcount);
+	}
+
+	init_waitqueue_head(&psp->sev_int_queue);
+	psp->sev_misc = misc_dev;
+	dev_dbg(dev, "registered SEV device\n");
+
+	return 0;
+}
+
+static int sev_init(struct psp_device *psp)
+{
+	/* Check if device supports SEV feature */
+	if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) {
+		dev_dbg(psp->dev, "device does not support SEV\n");
+		return 1;
+	}
+
+	return sev_misc_init(psp);
+}
+
+int psp_dev_init(struct sp_device *sp)
+{
+	struct device *dev = sp->dev;
+	struct psp_device *psp;
+	int ret;
+
+	ret = -ENOMEM;
+	psp = psp_alloc_struct(sp);
+	if (!psp)
+		goto e_err;
+
+	sp->psp_data = psp;
+
+	psp->vdata = (struct psp_vdata *)sp->dev_vdata->psp_vdata;
+	if (!psp->vdata) {
+		ret = -ENODEV;
+		dev_err(dev, "missing driver data\n");
+		goto e_err;
+	}
+
+	psp->io_regs = sp->io_map;
+
+	/* Disable and clear interrupts until ready */
+	iowrite32(0, psp->io_regs + psp->vdata->inten_reg);
+	iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg);
+
+	/* Request an irq */
+	ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp);
+	if (ret) {
+		dev_err(dev, "psp: unable to allocate an IRQ\n");
+		goto e_err;
+	}
+
+	ret = sev_init(psp);
+	if (ret)
+		goto e_irq;
+
+	if (sp->set_psp_master_device)
+		sp->set_psp_master_device(sp);
+
+	/* Enable interrupt */
+	iowrite32(-1, psp->io_regs + psp->vdata->inten_reg);
+
+	dev_notice(dev, "psp enabled\n");
+
+	return 0;
+
+e_irq:
+	sp_free_psp_irq(psp->sp, psp);
+e_err:
+	sp->psp_data = NULL;
+
+	dev_notice(dev, "psp initialization failed\n");
+
+	return ret;
+}
+
+void psp_dev_destroy(struct sp_device *sp)
+{
+	struct psp_device *psp = sp->psp_data;
+
+	if (!psp)
+		return;
+
+	if (psp->sev_misc)
+		kref_put(&misc_dev->refcount, sev_exit);
+
+	sp_free_psp_irq(sp, psp);
+}
+
+int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
+				void *data, int *error)
+{
+	if (!filep || filep->f_op != &sev_fops)
+		return -EBADF;
+
+	return sev_do_cmd(cmd, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
+
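+/*
+ * Called once the PCI driver has been registered: select the PSP master
+ * device, query the SEV API version, optionally update the SEV firmware,
+ * and transition the platform to the INIT state.
+ */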
+void psp_pci_init(void)
+{
+	struct sp_device *sp;
+	int error, rc;
+
+	sp = sp_get_psp_master_device();
+	if (!sp)
+		return;
+
+	psp_master = sp->psp_data;
+
+	psp_timeout = psp_probe_timeout;
+
+	if (sev_get_api_version())
+		goto err;
+
+	if (SEV_VERSION_GREATER_OR_EQUAL(0, 15) &&
+	    sev_update_firmware(psp_master->dev) == 0)
+		sev_get_api_version();
+
+	/* Initialize the platform */
+	rc = sev_platform_init(&error);
+	if (rc) {
+		dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
+		goto err;
+	}
+
+	dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
+		 psp_master->api_minor, psp_master->build);
+
+	return;
+
+err:
+	psp_master = NULL;
+}
+
+void psp_pci_exit(void)
+{
+	if (!psp_master)
+		return;
+
+	sev_platform_shutdown(NULL);
+}
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
new file mode 100644
index 0000000..8b53a96
--- /dev/null
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -0,0 +1,70 @@
+/*
+ * AMD Platform Security Processor (PSP) interface driver
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PSP_DEV_H__
+#define __PSP_DEV_H__
+
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/dmaengine.h>
+#include <linux/psp-sev.h>
+#include <linux/miscdevice.h>
+
+#include "sp-dev.h"
+
+#define PSP_CMD_COMPLETE		BIT(1)
+
+#define PSP_CMDRESP_CMD_SHIFT		16
+#define PSP_CMDRESP_IOC			BIT(0)
+#define PSP_CMDRESP_RESP		BIT(31)
+#define PSP_CMDRESP_ERR_MASK		0xffff
+
+#define MAX_PSP_NAME_LEN		16
+
+struct sev_misc_dev {
+	struct kref refcount;
+	struct miscdevice misc;
+};
+
+struct psp_device {
+	struct list_head entry;
+
+	struct psp_vdata *vdata;
+	char name[MAX_PSP_NAME_LEN];
+
+	struct device *dev;
+	struct sp_device *sp;
+
+	void __iomem *io_regs;
+
+	int sev_state;
+	unsigned int sev_int_rcvd;
+	wait_queue_head_t sev_int_queue;
+	struct sev_misc_dev *sev_misc;
+	struct sev_user_data_status status_cmd_buf;
+	struct sev_data_init init_cmd_buf;
+
+	u8 api_major;
+	u8 api_minor;
+	u8 build;
+};
+
+#endif /* __PSP_DEV_H__ */
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
new file mode 100644
index 0000000..e045900
--- /dev/null
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -0,0 +1,312 @@
+/*
+ * AMD Secure Processor driver
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+#include "sp-dev.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.1.0");
+MODULE_DESCRIPTION("AMD Secure Processor driver");
+
+/* List of SP devices, the read-write lock protecting it, and access helpers
+ *
+ * Lock structure: take sp_unit_lock whenever the SP list needs to be
+ * examined or modified.
+ */
+static DEFINE_RWLOCK(sp_unit_lock);
+static LIST_HEAD(sp_units);
+
+/* Ever-increasing value to produce unique unit numbers */
+static atomic_t sp_ordinal;
+
+static void sp_add_device(struct sp_device *sp)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&sp_unit_lock, flags);
+
+	list_add_tail(&sp->entry, &sp_units);
+
+	write_unlock_irqrestore(&sp_unit_lock, flags);
+}
+
+static void sp_del_device(struct sp_device *sp)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&sp_unit_lock, flags);
+
+	list_del(&sp->entry);
+
+	write_unlock_irqrestore(&sp_unit_lock, flags);
+}
+
+static irqreturn_t sp_irq_handler(int irq, void *data)
+{
+	struct sp_device *sp = data;
+
+	if (sp->ccp_irq_handler)
+		sp->ccp_irq_handler(irq, sp->ccp_irq_data);
+
+	if (sp->psp_irq_handler)
+		sp->psp_irq_handler(irq, sp->psp_irq_data);
+
+	return IRQ_HANDLED;
+}
+
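+/*
+ * When the CCP and PSP share a single interrupt line and both are present,
+ * route it through the common sp_irq_handler(); otherwise request a
+ * dedicated IRQ for this sub-device.
+ */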
+int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler,
+		       const char *name, void *data)
+{
+	int ret;
+
+	if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) {
+		/* Need a common routine to manage all interrupts */
+		sp->ccp_irq_data = data;
+		sp->ccp_irq_handler = handler;
+
+		if (!sp->irq_registered) {
+			ret = request_irq(sp->ccp_irq, sp_irq_handler, 0,
+					  sp->name, sp);
+			if (ret)
+				return ret;
+
+			sp->irq_registered = true;
+		}
+	} else {
+		/* Each sub-device can manage its own interrupt */
+		ret = request_irq(sp->ccp_irq, handler, 0, name, data);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler,
+		       const char *name, void *data)
+{
+	int ret;
+
+	if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) {
+		/* Need a common routine to manage all interrupts */
+		sp->psp_irq_data = data;
+		sp->psp_irq_handler = handler;
+
+		if (!sp->irq_registered) {
+			ret = request_irq(sp->psp_irq, sp_irq_handler, 0,
+					  sp->name, sp);
+			if (ret)
+				return ret;
+
+			sp->irq_registered = true;
+		}
+	} else {
+		/* Each sub-device can manage its own interrupt */
+		ret = request_irq(sp->psp_irq, handler, 0, name, data);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void sp_free_ccp_irq(struct sp_device *sp, void *data)
+{
+	if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) {
+		/* Using common routine to manage all interrupts */
+		if (!sp->psp_irq_handler) {
+			/* Nothing else using it, so free it */
+			free_irq(sp->ccp_irq, sp);
+
+			sp->irq_registered = false;
+		}
+
+		sp->ccp_irq_handler = NULL;
+		sp->ccp_irq_data = NULL;
+	} else {
+		/* Each sub-device can manage its own interrupt */
+		free_irq(sp->ccp_irq, data);
+	}
+}
+
+void sp_free_psp_irq(struct sp_device *sp, void *data)
+{
+	if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) {
+		/* Using common routine to manage all interrupts */
+		if (!sp->ccp_irq_handler) {
+			/* Nothing else using it, so free it */
+			free_irq(sp->psp_irq, sp);
+
+			sp->irq_registered = false;
+		}
+
+		sp->psp_irq_handler = NULL;
+		sp->psp_irq_data = NULL;
+	} else {
+		/* Each sub-device can manage its own interrupt */
+		free_irq(sp->psp_irq, data);
+	}
+}
+
+/**
+ * sp_alloc_struct - allocate and initialize the sp_device struct
+ *
+ * @dev: device struct of the SP
+ */
+struct sp_device *sp_alloc_struct(struct device *dev)
+{
+	struct sp_device *sp;
+
+	sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
+	if (!sp)
+		return NULL;
+
+	sp->dev = dev;
+	sp->ord = atomic_inc_return(&sp_ordinal);
+	snprintf(sp->name, SP_MAX_NAME_LEN, "sp-%u", sp->ord);
+
+	return sp;
+}
+
+int sp_init(struct sp_device *sp)
+{
+	sp_add_device(sp);
+
+	if (sp->dev_vdata->ccp_vdata)
+		ccp_dev_init(sp);
+
+	if (sp->dev_vdata->psp_vdata)
+		psp_dev_init(sp);
+	return 0;
+}
+
+void sp_destroy(struct sp_device *sp)
+{
+	if (sp->dev_vdata->ccp_vdata)
+		ccp_dev_destroy(sp);
+
+	if (sp->dev_vdata->psp_vdata)
+		psp_dev_destroy(sp);
+
+	sp_del_device(sp);
+}
+
+#ifdef CONFIG_PM
+int sp_suspend(struct sp_device *sp, pm_message_t state)
+{
+	int ret;
+
+	if (sp->dev_vdata->ccp_vdata) {
+		ret = ccp_dev_suspend(sp, state);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int sp_resume(struct sp_device *sp)
+{
+	int ret;
+
+	if (sp->dev_vdata->ccp_vdata) {
+		ret = ccp_dev_resume(sp);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+#endif
+
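+/*
+ * Walk the SP device list under sp_unit_lock and ask the first PSP-capable
+ * entry for the currently selected master device.
+ */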
+struct sp_device *sp_get_psp_master_device(void)
+{
+	struct sp_device *i, *ret = NULL;
+	unsigned long flags;
+
+	write_lock_irqsave(&sp_unit_lock, flags);
+	if (list_empty(&sp_units))
+		goto unlock;
+
+	list_for_each_entry(i, &sp_units, entry) {
+		if (i->psp_data && i->get_psp_master_device) {
+			ret = i->get_psp_master_device();
+			break;
+		}
+	}
+
+unlock:
+	write_unlock_irqrestore(&sp_unit_lock, flags);
+	return ret;
+}
+
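+/*
+ * Module init: on x86, register the PCI driver (and bring up the PSP when
+ * CONFIG_CRYPTO_DEV_SP_PSP is enabled); on arm64, register the platform
+ * driver.
+ */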
+static int __init sp_mod_init(void)
+{
+#ifdef CONFIG_X86
+	int ret;
+
+	ret = sp_pci_init();
+	if (ret)
+		return ret;
+
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+	psp_pci_init();
+#endif
+
+	return 0;
+#endif
+
+#ifdef CONFIG_ARM64
+	int ret;
+
+	ret = sp_platform_init();
+	if (ret)
+		return ret;
+
+	return 0;
+#endif
+
+	return -ENODEV;
+}
+
+static void __exit sp_mod_exit(void)
+{
+#ifdef CONFIG_X86
+
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+	psp_pci_exit();
+#endif
+
+	sp_pci_exit();
+#endif
+
+#ifdef CONFIG_ARM64
+	sp_platform_exit();
+#endif
+}
+
+module_init(sp_mod_init);
+module_exit(sp_mod_exit);
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
new file mode 100644
index 0000000..14398ca
--- /dev/null
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -0,0 +1,164 @@
+/*
+ * AMD Secure Processor driver
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SP_DEV_H__
+#define __SP_DEV_H__
+
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+
+#define SP_MAX_NAME_LEN		32
+
+#define CACHE_NONE			0x00
+#define CACHE_WB_NO_ALLOC		0xb7
+
+/* Structure to hold CCP device data */
+struct ccp_device;
+struct ccp_vdata {
+	const unsigned int version;
+	const unsigned int dma_chan_attr;
+	void (*setup)(struct ccp_device *);
+	const struct ccp_actions *perform;
+	const unsigned int offset;
+	const unsigned int rsamax;
+};
+
+struct psp_vdata {
+	const unsigned int cmdresp_reg;
+	const unsigned int cmdbuff_addr_lo_reg;
+	const unsigned int cmdbuff_addr_hi_reg;
+	const unsigned int feature_reg;
+	const unsigned int inten_reg;
+	const unsigned int intsts_reg;
+};
+
+/* Structure to hold SP device data */
+struct sp_dev_vdata {
+	const unsigned int bar;
+
+	const struct ccp_vdata *ccp_vdata;
+	const struct psp_vdata *psp_vdata;
+};
+
+struct sp_device {
+	struct list_head entry;
+
+	struct device *dev;
+
+	struct sp_dev_vdata *dev_vdata;
+	unsigned int ord;
+	char name[SP_MAX_NAME_LEN];
+
+	/* Bus specific device information */
+	void *dev_specific;
+
+	/* I/O area used for device communication. */
+	void __iomem *io_map;
+
+	/* DMA caching attribute support */
+	unsigned int axcache;
+
+	/* get and set master device */
+	struct sp_device *(*get_psp_master_device)(void);
+	void (*set_psp_master_device)(struct sp_device *);
+
+	bool irq_registered;
+	bool use_tasklet;
+
+	unsigned int ccp_irq;
+	irq_handler_t ccp_irq_handler;
+	void *ccp_irq_data;
+
+	unsigned int psp_irq;
+	irq_handler_t psp_irq_handler;
+	void *psp_irq_data;
+
+	void *ccp_data;
+	void *psp_data;
+};
+
+int sp_pci_init(void);
+void sp_pci_exit(void);
+
+int sp_platform_init(void);
+void sp_platform_exit(void);
+
+struct sp_device *sp_alloc_struct(struct device *dev);
+
+int sp_init(struct sp_device *sp);
+void sp_destroy(struct sp_device *sp);
+struct sp_device *sp_get_master(void);
+
+int sp_suspend(struct sp_device *sp, pm_message_t state);
+int sp_resume(struct sp_device *sp);
+int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler,
+		       const char *name, void *data);
+void sp_free_ccp_irq(struct sp_device *sp, void *data);
+int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler,
+		       const char *name, void *data);
+void sp_free_psp_irq(struct sp_device *sp, void *data);
+struct sp_device *sp_get_psp_master_device(void);
+
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+
+int ccp_dev_init(struct sp_device *sp);
+void ccp_dev_destroy(struct sp_device *sp);
+
+int ccp_dev_suspend(struct sp_device *sp, pm_message_t state);
+int ccp_dev_resume(struct sp_device *sp);
+
+#else	/* !CONFIG_CRYPTO_DEV_SP_CCP */
+
+static inline int ccp_dev_init(struct sp_device *sp)
+{
+	return 0;
+}
+static inline void ccp_dev_destroy(struct sp_device *sp) { }
+
+static inline int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
+{
+	return 0;
+}
+static inline int ccp_dev_resume(struct sp_device *sp)
+{
+	return 0;
+}
+#endif	/* CONFIG_CRYPTO_DEV_SP_CCP */
+
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+
+int psp_dev_init(struct sp_device *sp);
+void psp_pci_init(void);
+void psp_dev_destroy(struct sp_device *sp);
+void psp_pci_exit(void);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
+
+static inline int psp_dev_init(struct sp_device *sp) { return 0; }
+static inline void psp_pci_init(void) { }
+static inline void psp_dev_destroy(struct sp_device *sp) { }
+static inline void psp_pci_exit(void) { }
+
+#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
+
+#endif
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
new file mode 100644
index 0000000..7da93e9
--- /dev/null
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -0,0 +1,352 @@
+/*
+ * AMD Secure Processor device driver
+ *
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+#include "psp-dev.h"
+
+#define MSIX_VECTORS			2
+
+struct sp_pci {
+	int msix_count;
+	struct msix_entry msix_entry[MSIX_VECTORS];
+};
+static struct sp_device *sp_dev_master;
+
+static int sp_get_msix_irqs(struct sp_device *sp)
+{
+	struct sp_pci *sp_pci = sp->dev_specific;
+	struct device *dev = sp->dev;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int v, ret;
+
+	for (v = 0; v < ARRAY_SIZE(sp_pci->msix_entry); v++)
+		sp_pci->msix_entry[v].entry = v;
+
+	ret = pci_enable_msix_range(pdev, sp_pci->msix_entry, 1, v);
+	if (ret < 0)
+		return ret;
+
+	sp_pci->msix_count = ret;
+	sp->use_tasklet = true;
+
+	sp->psp_irq = sp_pci->msix_entry[0].vector;
+	sp->ccp_irq = (sp_pci->msix_count > 1) ? sp_pci->msix_entry[1].vector
+					       : sp_pci->msix_entry[0].vector;
+	return 0;
+}
+
+static int sp_get_msi_irq(struct sp_device *sp)
+{
+	struct device *dev = sp->dev;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int ret;
+
+	ret = pci_enable_msi(pdev);
+	if (ret)
+		return ret;
+
+	sp->ccp_irq = pdev->irq;
+	sp->psp_irq = pdev->irq;
+
+	return 0;
+}
+
+static int sp_get_irqs(struct sp_device *sp)
+{
+	struct device *dev = sp->dev;
+	int ret;
+
+	ret = sp_get_msix_irqs(sp);
+	if (!ret)
+		return 0;
+
+	/* Couldn't get MSI-X vectors, try MSI */
+	dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+	ret = sp_get_msi_irq(sp);
+	if (!ret)
+		return 0;
+
+	/* Couldn't get MSI interrupt */
+	dev_notice(dev, "could not enable MSI (%d)\n", ret);
+
+	return ret;
+}
+
+static void sp_free_irqs(struct sp_device *sp)
+{
+	struct sp_pci *sp_pci = sp->dev_specific;
+	struct device *dev = sp->dev;
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	if (sp_pci->msix_count)
+		pci_disable_msix(pdev);
+	else if (sp->psp_irq)
+		pci_disable_msi(pdev);
+
+	sp->ccp_irq = 0;
+	sp->psp_irq = 0;
+}
+
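+/*
+ * Pick the PSP master: the device that enumerates earliest wins, comparing
+ * bus number, then slot, then function.
+ */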
+static bool sp_pci_is_master(struct sp_device *sp)
+{
+	struct device *dev_cur, *dev_new;
+	struct pci_dev *pdev_cur, *pdev_new;
+
+	dev_new = sp->dev;
+	dev_cur = sp_dev_master->dev;
+
+	pdev_new = to_pci_dev(dev_new);
+	pdev_cur = to_pci_dev(dev_cur);
+
+	if (pdev_new->bus->number < pdev_cur->bus->number)
+		return true;
+
+	if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn))
+		return true;
+
+	if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn))
+		return true;
+
+	return false;
+}
+
+static void psp_set_master(struct sp_device *sp)
+{
+	if (!sp_dev_master) {
+		sp_dev_master = sp;
+		return;
+	}
+
+	if (sp_pci_is_master(sp))
+		sp_dev_master = sp;
+}
+
+static struct sp_device *psp_get_master(void)
+{
+	return sp_dev_master;
+}
+
+static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct sp_device *sp;
+	struct sp_pci *sp_pci;
+	struct device *dev = &pdev->dev;
+	void __iomem * const *iomap_table;
+	int bar_mask;
+	int ret;
+
+	ret = -ENOMEM;
+	sp = sp_alloc_struct(dev);
+	if (!sp)
+		goto e_err;
+
+	sp_pci = devm_kzalloc(dev, sizeof(*sp_pci), GFP_KERNEL);
+	if (!sp_pci)
+		goto e_err;
+
+	sp->dev_specific = sp_pci;
+	sp->dev_vdata = (struct sp_dev_vdata *)id->driver_data;
+	if (!sp->dev_vdata) {
+		ret = -ENODEV;
+		dev_err(dev, "missing driver data\n");
+		goto e_err;
+	}
+
+	ret = pcim_enable_device(pdev);
+	if (ret) {
+		dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
+		goto e_err;
+	}
+
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	ret = pcim_iomap_regions(pdev, bar_mask, "ccp");
+	if (ret) {
+		dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
+		goto e_err;
+	}
+
+	iomap_table = pcim_iomap_table(pdev);
+	if (!iomap_table) {
+		dev_err(dev, "pcim_iomap_table failed\n");
+		ret = -ENOMEM;
+		goto e_err;
+	}
+
+	sp->io_map = iomap_table[sp->dev_vdata->bar];
+	if (!sp->io_map) {
+		dev_err(dev, "ioremap failed\n");
+		ret = -ENOMEM;
+		goto e_err;
+	}
+
+	ret = sp_get_irqs(sp);
+	if (ret)
+		goto e_err;
+
+	pci_set_master(pdev);
+	sp->set_psp_master_device = psp_set_master;
+	sp->get_psp_master_device = psp_get_master;
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+	if (ret) {
+		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+		if (ret) {
+			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
+				ret);
+			goto e_err;
+		}
+	}
+
+	dev_set_drvdata(dev, sp);
+
+	ret = sp_init(sp);
+	if (ret)
+		goto e_err;
+
+	dev_notice(dev, "enabled\n");
+
+	return 0;
+
+e_err:
+	dev_notice(dev, "initialization failed\n");
+	return ret;
+}
+
+static void sp_pci_remove(struct pci_dev *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct sp_device *sp = dev_get_drvdata(dev);
+
+	if (!sp)
+		return;
+
+	sp_destroy(sp);
+
+	sp_free_irqs(sp);
+
+	dev_notice(dev, "disabled\n");
+}
+
+#ifdef CONFIG_PM
+static int sp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct device *dev = &pdev->dev;
+	struct sp_device *sp = dev_get_drvdata(dev);
+
+	return sp_suspend(sp, state);
+}
+
+static int sp_pci_resume(struct pci_dev *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct sp_device *sp = dev_get_drvdata(dev);
+
+	return sp_resume(sp);
+}
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+static const struct psp_vdata pspv1 = {
+	.cmdresp_reg		= 0x10580,
+	.cmdbuff_addr_lo_reg	= 0x105e0,
+	.cmdbuff_addr_hi_reg	= 0x105e4,
+	.feature_reg		= 0x105fc,
+	.inten_reg		= 0x10610,
+	.intsts_reg		= 0x10614,
+};
+
+static const struct psp_vdata pspv2 = {
+	.cmdresp_reg		= 0x10980,
+	.cmdbuff_addr_lo_reg	= 0x109e0,
+	.cmdbuff_addr_hi_reg	= 0x109e4,
+	.feature_reg		= 0x109fc,
+	.inten_reg		= 0x10690,
+	.intsts_reg		= 0x10694,
+};
+#endif
+
+static const struct sp_dev_vdata dev_vdata[] = {
+	{	/* 0 */
+		.bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+		.ccp_vdata = &ccpv3,
+#endif
+	},
+	{	/* 1 */
+		.bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+		.ccp_vdata = &ccpv5a,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+		.psp_vdata = &pspv1,
+#endif
+	},
+	{	/* 2 */
+		.bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+		.ccp_vdata = &ccpv5b,
+#endif
+	},
+	{	/* 3 */
+		.bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+		.ccp_vdata = &ccpv5a,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+		.psp_vdata = &pspv2,
+#endif
+	},
+};
+static const struct pci_device_id sp_pci_table[] = {
+	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
+	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
+	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
+	{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
+	/* Last entry must be zero */
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, sp_pci_table);
+
+static struct pci_driver sp_pci_driver = {
+	.name = "ccp",
+	.id_table = sp_pci_table,
+	.probe = sp_pci_probe,
+	.remove = sp_pci_remove,
+#ifdef CONFIG_PM
+	.suspend = sp_pci_suspend,
+	.resume = sp_pci_resume,
+#endif
+};
+
+int sp_pci_init(void)
+{
+	return pci_register_driver(&sp_pci_driver);
+}
+
+void sp_pci_exit(void)
+{
+	pci_unregister_driver(&sp_pci_driver);
+}
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
new file mode 100644
index 0000000..71734f2
--- /dev/null
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -0,0 +1,256 @@
+/*
+ * AMD Secure Processor device driver
+ *
+ * Copyright (C) 2014,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/acpi.h>
+
+#include "ccp-dev.h"
+
+struct sp_platform {
+	int coherent;
+	unsigned int irq_count;
+};
+
+static const struct acpi_device_id sp_acpi_match[];
+static const struct of_device_id sp_of_match[];
+
+static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+	const struct of_device_id *match;
+
+	match = of_match_node(sp_of_match, pdev->dev.of_node);
+	if (match && match->data)
+		return (struct sp_dev_vdata *)match->data;
+#endif
+	return NULL;
+}
+
+static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_ACPI
+	const struct acpi_device_id *match;
+
+	match = acpi_match_device(sp_acpi_match, &pdev->dev);
+	if (match && match->driver_data)
+		return (struct sp_dev_vdata *)match->driver_data;
+#endif
+	return NULL;
+}
+
+static int sp_get_irqs(struct sp_device *sp)
+{
+	struct sp_platform *sp_platform = sp->dev_specific;
+	struct device *dev = sp->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	unsigned int i, count;
+	int ret;
+
+	for (i = 0, count = 0; i < pdev->num_resources; i++) {
+		struct resource *res = &pdev->resource[i];
+
+		if (resource_type(res) == IORESOURCE_IRQ)
+			count++;
+	}
+
+	sp_platform->irq_count = count;
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_notice(dev, "unable to get IRQ (%d)\n", ret);
+		return ret;
+	}
+
+	sp->psp_irq = ret;
+	if (count == 1) {
+		sp->ccp_irq = ret;
+	} else {
+		ret = platform_get_irq(pdev, 1);
+		if (ret < 0) {
+			dev_notice(dev, "unable to get IRQ (%d)\n", ret);
+			return ret;
+		}
+
+		sp->ccp_irq = ret;
+	}
+
+	return 0;
+}
+
+static int sp_platform_probe(struct platform_device *pdev)
+{
+	struct sp_device *sp;
+	struct sp_platform *sp_platform;
+	struct device *dev = &pdev->dev;
+	enum dev_dma_attr attr;
+	struct resource *ior;
+	int ret;
+
+	ret = -ENOMEM;
+	sp = sp_alloc_struct(dev);
+	if (!sp)
+		goto e_err;
+
+	sp_platform = devm_kzalloc(dev, sizeof(*sp_platform), GFP_KERNEL);
+	if (!sp_platform)
+		goto e_err;
+
+	sp->dev_specific = sp_platform;
+	sp->dev_vdata = pdev->dev.of_node ? sp_get_of_version(pdev)
+					 : sp_get_acpi_version(pdev);
+	if (!sp->dev_vdata) {
+		ret = -ENODEV;
+		dev_err(dev, "missing driver data\n");
+		goto e_err;
+	}
+
+	ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	sp->io_map = devm_ioremap_resource(dev, ior);
+	if (IS_ERR(sp->io_map)) {
+		ret = PTR_ERR(sp->io_map);
+		goto e_err;
+	}
+
+	attr = device_get_dma_attr(dev);
+	if (attr == DEV_DMA_NOT_SUPPORTED) {
+		dev_err(dev, "DMA is not supported\n");
+		goto e_err;
+	}
+
+	sp_platform->coherent = (attr == DEV_DMA_COHERENT);
+	if (sp_platform->coherent)
+		sp->axcache = CACHE_WB_NO_ALLOC;
+	else
+		sp->axcache = CACHE_NONE;
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+	if (ret) {
+		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+		goto e_err;
+	}
+
+	ret = sp_get_irqs(sp);
+	if (ret)
+		goto e_err;
+
+	dev_set_drvdata(dev, sp);
+
+	ret = sp_init(sp);
+	if (ret)
+		goto e_err;
+
+	dev_notice(dev, "enabled\n");
+
+	return 0;
+
+e_err:
+	dev_notice(dev, "initialization failed\n");
+	return ret;
+}
+
+static int sp_platform_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct sp_device *sp = dev_get_drvdata(dev);
+
+	sp_destroy(sp);
+
+	dev_notice(dev, "disabled\n");
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int sp_platform_suspend(struct platform_device *pdev,
+				pm_message_t state)
+{
+	struct device *dev = &pdev->dev;
+	struct sp_device *sp = dev_get_drvdata(dev);
+
+	return sp_suspend(sp, state);
+}
+
+static int sp_platform_resume(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct sp_device *sp = dev_get_drvdata(dev);
+
+	return sp_resume(sp);
+}
+#endif
+
+static const struct sp_dev_vdata dev_vdata[] = {
+	{
+		.bar = 0,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+		.ccp_vdata = &ccpv3_platform,
+#endif
+	},
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id sp_acpi_match[] = {
+	{ "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id sp_of_match[] = {
+	{ .compatible = "amd,ccp-seattle-v1a",
+	  .data = (const void *)&dev_vdata[0] },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, sp_of_match);
+#endif
+
+static struct platform_driver sp_platform_driver = {
+	.driver = {
+		.name = "ccp",
+#ifdef CONFIG_ACPI
+		.acpi_match_table = sp_acpi_match,
+#endif
+#ifdef CONFIG_OF
+		.of_match_table = sp_of_match,
+#endif
+	},
+	.probe = sp_platform_probe,
+	.remove = sp_platform_remove,
+#ifdef CONFIG_PM
+	.suspend = sp_platform_suspend,
+	.resume = sp_platform_resume,
+#endif
+};
+
+int sp_platform_init(void)
+{
+	return platform_driver_register(&sp_platform_driver);
+}
+
+void sp_platform_exit(void)
+{
+	platform_driver_unregister(&sp_platform_driver);
+}
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
new file mode 100644
index 0000000..bdc2797
--- /dev/null
+++ b/drivers/crypto/ccree/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
+ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
+ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
+ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
new file mode 100644
index 0000000..01b82b8
--- /dev/null
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -0,0 +1,2704 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <linux/rtnetlink.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_aead.h"
+#include "cc_request_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
+
+#define template_aead	template_u.aead
+
+#define MAX_AEAD_SETKEY_SEQ 12
+#define MAX_AEAD_PROCESS_SEQ 23
+
+#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
+#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
+
+#define AES_CCM_RFC4309_NONCE_SIZE 3
+#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
+
+/* Value of each ICV_CMP byte (of 8) in case of success */
+#define ICV_VERIF_OK 0x01
+
+struct cc_aead_handle {
+	cc_sram_addr_t sram_workspace_addr;
+	struct list_head aead_list;
+};
+
+struct cc_hmac_s {
+	u8 *padded_authkey;
+	u8 *ipad_opad; /* IPAD, OPAD */
+	dma_addr_t padded_authkey_dma_addr;
+	dma_addr_t ipad_opad_dma_addr;
+};
+
+struct cc_xcbc_s {
+	u8 *xcbc_keys; /* K1,K2,K3 */
+	dma_addr_t xcbc_keys_dma_addr;
+};
+
+struct cc_aead_ctx {
+	struct cc_drvdata *drvdata;
+	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for CTR3686 IV and AES-CCM */
+	u8 *enckey;
+	dma_addr_t enckey_dma_addr;
+	union {
+		struct cc_hmac_s hmac;
+		struct cc_xcbc_s xcbc;
+	} auth_state;
+	unsigned int enc_keylen;
+	unsigned int auth_keylen;
+	unsigned int authsize; /* Actual (possibly truncated) size of the MAC/ICV */
+	enum drv_cipher_mode cipher_mode;
+	enum cc_flow_mode flow_mode;
+	enum drv_hash_mode auth_mode;
+};
+
+static inline bool valid_assoclen(struct aead_request *req)
+{
+	return ((req->assoclen == 16) || (req->assoclen == 20));
+}
+
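+/*
+ * Free the DMA-coherent buffers (encryption key and, depending on the auth
+ * mode, the XCBC keys or the HMAC ipad/opad and padded authkey) allocated
+ * in cc_aead_init().
+ */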
+static void cc_aead_exit(struct crypto_aead *tfm)
+{
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
+		crypto_tfm_alg_name(&tfm->base));
+
+	/* Unmap enckey buffer */
+	if (ctx->enckey) {
+		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
+				  ctx->enckey_dma_addr);
+		dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
+			&ctx->enckey_dma_addr);
+		ctx->enckey_dma_addr = 0;
+		ctx->enckey = NULL;
+	}
+
+	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
+		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+
+		if (xcbc->xcbc_keys) {
+			dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
+					  xcbc->xcbc_keys,
+					  xcbc->xcbc_keys_dma_addr);
+		}
+		dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
+			&xcbc->xcbc_keys_dma_addr);
+		xcbc->xcbc_keys_dma_addr = 0;
+		xcbc->xcbc_keys = NULL;
+	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
+		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+
+		if (hmac->ipad_opad) {
+			dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
+					  hmac->ipad_opad,
+					  hmac->ipad_opad_dma_addr);
+			dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
+				&hmac->ipad_opad_dma_addr);
+			hmac->ipad_opad_dma_addr = 0;
+			hmac->ipad_opad = NULL;
+		}
+		if (hmac->padded_authkey) {
+			dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
+					  hmac->padded_authkey,
+					  hmac->padded_authkey_dma_addr);
+			dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
+				&hmac->padded_authkey_dma_addr);
+			hmac->padded_authkey_dma_addr = 0;
+			hmac->padded_authkey = NULL;
+		}
+	}
+}
+
+static int cc_aead_init(struct crypto_aead *tfm)
+{
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_crypto_alg *cc_alg =
+			container_of(alg, struct cc_crypto_alg, aead_alg);
+	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
+
+	dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
+		crypto_tfm_alg_name(&tfm->base));
+
+	/* Initialize modes in instance */
+	ctx->cipher_mode = cc_alg->cipher_mode;
+	ctx->flow_mode = cc_alg->flow_mode;
+	ctx->auth_mode = cc_alg->auth_mode;
+	ctx->drvdata = cc_alg->drvdata;
+	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
+
+	/* Allocate key buffer, cache line aligned */
+	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
+					 &ctx->enckey_dma_addr, GFP_KERNEL);
+	if (!ctx->enckey) {
+		dev_err(dev, "Failed allocating key buffer\n");
+		goto init_failed;
+	}
+	dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
+		ctx->enckey);
+
+	/* Set default authlen value */
+
+	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
+		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+		const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
+
+		/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
+		/* (and temporary for user key - up to 256b) */
+		xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
+						     &xcbc->xcbc_keys_dma_addr,
+						     GFP_KERNEL);
+		if (!xcbc->xcbc_keys) {
+			dev_err(dev, "Failed allocating buffer for XCBC keys\n");
+			goto init_failed;
+		}
+	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
+		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+		const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
+		dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
+
+		/* Allocate dma-coherent buffer for IPAD + OPAD */
+		hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
+						     &hmac->ipad_opad_dma_addr,
+						     GFP_KERNEL);
+
+		if (!hmac->ipad_opad) {
+			dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
+			goto init_failed;
+		}
+
+		dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
+			hmac->ipad_opad);
+
+		hmac->padded_authkey = dma_alloc_coherent(dev,
+							  MAX_HMAC_BLOCK_SIZE,
+							  pkey_dma,
+							  GFP_KERNEL);
+
+		if (!hmac->padded_authkey) {
+			dev_err(dev, "failed to allocate padded_authkey\n");
+			goto init_failed;
+		}
+	} else {
+		ctx->auth_state.hmac.ipad_opad = NULL;
+		ctx->auth_state.hmac.padded_authkey = NULL;
+	}
+
+	return 0;
+
+init_failed:
+	cc_aead_exit(tfm);
+	return -ENOMEM;
+}
+
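+/* Completion callback: unmap the request and restore the caller's IV pointer.
+ * On decrypt, compare the computed MAC against the received ICV and, on
+ * mismatch, zero the decrypted payload and fail with -EBADMSG. On encrypt,
+ * copy the ICV out to a fragmented destination if needed and copy any
+ * generated IV back to the caller's buffer.
+ */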
+static void cc_aead_complete(struct device *dev, void *cc_req, int err)
+{
+	struct aead_request *areq = (struct aead_request *)cc_req;
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+	cc_unmap_aead_request(dev, areq);
+
+	/* Restore ordinary iv pointer */
+	areq->iv = areq_ctx->backup_iv;
+
+	if (err)
+		goto done;
+
+	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+		if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
+			   ctx->authsize) != 0) {
+			dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
+				ctx->authsize, ctx->cipher_mode);
+			/* In case of payload authentication failure, the
+			 * decrypted message MUST NOT be revealed --> zero its
+			 * memory.
+			 */
+			cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
+			err = -EBADMSG;
+		}
+	} else { /*ENCRYPT*/
+		if (areq_ctx->is_icv_fragmented) {
+			u32 skip = areq->cryptlen + areq_ctx->dst_offset;
+
+			cc_copy_sg_portion(dev, areq_ctx->mac_buf,
+					   areq_ctx->dst_sgl, skip,
+					   (skip + ctx->authsize),
+					   CC_SG_FROM_BUF);
+		}
+
+		/* If an IV was generated, copy it back to the user provided
+		 * buffer.
+		 */
+		if (areq_ctx->backup_giv) {
+			if (ctx->cipher_mode == DRV_CIPHER_CTR)
+				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
+				       CTR_RFC3686_NONCE_SIZE,
+				       CTR_RFC3686_IV_SIZE);
+			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
+				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
+				       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
+		}
+	}
+done:
+	aead_request_complete(areq, err);
+}
+
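+/* Derive the AES-XCBC-MAC subkeys K1, K2 and K3 (RFC 3566) by ECB-encrypting
+ * the constant blocks 0x01..01, 0x02..02 and 0x03..03 with the user key; the
+ * results overwrite the xcbc_keys buffer. Returns the number of descriptors
+ * used.
+ */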
+static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
+				struct cc_aead_ctx *ctx)
+{
+	/* Load the AES key */
+	hw_desc_init(&desc[0]);
+	/* The source/user key shares the buffer with the output keys,
+	 * because the user key is not needed anymore after this key load.
+	 */
+	set_din_type(&desc[0], DMA_DLLI,
+		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
+		     NS_BIT);
+	set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
+	set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_key_size_aes(&desc[0], ctx->auth_keylen);
+	set_flow_mode(&desc[0], S_DIN_to_AES);
+	set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
+
+	hw_desc_init(&desc[1]);
+	set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[1], DIN_AES_DOUT);
+	set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
+		      AES_KEYSIZE_128, NS_BIT, 0);
+
+	hw_desc_init(&desc[2]);
+	set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[2], DIN_AES_DOUT);
+	set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
+					 + AES_KEYSIZE_128),
+			      AES_KEYSIZE_128, NS_BIT, 0);
+
+	hw_desc_init(&desc[3]);
+	set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[3], DIN_AES_DOUT);
+	set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
+					  + 2 * AES_KEYSIZE_128),
+			      AES_KEYSIZE_128, NS_BIT, 0);
+
+	return 4;
+}
+
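+/* Precompute the HMAC ipad/opad intermediate digests: for each of the two
+ * HMAC pad constants, load the hash initial state, XOR the padded
+ * authentication key with the constant, hash it and store the intermediate
+ * digest into the ipad_opad buffer. Returns the number of descriptors used.
+ */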
+static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
+{
+	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+	unsigned int digest_ofs = 0;
+	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+			DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+			CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+	struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+
+	unsigned int idx = 0;
+	int i;
+
+	/* calc derived HMAC key */
+	for (i = 0; i < 2; i++) {
+		/* Load hash initial state */
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], hash_mode);
+		set_din_sram(&desc[idx],
+			     cc_larval_digest_addr(ctx->drvdata,
+						   ctx->auth_mode),
+			     digest_size);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+		idx++;
+
+		/* Load the hash current length*/
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], hash_mode);
+		set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+		idx++;
+
+		/* Prepare ipad key */
+		hw_desc_init(&desc[idx]);
+		set_xor_val(&desc[idx], hmac_pad_const[i]);
+		set_cipher_mode(&desc[idx], hash_mode);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+		idx++;
+
+		/* Perform HASH update */
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     hmac->padded_authkey_dma_addr,
+			     SHA256_BLOCK_SIZE, NS_BIT);
+		set_cipher_mode(&desc[idx], hash_mode);
+		set_xor_active(&desc[idx]);
+		set_flow_mode(&desc[idx], DIN_HASH);
+		idx++;
+
+		/* Get the digest */
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], hash_mode);
+		set_dout_dlli(&desc[idx],
+			      (hmac->ipad_opad_dma_addr + digest_ofs),
+			      digest_size, NS_BIT, 0);
+		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+		idx++;
+
+		digest_ofs += digest_size;
+	}
+
+	return idx;
+}
+
+static int validate_keys_sizes(struct cc_aead_ctx *ctx)
+{
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
+		ctx->enc_keylen, ctx->auth_keylen);
+
+	switch (ctx->auth_mode) {
+	case DRV_HASH_SHA1:
+	case DRV_HASH_SHA256:
+		break;
+	case DRV_HASH_XCBC_MAC:
+		if (ctx->auth_keylen != AES_KEYSIZE_128 &&
+		    ctx->auth_keylen != AES_KEYSIZE_192 &&
+		    ctx->auth_keylen != AES_KEYSIZE_256)
+			return -ENOTSUPP;
+		break;
+	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
+		if (ctx->auth_keylen > 0)
+			return -EINVAL;
+		break;
+	default:
+		dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
+		return -EINVAL;
+	}
+	/* Check cipher key size */
+	if (ctx->flow_mode == S_DIN_to_DES) {
+		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
+			dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
+				ctx->enc_keylen);
+			return -EINVAL;
+		}
+	} else { /* Default assumed to be AES ciphers */
+		if (ctx->enc_keylen != AES_KEYSIZE_128 &&
+		    ctx->enc_keylen != AES_KEYSIZE_192 &&
+		    ctx->enc_keylen != AES_KEYSIZE_256) {
+			dev_err(dev, "Invalid cipher(AES) key size: %u\n",
+				ctx->enc_keylen);
+			return -EINVAL;
+		}
+	}
+
+	return 0; /* All tests of keys sizes passed */
+}
+
+/* This function prepares the user key so it can be passed to the hmac
+ * processing (copied to an internal buffer, or hashed when the key is longer
+ * than a block).
+ */
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+				 unsigned int keylen)
+{
+	dma_addr_t key_dma_addr = 0;
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
+	struct cc_crypto_req cc_req = {};
+	unsigned int blocksize;
+	unsigned int digestsize;
+	unsigned int hashmode;
+	unsigned int idx = 0;
+	int rc = 0;
+	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
+	dma_addr_t padded_authkey_dma_addr =
+		ctx->auth_state.hmac.padded_authkey_dma_addr;
+
+	switch (ctx->auth_mode) { /* auth_key required and >0 */
+	case DRV_HASH_SHA1:
+		blocksize = SHA1_BLOCK_SIZE;
+		digestsize = SHA1_DIGEST_SIZE;
+		hashmode = DRV_HASH_HW_SHA1;
+		break;
+	case DRV_HASH_SHA256:
+	default:
+		blocksize = SHA256_BLOCK_SIZE;
+		digestsize = SHA256_DIGEST_SIZE;
+		hashmode = DRV_HASH_HW_SHA256;
+	}
+
+	if (keylen != 0) {
+		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
+					      DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, key_dma_addr)) {
+			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+				key, keylen);
+			return -ENOMEM;
+		}
+		if (keylen > blocksize) {
+			/* Load hash initial state */
+			hw_desc_init(&desc[idx]);
+			set_cipher_mode(&desc[idx], hashmode);
+			set_din_sram(&desc[idx], larval_addr, digestsize);
+			set_flow_mode(&desc[idx], S_DIN_to_HASH);
+			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+			idx++;
+
+			/* Load the hash current length*/
+			hw_desc_init(&desc[idx]);
+			set_cipher_mode(&desc[idx], hashmode);
+			set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+			set_flow_mode(&desc[idx], S_DIN_to_HASH);
+			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+			idx++;
+
+			hw_desc_init(&desc[idx]);
+			set_din_type(&desc[idx], DMA_DLLI,
+				     key_dma_addr, keylen, NS_BIT);
+			set_flow_mode(&desc[idx], DIN_HASH);
+			idx++;
+
+			/* Get hashed key */
+			hw_desc_init(&desc[idx]);
+			set_cipher_mode(&desc[idx], hashmode);
+			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+				      digestsize, NS_BIT, 0);
+			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+			set_cipher_config0(&desc[idx],
+					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+			idx++;
+
+			hw_desc_init(&desc[idx]);
+			set_din_const(&desc[idx], 0, (blocksize - digestsize));
+			set_flow_mode(&desc[idx], BYPASS);
+			set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
+				      digestsize), (blocksize - digestsize),
+				      NS_BIT, 0);
+			idx++;
+		} else {
+			hw_desc_init(&desc[idx]);
+			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
+				     keylen, NS_BIT);
+			set_flow_mode(&desc[idx], BYPASS);
+			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+				      keylen, NS_BIT, 0);
+			idx++;
+
+			if ((blocksize - keylen) != 0) {
+				hw_desc_init(&desc[idx]);
+				set_din_const(&desc[idx], 0,
+					      (blocksize - keylen));
+				set_flow_mode(&desc[idx], BYPASS);
+				set_dout_dlli(&desc[idx],
+					      (padded_authkey_dma_addr +
+					       keylen),
+					      (blocksize - keylen), NS_BIT, 0);
+				idx++;
+			}
+		}
+	} else {
+		hw_desc_init(&desc[idx]);
+		set_din_const(&desc[idx], 0, (blocksize - keylen));
+		set_flow_mode(&desc[idx], BYPASS);
+		set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+			      blocksize, NS_BIT, 0);
+		idx++;
+	}
+
+	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+	if (rc)
+		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+
+	if (key_dma_addr)
+		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
+
+	return rc;
+}
+
+static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+			  unsigned int keylen)
+{
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct rtattr *rta = (struct rtattr *)key;
+	struct cc_crypto_req cc_req = {};
+	struct crypto_authenc_key_param *param;
+	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
+	int rc = -EINVAL;
+	unsigned int seq_len = 0;
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
+		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
+
+	/* STAT_PHASE_0: Init and sanity checks */
+
+	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
+		if (!RTA_OK(rta, keylen))
+			goto badkey;
+		if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+			goto badkey;
+		if (RTA_PAYLOAD(rta) < sizeof(*param))
+			goto badkey;
+		param = RTA_DATA(rta);
+		ctx->enc_keylen = be32_to_cpu(param->enckeylen);
+		key += RTA_ALIGN(rta->rta_len);
+		keylen -= RTA_ALIGN(rta->rta_len);
+		if (keylen < ctx->enc_keylen)
+			goto badkey;
+		ctx->auth_keylen = keylen - ctx->enc_keylen;
+
+		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+			/* the nonce is stored in the last bytes of the key */
+			if (ctx->enc_keylen <
+			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
+				goto badkey;
+			/* Copy nonce from last 4 bytes in CTR key to
+			 *  first 4 bytes in CTR IV
+			 */
+			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
+			       ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
+			       CTR_RFC3686_NONCE_SIZE);
+			/* Set CTR key size */
+			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
+		}
+	} else { /* non-authenc - has just one key */
+		ctx->enc_keylen = keylen;
+		ctx->auth_keylen = 0;
+	}
+
+	rc = validate_keys_sizes(ctx);
+	if (rc)
+		goto badkey;
+
+	/* STAT_PHASE_1: Copy key to ctx */
+
+	/* Get key material */
+	memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+	if (ctx->enc_keylen == 24)
+		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+		memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
+		rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+		if (rc)
+			goto badkey;
+	}
+
+	/* STAT_PHASE_2: Create sequence */
+
+	switch (ctx->auth_mode) {
+	case DRV_HASH_SHA1:
+	case DRV_HASH_SHA256:
+		seq_len = hmac_setkey(desc, ctx);
+		break;
+	case DRV_HASH_XCBC_MAC:
+		seq_len = xcbc_setkey(desc, ctx);
+		break;
+	case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
+		break; /* No auth. key setup */
+	default:
+		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
+		rc = -ENOTSUPP;
+		goto badkey;
+	}
+
+	/* STAT_PHASE_3: Submit sequence to HW */
+
+	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
+		rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
+		if (rc) {
+			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+			goto setkey_error;
+		}
+	}
+
+	/* Update STAT_PHASE_3 */
+	return rc;
+
+badkey:
+	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+setkey_error:
+	return rc;
+}
+
+static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
+				 unsigned int keylen)
+{
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (keylen < 3)
+		return -EINVAL;
+
+	keylen -= 3;
+	memcpy(ctx->ctr_nonce, key + keylen, 3);
+
+	return cc_aead_setkey(tfm, key, keylen);
+}
+
+static int cc_aead_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	/* Unsupported auth. sizes */
+	if (authsize == 0 ||
+	    authsize > crypto_aead_maxauthsize(authenc)) {
+		return -ENOTSUPP;
+	}
+
+	ctx->authsize = authsize;
+	dev_dbg(dev, "authlen=%d\n", ctx->authsize);
+
+	return 0;
+}
+
+static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+				      unsigned int authsize)
+{
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_ccm_setauthsize(struct crypto_aead *authenc,
+			      unsigned int authsize)
+{
+	switch (authsize) {
+	case 4:
+	case 6:
+	case 8:
+	case 10:
+	case 12:
+	case 14:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return cc_aead_setauthsize(authenc, authsize);
+}
+
+static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
+			      struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+	enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
+	unsigned int idx = *seq_size;
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	switch (assoc_dma_type) {
+	case CC_DMA_BUF_DLLI:
+		dev_dbg(dev, "ASSOC buffer type DLLI\n");
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
+			     areq->assoclen, NS_BIT);
+		set_flow_mode(&desc[idx], flow_mode);
+		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+		    areq_ctx->cryptlen > 0)
+			set_din_not_last_indication(&desc[idx]);
+		break;
+	case CC_DMA_BUF_MLLI:
+		dev_dbg(dev, "ASSOC buffer type MLLI\n");
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
+			     areq_ctx->assoc.mlli_nents, NS_BIT);
+		set_flow_mode(&desc[idx], flow_mode);
+		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+		    areq_ctx->cryptlen > 0)
+			set_din_not_last_indication(&desc[idx]);
+		break;
+	case CC_DMA_BUF_NULL:
+	default:
+		dev_err(dev, "Invalid ASSOC buffer type\n");
+	}
+
+	*seq_size = (++idx);
+}
+
+static void cc_proc_authen_desc(struct aead_request *areq,
+				unsigned int flow_mode,
+				struct cc_hw_desc desc[],
+				unsigned int *seq_size, int direct)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+	unsigned int idx = *seq_size;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	switch (data_dma_type) {
+	case CC_DMA_BUF_DLLI:
+	{
+		struct scatterlist *cipher =
+			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+			areq_ctx->dst_sgl : areq_ctx->src_sgl;
+
+		unsigned int offset =
+			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+			areq_ctx->dst_offset : areq_ctx->src_offset;
+		dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     (sg_dma_address(cipher) + offset),
+			     areq_ctx->cryptlen, NS_BIT);
+		set_flow_mode(&desc[idx], flow_mode);
+		break;
+	}
+	case CC_DMA_BUF_MLLI:
+	{
+		/* DOUBLE-PASS flow (the default):
+		 * assoc. data + IV + data are compacted in one table;
+		 * if assoclen is ZERO, only the IV is processed.
+		 */
+		cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
+		u32 mlli_nents = areq_ctx->assoc.mlli_nents;
+
+		if (areq_ctx->is_single_pass) {
+			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+				mlli_addr = areq_ctx->dst.sram_addr;
+				mlli_nents = areq_ctx->dst.mlli_nents;
+			} else {
+				mlli_addr = areq_ctx->src.sram_addr;
+				mlli_nents = areq_ctx->src.mlli_nents;
+			}
+		}
+
+		dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
+			     NS_BIT);
+		set_flow_mode(&desc[idx], flow_mode);
+		break;
+	}
+	case CC_DMA_BUF_NULL:
+	default:
+		dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
+	}
+
+	*seq_size = (++idx);
+}
+
+static void cc_proc_cipher_desc(struct aead_request *areq,
+				unsigned int flow_mode,
+				struct cc_hw_desc desc[],
+				unsigned int *seq_size)
+{
+	unsigned int idx = *seq_size;
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	if (areq_ctx->cryptlen == 0)
+		return; /*null processing*/
+
+	switch (data_dma_type) {
+	case CC_DMA_BUF_DLLI:
+		dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     (sg_dma_address(areq_ctx->src_sgl) +
+			      areq_ctx->src_offset), areq_ctx->cryptlen,
+			      NS_BIT);
+		set_dout_dlli(&desc[idx],
+			      (sg_dma_address(areq_ctx->dst_sgl) +
+			       areq_ctx->dst_offset),
+			      areq_ctx->cryptlen, NS_BIT, 0);
+		set_flow_mode(&desc[idx], flow_mode);
+		break;
+	case CC_DMA_BUF_MLLI:
+		dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
+			     areq_ctx->src.mlli_nents, NS_BIT);
+		set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
+			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
+		set_flow_mode(&desc[idx], flow_mode);
+		break;
+	case CC_DMA_BUF_NULL:
+	default:
+		dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
+	}
+
+	*seq_size = (++idx);
+}
+
+static void cc_proc_digest_desc(struct aead_request *req,
+				struct cc_hw_desc desc[],
+				unsigned int *seq_size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	unsigned int idx = *seq_size;
+	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+	int direct = req_ctx->gen_ctx.op_type;
+
+	/* Get final ICV result */
+	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+		hw_desc_init(&desc[idx]);
+		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+		set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
+			      NS_BIT, 1);
+		set_queue_last_ind(ctx->drvdata, &desc[idx]);
+		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+			set_aes_not_hash_mode(&desc[idx]);
+			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+		} else {
+			set_cipher_config0(&desc[idx],
+					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+			set_cipher_mode(&desc[idx], hash_mode);
+		}
+	} else { /*Decrypt*/
+		/* Get ICV out from hardware */
+		hw_desc_init(&desc[idx]);
+		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+		set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
+			      ctx->authsize, NS_BIT, 1);
+		set_queue_last_ind(ctx->drvdata, &desc[idx]);
+		set_cipher_config0(&desc[idx],
+				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+			set_aes_not_hash_mode(&desc[idx]);
+		} else {
+			set_cipher_mode(&desc[idx], hash_mode);
+		}
+	}
+
+	*seq_size = (++idx);
+}
+
+static void cc_set_cipher_desc(struct aead_request *req,
+			       struct cc_hw_desc desc[],
+			       unsigned int *seq_size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	unsigned int hw_iv_size = req_ctx->hw_iv_size;
+	unsigned int idx = *seq_size;
+	int direct = req_ctx->gen_ctx.op_type;
+
+	/* Setup cipher state */
+	hw_desc_init(&desc[idx]);
+	set_cipher_config0(&desc[idx], direct);
+	set_flow_mode(&desc[idx], ctx->flow_mode);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
+		     hw_iv_size, NS_BIT);
+	if (ctx->cipher_mode == DRV_CIPHER_CTR)
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	else
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	set_cipher_mode(&desc[idx], ctx->cipher_mode);
+	idx++;
+
+	/* Setup enc. key */
+	hw_desc_init(&desc[idx]);
+	set_cipher_config0(&desc[idx], direct);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_flow_mode(&desc[idx], ctx->flow_mode);
+	if (ctx->flow_mode == S_DIN_to_AES) {
+		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+			     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
+			      ctx->enc_keylen), NS_BIT);
+		set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	} else {
+		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+			     ctx->enc_keylen, NS_BIT);
+		set_key_size_des(&desc[idx], ctx->enc_keylen);
+	}
+	set_cipher_mode(&desc[idx], ctx->cipher_mode);
+	idx++;
+
+	*seq_size = idx;
+}
+
+static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
+			   unsigned int *seq_size, unsigned int data_flow_mode)
+{
+	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	int direct = req_ctx->gen_ctx.op_type;
+	unsigned int idx = *seq_size;
+
+	if (req_ctx->cryptlen == 0)
+		return; /*null processing*/
+
+	cc_set_cipher_desc(req, desc, &idx);
+	cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
+	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+		/* We must wait for DMA to write all cipher */
+		hw_desc_init(&desc[idx]);
+		set_din_no_dma(&desc[idx], 0, 0xfffff0);
+		set_dout_no_dma(&desc[idx], 0, 0, 1);
+		idx++;
+	}
+
+	*seq_size = idx;
+}
+
+static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
+			     unsigned int *seq_size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+	unsigned int idx = *seq_size;
+
+	/* Loading hash ipad xor key state */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hash_mode);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
+		     NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	idx++;
+
+	/* Load init. digest len (64 bytes) */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hash_mode);
+	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
+		     ctx->drvdata->hash_len_sz);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	idx++;
+
+	*seq_size = idx;
+}
+
+static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
+			     unsigned int *seq_size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	unsigned int idx = *seq_size;
+
+	/* Loading MAC state */
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	idx++;
+
+	/* Setup XCBC MAC K1 */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
+		     AES_KEYSIZE_128, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	idx++;
+
+	/* Setup XCBC MAC K2 */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
+		      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	idx++;
+
+	/* Setup XCBC MAC K3 */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
+		      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	idx++;
+
+	*seq_size = idx;
+}
+
+static void cc_proc_header_desc(struct aead_request *req,
+				struct cc_hw_desc desc[],
+				unsigned int *seq_size)
+{
+	unsigned int idx = *seq_size;
+	/* Hash associated data */
+	if (req->assoclen > 0)
+		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
+
+	/* Hash IV */
+	*seq_size = idx;
+}
+
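+/* Finalize the HMAC: write the padded inner-hash digest out to the SRAM
+ * workspace, load the precomputed opad state and the initial digest length,
+ * then hash the stored inner digest to produce the outer HMAC result.
+ */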
+static void cc_proc_scheme_desc(struct aead_request *req,
+				struct cc_hw_desc desc[],
+				unsigned int *seq_size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
+	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+	unsigned int idx = *seq_size;
+
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hash_mode);
+	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
+		      ctx->drvdata->hash_len_sz);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+	set_cipher_do(&desc[idx], DO_PAD);
+	idx++;
+
+	/* Get final ICV result */
+	hw_desc_init(&desc[idx]);
+	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
+		      digest_size);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+	set_cipher_mode(&desc[idx], hash_mode);
+	idx++;
+
+	/* Loading hash opad xor key state */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hash_mode);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
+		     digest_size, NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	idx++;
+
+	/* Load init. digest len (64 bytes) */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hash_mode);
+	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
+		     ctx->drvdata->hash_len_sz);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	idx++;
+
+	/* Perform HASH update */
+	hw_desc_init(&desc[idx]);
+	set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
+		     digest_size);
+	set_flow_mode(&desc[idx], DIN_HASH);
+	idx++;
+
+	*seq_size = idx;
+}
+
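+/* If any of the request buffers was mapped as an MLLI table (or the
+ * double-pass flow is used), copy the MLLI table from host memory into the
+ * dedicated SRAM area with a BYPASS descriptor so the engine can fetch the
+ * scatter/gather entries from SRAM.
+ */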
+static void cc_mlli_to_sram(struct aead_request *req,
+			    struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+	    req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
+	    !req_ctx->is_single_pass) {
+		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
+			(unsigned int)ctx->drvdata->mlli_sram_addr,
+			req_ctx->mlli_params.mlli_len);
+		/* Copy MLLI table host-to-sram */
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI,
+			     req_ctx->mlli_params.mlli_dma_addr,
+			     req_ctx->mlli_params.mlli_len, NS_BIT);
+		set_dout_sram(&desc[*seq_size],
+			      ctx->drvdata->mlli_sram_addr,
+			      req_ctx->mlli_params.mlli_len);
+		set_flow_mode(&desc[*seq_size], BYPASS);
+		(*seq_size)++;
+	}
+}
+
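+/* Pick the data-path flow mode: the single-pass flows feed the cipher data to
+ * the hash engine as well (AES_to_HASH_and_DOUT / DES_to_HASH_and_DOUT on
+ * encrypt, AES_and_HASH / DES_and_HASH on decrypt), while the double-pass
+ * flows run the plain cipher only (DIN_AES_DOUT / DIN_DES_DOUT).
+ */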
+static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
+					  enum cc_flow_mode setup_flow_mode,
+					  bool is_single_pass)
+{
+	enum cc_flow_mode data_flow_mode;
+
+	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+		if (setup_flow_mode == S_DIN_to_AES)
+			data_flow_mode = is_single_pass ?
+				AES_to_HASH_and_DOUT : DIN_AES_DOUT;
+		else
+			data_flow_mode = is_single_pass ?
+				DES_to_HASH_and_DOUT : DIN_DES_DOUT;
+	} else { /* Decrypt */
+		if (setup_flow_mode == S_DIN_to_AES)
+			data_flow_mode = is_single_pass ?
+				AES_and_HASH : DIN_AES_DOUT;
+		else
+			data_flow_mode = is_single_pass ?
+				DES_and_HASH : DIN_DES_DOUT;
+	}
+
+	return data_flow_mode;
+}
+
+static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+			    unsigned int *seq_size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	int direct = req_ctx->gen_ctx.op_type;
+	unsigned int data_flow_mode =
+		cc_get_data_flow(direct, ctx->flow_mode,
+				 req_ctx->is_single_pass);
+
+	if (req_ctx->is_single_pass) {
+		/**
+		 * Single-pass flow
+		 */
+		cc_set_hmac_desc(req, desc, seq_size);
+		cc_set_cipher_desc(req, desc, seq_size);
+		cc_proc_header_desc(req, desc, seq_size);
+		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+		cc_proc_scheme_desc(req, desc, seq_size);
+		cc_proc_digest_desc(req, desc, seq_size);
+		return;
+	}
+
+	/**
+	 * Double-pass flow
+	 * Fallback for unsupported single-pass modes,
+	 * i.e. when the assoc. data length is not a multiple of a word.
+	 */
+	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+		/* encrypt first.. */
+		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+		/* authenc after..*/
+		cc_set_hmac_desc(req, desc, seq_size);
+		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+		cc_proc_scheme_desc(req, desc, seq_size);
+		cc_proc_digest_desc(req, desc, seq_size);
+
+	} else { /*DECRYPT*/
+		/* authenc first..*/
+		cc_set_hmac_desc(req, desc, seq_size);
+		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+		cc_proc_scheme_desc(req, desc, seq_size);
+		/* decrypt after.. */
+		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+		/* read the digest result with setting the completion bit
+		 * must be after the cipher operation
+		 */
+		cc_proc_digest_desc(req, desc, seq_size);
+	}
+}
+
+static void
+cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+		unsigned int *seq_size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	int direct = req_ctx->gen_ctx.op_type;
+	unsigned int data_flow_mode =
+		cc_get_data_flow(direct, ctx->flow_mode,
+				 req_ctx->is_single_pass);
+
+	if (req_ctx->is_single_pass) {
+		/**
+		 * Single-pass flow
+		 */
+		cc_set_xcbc_desc(req, desc, seq_size);
+		cc_set_cipher_desc(req, desc, seq_size);
+		cc_proc_header_desc(req, desc, seq_size);
+		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+		cc_proc_digest_desc(req, desc, seq_size);
+		return;
+	}
+
+	/**
+	 * Double-pass flow
+	 * Fallback for unsupported single-pass modes,
+	 * i.e. when the assoc. data length is not a multiple of a word.
+	 */
+	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+		/* encrypt first.. */
+		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+		/* authenc after.. */
+		cc_set_xcbc_desc(req, desc, seq_size);
+		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+		cc_proc_digest_desc(req, desc, seq_size);
+	} else { /*DECRYPT*/
+		/* authenc first.. */
+		cc_set_xcbc_desc(req, desc, seq_size);
+		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+		/* decrypt after..*/
+		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+		/* read the digest result with setting the completion bit
+		 * must be after the cipher operation
+		 */
+		cc_proc_digest_desc(req, desc, seq_size);
+	}
+}
+
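+/* Validate the request lengths against the configured cipher/auth mode and
+ * decide whether the fast single-pass (combined cipher+hash) flow can be
+ * used; fall back to the double-pass flow when the associated data or the
+ * payload is not suitably aligned for the mode.
+ */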
+static int validate_data_size(struct cc_aead_ctx *ctx,
+			      enum drv_crypto_direction direct,
+			      struct aead_request *req)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	unsigned int assoclen = req->assoclen;
+	unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+			(req->cryptlen - ctx->authsize) : req->cryptlen;
+
+	if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
+	    req->cryptlen < ctx->authsize)
+		goto data_size_err;
+
+	areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
+
+	switch (ctx->flow_mode) {
+	case S_DIN_to_AES:
+		if (ctx->cipher_mode == DRV_CIPHER_CBC &&
+		    !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
+			goto data_size_err;
+		if (ctx->cipher_mode == DRV_CIPHER_CCM)
+			break;
+		if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+			if (areq_ctx->plaintext_authenticate_only)
+				areq_ctx->is_single_pass = false;
+			break;
+		}
+
+		if (!IS_ALIGNED(assoclen, sizeof(u32)))
+			areq_ctx->is_single_pass = false;
+
+		if (ctx->cipher_mode == DRV_CIPHER_CTR &&
+		    !IS_ALIGNED(cipherlen, sizeof(u32)))
+			areq_ctx->is_single_pass = false;
+
+		break;
+	case S_DIN_to_DES:
+		if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
+			goto data_size_err;
+		if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
+			areq_ctx->is_single_pass = false;
+		break;
+	default:
+		dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
+		goto data_size_err;
+	}
+
+	return 0;
+
+data_size_err:
+	return -EINVAL;
+}
+
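+/* Encode the associated-data length l(a) at the start of the A0 block as
+ * defined by RFC 3610 / NIST SP 800-38C: lengths below 2^16 - 2^8 use a
+ * 2-byte encoding, larger ones use the 0xff 0xfe marker followed by a 4-byte
+ * big-endian length. Returns the number of bytes written.
+ */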
+static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
+{
+	unsigned int len = 0;
+
+	if (header_size == 0)
+		return 0;
+
+	if (header_size < ((1UL << 16) - (1UL << 8))) {
+		len = 2;
+
+		pa0_buff[0] = (header_size >> 8) & 0xFF;
+		pa0_buff[1] = header_size & 0xFF;
+	} else {
+		len = 6;
+
+		pa0_buff[0] = 0xFF;
+		pa0_buff[1] = 0xFE;
+		pa0_buff[2] = (header_size >> 24) & 0xFF;
+		pa0_buff[3] = (header_size >> 16) & 0xFF;
+		pa0_buff[4] = (header_size >> 8) & 0xFF;
+		pa0_buff[5] = header_size & 0xFF;
+	}
+
+	return len;
+}
+
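+/* Write the message length big-endian into the csize-byte length field at the
+ * end of the CCM B0 block (as in crypto/ccm.c); fail with -EOVERFLOW if the
+ * length does not fit.
+ */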
+static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
+{
+	__be32 data;
+
+	memset(block, 0, csize);
+	block += csize;
+
+	if (csize >= 4)
+		csize = 4;
+	else if (msglen > (1 << (8 * csize)))
+		return -EOVERFLOW;
+
+	data = cpu_to_be32(msglen);
+	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+	return 0;
+}
+
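+/* Build the CCM descriptor sequence: load the AES-CTR key and counter block,
+ * load the CBC-MAC key and state, MAC the formatted associated data and the
+ * payload while en/decrypting it, read back the intermediate CBC-MAC and
+ * finally encrypt it with the A0 counter block (ccm_iv0) to produce the tag.
+ */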
+static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
+		  unsigned int *seq_size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	unsigned int idx = *seq_size;
+	unsigned int cipher_flow_mode;
+	dma_addr_t mac_result;
+
+	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+		cipher_flow_mode = AES_to_HASH_and_DOUT;
+		mac_result = req_ctx->mac_buf_dma_addr;
+	} else { /* Encrypt */
+		cipher_flow_mode = AES_and_HASH;
+		mac_result = req_ctx->icv_dma_addr;
+	}
+
+	/* load key */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
+		      ctx->enc_keylen), NS_BIT);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	idx++;
+
+	/* load ctr state */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	idx++;
+
+	/* load MAC key */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
+		      ctx->enc_keylen), NS_BIT);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	idx++;
+
+	/* load MAC state */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+		     AES_BLOCK_SIZE, NS_BIT);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	idx++;
+
+	/* process assoc data */
+	if (req->assoclen > 0) {
+		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
+	} else {
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     sg_dma_address(&req_ctx->ccm_adata_sg),
+			     AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
+		set_flow_mode(&desc[idx], DIN_HASH);
+		idx++;
+	}
+
+	/* process the cipher */
+	if (req_ctx->cryptlen)
+		cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
+
+	/* Read temporal MAC */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
+		      NS_BIT, 0);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_aes_not_hash_mode(&desc[idx]);
+	idx++;
+
+	/* load AES-CTR state (for last MAC calculation)*/
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
+		     AES_BLOCK_SIZE, NS_BIT);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	idx++;
+
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
+	idx++;
+
+	/* encrypt the "T" value and store MAC in mac_state */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+		     ctx->authsize, NS_BIT);
+	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
+	set_queue_last_ind(ctx->drvdata, &desc[idx]);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
+	idx++;
+
+	*seq_size = idx;
+	return 0;
+}
+
+static int config_ccm_adata(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	unsigned int lp = req->iv[0];
+	/* Note: The code assumes that req->iv[0] already contains the value
+	 * of L' of RFC 3610.
+	 */
+	unsigned int l = lp + 1;  /* This is L of RFC 3610 (length-field size) */
+	unsigned int m = ctx->authsize;  /* This is M of RFC 3610 (MAC size) */
+	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
+	u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
+	u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
+	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
+				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+				req->cryptlen :
+				(req->cryptlen - ctx->authsize);
+	int rc;
+
+	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
+	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
+
+	/* taken from crypto/ccm.c */
+	/* 2 <= L <= 8, so 1 <= L' <= 7. */
+	if (l < 2 || l > 8) {
+		dev_err(dev, "illegal iv value %X\n", req->iv[0]);
+		return -EINVAL;
+	}
+	memcpy(b0, req->iv, AES_BLOCK_SIZE);
+
+	/* format control info per RFC 3610 and
+	 * NIST Special Publication 800-38C
+	 */
+	*b0 |= (8 * ((m - 2) / 2));
+	if (req->assoclen > 0)
+		*b0 |= 64;  /* Enable bit 6 if Adata exists. */
+
+	rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write l(m). */
+	if (rc) {
+		dev_err(dev, "message len overflow detected");
+		return rc;
+	}
+	 /* END of "taken from crypto/ccm.c" */
+
+	/* l(a) - size of associated data. */
+	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+
+	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
+	req->iv[15] = 1;
+
+	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
+	ctr_count_0[15] = 0;
+
+	return 0;
+}
+
+static void cc_proc_rfc4309_ccm(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+
+	/* L' */
+	memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
+	/* For RFC 4309, always use 4 bytes for message length
+	 * (at most 2^32-1 bytes).
+	 */
+	areq_ctx->ctr_iv[0] = 3;
+
+	/* In RFC 4309 there is an 11-byte nonce+IV part
+	 * that we build here.
+	 */
+	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
+	       CCM_BLOCK_NONCE_SIZE);
+	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
+	       CCM_BLOCK_IV_SIZE);
+	req->iv = areq_ctx->ctr_iv;
+	req->assoclen -= CCM_BLOCK_IV_SIZE;
+}
+
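+/* Set up the GHASH part of GCM: load the AES key, encrypt one all-zero block
+ * to derive the hash subkey H, load H into the GHASH engine and initialize
+ * the GHASH state to zero.
+ */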
+static void cc_set_ghash_desc(struct aead_request *req,
+			      struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	unsigned int idx = *seq_size;
+
+	/* load key to AES*/
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+		     ctx->enc_keylen, NS_BIT);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	idx++;
+
+	/* process one zero block to generate hkey */
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
+	set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
+		      NS_BIT, 0);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
+	idx++;
+
+	/* Memory Barrier */
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
+	idx++;
+
+	/* Load GHASH subkey */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
+		     AES_BLOCK_SIZE, NS_BIT);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	idx++;
+
+	/* Configure the hash engine to work with GHASH.
+	 * Since it was not possible to extend the HASH submodes to add GHASH,
+	 * the following command is necessary in order to select GHASH
+	 * (according to the HW designers).
+	 */
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+	set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	idx++;
+
+	/* Load the GHASH initial STATE (which is 0); as for any hash, there
+	 * is an initial state.
+	 */
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	idx++;
+
+	*seq_size = idx;
+}
+
+static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
+			     unsigned int *seq_size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	unsigned int idx = *seq_size;
+
+	/* load key to AES*/
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+		     ctx->enc_keylen, NS_BIT);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	idx++;
+
+	if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
+		/* load AES/CTR initial CTR value inc by 2*/
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+		set_key_size_aes(&desc[idx], ctx->enc_keylen);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
+			     NS_BIT);
+		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+		set_flow_mode(&desc[idx], S_DIN_to_AES);
+		idx++;
+	}
+
+	*seq_size = idx;
+}
+
+static void cc_proc_gcm_result(struct aead_request *req,
+			       struct cc_hw_desc desc[],
+			       unsigned int *seq_size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	dma_addr_t mac_result;
+	unsigned int idx = *seq_size;
+
+	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+		mac_result = req_ctx->mac_buf_dma_addr;
+	} else { /* Encrypt */
+		mac_result = req_ctx->icv_dma_addr;
+	}
+
+	/* process(ghash) gcm_block_len */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
+		     AES_BLOCK_SIZE, NS_BIT);
+	set_flow_mode(&desc[idx], DIN_HASH);
+	idx++;
+
+	/* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
+		      NS_BIT, 0);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_aes_not_hash_mode(&desc[idx]);
+
+	idx++;
+
+	/* load AES/CTR initial CTR value inc by 1*/
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
+		     AES_BLOCK_SIZE, NS_BIT);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	idx++;
+
+	/* Memory Barrier */
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
+	idx++;
+
+	/* process GCTR on stored GHASH and store MAC in mac_state*/
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+		     AES_BLOCK_SIZE, NS_BIT);
+	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
+	set_queue_last_ind(ctx->drvdata, &desc[idx]);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
+	idx++;
+
+	*seq_size = idx;
+}
+
+static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
+		  unsigned int *seq_size)
+{
+	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	unsigned int cipher_flow_mode;
+
+	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+		cipher_flow_mode = AES_and_HASH;
+	} else { /* Encrypt */
+		cipher_flow_mode = AES_to_HASH_and_DOUT;
+	}
+
+	/* In RFC 4543 there is no data to encrypt; just copy data from
+	 * src to dst.
+	 */
+	if (req_ctx->plaintext_authenticate_only) {
+		cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
+		cc_set_ghash_desc(req, desc, seq_size);
+		/* process(ghash) assoc data */
+		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+		cc_set_gctr_desc(req, desc, seq_size);
+		cc_proc_gcm_result(req, desc, seq_size);
+		return 0;
+	}
+
+	/* For GCM and RFC 4106. */
+	cc_set_ghash_desc(req, desc, seq_size);
+	/* process(ghash) assoc data */
+	if (req->assoclen > 0)
+		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+	cc_set_gctr_desc(req, desc, seq_size);
+	/* process(gctr+ghash) */
+	if (req_ctx->cryptlen)
+		cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
+	cc_proc_gcm_result(req, desc, seq_size);
+
+	return 0;
+}
+
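+/* Precompute the per-request GCM material: clear the H-key and MAC buffers,
+ * build the two counter blocks derived from the 96-bit IV (counter value 1,
+ * used to encrypt the final GHASH into the tag, and counter value 2, the
+ * initial counter for the payload), and fill the GHASH length block with the
+ * AAD and ciphertext bit lengths (for RFC 4543 everything is counted as AAD
+ * and the ciphertext length is zero).
+ */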
+static int config_gcm_context(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
+				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+				req->cryptlen :
+				(req->cryptlen - ctx->authsize);
+	__be32 counter = cpu_to_be32(2);
+
+	dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
+		__func__, cryptlen, req->assoclen, ctx->authsize);
+
+	memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
+
+	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
+
+	memcpy(req->iv + 12, &counter, 4);
+	memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
+
+	counter = cpu_to_be32(1);
+	memcpy(req->iv + 12, &counter, 4);
+	memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
+
+	if (!req_ctx->plaintext_authenticate_only) {
+		__be64 temp64;
+
+		temp64 = cpu_to_be64(req->assoclen * 8);
+		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
+		temp64 = cpu_to_be64(cryptlen * 8);
+		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
+	} else {
+		/* RFC 4543 => all data (AAD, IV, plaintext) is considered
+		 * additional data, i.e. nothing is encrypted.
+		 */
+		__be64 temp64;
+
+		temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
+				      cryptlen) * 8);
+		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
+		temp64 = 0;
+		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
+	}
+
+	return 0;
+}
+
+static void cc_proc_rfc4_gcm(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+
+	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
+	       ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
+	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
+	       GCM_BLOCK_RFC4_IV_SIZE);
+	req->iv = areq_ctx->ctr_iv;
+	req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+}
+
+static int cc_proc_aead(struct aead_request *req,
+			enum drv_crypto_direction direct)
+{
+	int rc = 0;
+	int seq_len = 0;
+	struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	struct cc_crypto_req cc_req = {};
+
+	dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
+		((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
+		ctx, req, req->iv, sg_virt(req->src), req->src->offset,
+		sg_virt(req->dst), req->dst->offset, req->cryptlen);
+
+	/* STAT_PHASE_0: Init and sanity checks */
+
+	/* Check data length according to mode */
+	if (validate_data_size(ctx, direct, req)) {
+		dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
+			req->cryptlen, req->assoclen);
+		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+		return -EINVAL;
+	}
+
+	/* Setup request structure */
+	cc_req.user_cb = (void *)cc_aead_complete;
+	cc_req.user_arg = (void *)req;
+
+	/* Setup request context */
+	areq_ctx->gen_ctx.op_type = direct;
+	areq_ctx->req_authsize = ctx->authsize;
+	areq_ctx->cipher_mode = ctx->cipher_mode;
+
+	/* STAT_PHASE_1: Map buffers */
+
+	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+		/* Build CTR IV - Copy nonce from last 4 bytes in
+		 * CTR key to first 4 bytes in CTR IV
+		 */
+		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
+		       CTR_RFC3686_NONCE_SIZE);
+		if (!areq_ctx->backup_giv) /* user-provided (non-generated) IV */
+			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
+			       req->iv, CTR_RFC3686_IV_SIZE);
+		/* Initialize counter portion of counter block */
+		*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
+			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+
+		/* Replace with counter iv */
+		req->iv = areq_ctx->ctr_iv;
+		areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
+	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
+		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
+		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
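+		/* keep the IV in the driver-owned, AES-block sized ctr_iv buffer */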
+		if (areq_ctx->ctr_iv != req->iv) {
+			memcpy(areq_ctx->ctr_iv, req->iv,
+			       crypto_aead_ivsize(tfm));
+			req->iv = areq_ctx->ctr_iv;
+		}
+	}  else {
+		areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
+	}
+
+	if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+		rc = config_ccm_adata(req);
+		if (rc) {
+			dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
+				rc);
+			goto exit;
+		}
+	} else {
+		areq_ctx->ccm_hdr_size = ccm_header_size_null;
+	}
+
+	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+		rc = config_gcm_context(req);
+		if (rc) {
+			dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
+				rc);
+			goto exit;
+		}
+	}
+
+	rc = cc_map_aead_request(ctx->drvdata, req);
+	if (rc) {
+		dev_err(dev, "map_request() failed\n");
+		goto exit;
+	}
+
+	/* do we need to generate IV? */
+	if (areq_ctx->backup_giv) {
+		/* set the DMA-mapped IV address */
+		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+			cc_req.ivgen_dma_addr[0] =
+				areq_ctx->gen_ctx.iv_dma_addr +
+				CTR_RFC3686_NONCE_SIZE;
+			cc_req.ivgen_dma_addr_len = 1;
+		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+			/* In CCM, the IV needs to exist both inside B0 and
+			 * inside the counter. It is also copied to iv_dma_addr
+			 * for other reasons (like returning it to the user).
+			 * So, use 3 (identical) IV outputs.
+			 */
+			cc_req.ivgen_dma_addr[0] =
+				areq_ctx->gen_ctx.iv_dma_addr +
+				CCM_BLOCK_IV_OFFSET;
+			cc_req.ivgen_dma_addr[1] =
+				sg_dma_address(&areq_ctx->ccm_adata_sg) +
+				CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
+			cc_req.ivgen_dma_addr[2] =
+				sg_dma_address(&areq_ctx->ccm_adata_sg) +
+				CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
+			cc_req.ivgen_dma_addr_len = 3;
+		} else {
+			cc_req.ivgen_dma_addr[0] =
+				areq_ctx->gen_ctx.iv_dma_addr;
+			cc_req.ivgen_dma_addr_len = 1;
+		}
+
+		/* set the IV size (8/16 B long) */
+		cc_req.ivgen_size = crypto_aead_ivsize(tfm);
+	}
+
+	/* STAT_PHASE_2: Create sequence */
+
+	/* Load MLLI tables to SRAM if necessary */
+	cc_mlli_to_sram(req, desc, &seq_len);
+
+	/* TODO: move seq len by reference */
+	switch (ctx->auth_mode) {
+	case DRV_HASH_SHA1:
+	case DRV_HASH_SHA256:
+		cc_hmac_authenc(req, desc, &seq_len);
+		break;
+	case DRV_HASH_XCBC_MAC:
+		cc_xcbc_authenc(req, desc, &seq_len);
+		break;
+	case DRV_HASH_NULL:
+		if (ctx->cipher_mode == DRV_CIPHER_CCM)
+			cc_ccm(req, desc, &seq_len);
+		if (ctx->cipher_mode == DRV_CIPHER_GCTR)
+			cc_gcm(req, desc, &seq_len);
+		break;
+	default:
+		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
+		cc_unmap_aead_request(dev, req);
+		rc = -ENOTSUPP;
+		goto exit;
+	}
+
+	/* STAT_PHASE_3: Lock HW and push sequence */
+
+	rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
+
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+		cc_unmap_aead_request(dev, req);
+	}
+
+exit:
+	return rc;
+}
+
+static int cc_aead_encrypt(struct aead_request *req)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	int rc;
+
+	/* No generated IV required */
+	areq_ctx->backup_iv = req->iv;
+	areq_ctx->backup_giv = NULL;
+	areq_ctx->is_gcm4543 = false;
+
+	areq_ctx->plaintext_authenticate_only = false;
+
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
+		req->iv = areq_ctx->backup_iv;
+
+	return rc;
+}
+
+static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
+{
+	/* Very similar to cc_aead_encrypt() above. */
+
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	int rc = -EINVAL;
+
+	if (!valid_assoclen(req)) {
+		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+		goto out;
+	}
+
+	/* No generated IV required */
+	areq_ctx->backup_iv = req->iv;
+	areq_ctx->backup_giv = NULL;
+	areq_ctx->is_gcm4543 = true;
+
+	cc_proc_rfc4309_ccm(req);
+
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
+		req->iv = areq_ctx->backup_iv;
+out:
+	return rc;
+}
+
+static int cc_aead_decrypt(struct aead_request *req)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	int rc;
+
+	/* No generated IV required */
+	areq_ctx->backup_iv = req->iv;
+	areq_ctx->backup_giv = NULL;
+	areq_ctx->is_gcm4543 = false;
+
+	areq_ctx->plaintext_authenticate_only = false;
+
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
+		req->iv = areq_ctx->backup_iv;
+
+	return rc;
+}
+
+static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	int rc = -EINVAL;
+
+	if (!valid_assoclen(req)) {
+		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+		goto out;
+	}
+
+	/* No generated IV required */
+	areq_ctx->backup_iv = req->iv;
+	areq_ctx->backup_giv = NULL;
+
+	areq_ctx->is_gcm4543 = true;
+	cc_proc_rfc4309_ccm(req);
+
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
+		req->iv = areq_ctx->backup_iv;
+
+out:
+	return rc;
+}
+
+static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+				 unsigned int keylen)
+{
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
+
+	if (keylen < 4)
+		return -EINVAL;
+
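+	/* per RFC 4106, the last 4 bytes of the key are the nonce (salt) */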
+	keylen -= 4;
+	memcpy(ctx->ctr_nonce, key + keylen, 4);
+
+	return cc_aead_setkey(tfm, key, keylen);
+}
+
+static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+				 unsigned int keylen)
+{
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
+
+	if (keylen < 4)
+		return -EINVAL;
+
+	keylen -= 4;
+	memcpy(ctx->ctr_nonce, key + keylen, 4);
+
+	return cc_aead_setkey(tfm, key, keylen);
+}
+
+static int cc_gcm_setauthsize(struct crypto_aead *authenc,
+			      unsigned int authsize)
+{
+	switch (authsize) {
+	case 4:
+	case 8:
+	case 12:
+	case 13:
+	case 14:
+	case 15:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
+				      unsigned int authsize)
+{
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	dev_dbg(dev, "authsize %d\n", authsize);
+
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
+				      unsigned int authsize)
+{
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	dev_dbg(dev, "authsize %d\n", authsize);
+
+	if (authsize != 16)
+		return -EINVAL;
+
+	return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
+{
+	/* Very similar to cc_aead_encrypt() above. */
+
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	int rc = -EINVAL;
+
+	if (!valid_assoclen(req)) {
+		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+		goto out;
+	}
+
+	/* No generated IV required */
+	areq_ctx->backup_iv = req->iv;
+	areq_ctx->backup_giv = NULL;
+
+	areq_ctx->plaintext_authenticate_only = false;
+
+	cc_proc_rfc4_gcm(req);
+	areq_ctx->is_gcm4543 = true;
+
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
+		req->iv = areq_ctx->backup_iv;
+out:
+	return rc;
+}
+
+static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
+{
+	/* Very similar to cc_aead_encrypt() above. */
+
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	int rc;
+
+	// with rfc4543 the plaintext is authenticated but not encrypted
+	areq_ctx->plaintext_authenticate_only = true;
+
+	/* No generated IV required */
+	areq_ctx->backup_iv = req->iv;
+	areq_ctx->backup_giv = NULL;
+
+	cc_proc_rfc4_gcm(req);
+	areq_ctx->is_gcm4543 = true;
+
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
+		req->iv = areq_ctx->backup_iv;
+
+	return rc;
+}
+
+static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
+{
+	/* Very similar to cc_aead_decrypt() above. */
+
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	int rc = -EINVAL;
+
+	if (!valid_assoclen(req)) {
+		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+		goto out;
+	}
+
+	/* No generated IV required */
+	areq_ctx->backup_iv = req->iv;
+	areq_ctx->backup_giv = NULL;
+
+	areq_ctx->plaintext_authenticate_only = false;
+
+	cc_proc_rfc4_gcm(req);
+	areq_ctx->is_gcm4543 = true;
+
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
+		req->iv = areq_ctx->backup_iv;
+out:
+	return rc;
+}
+
+static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
+{
+	/* Very similar to cc_aead_decrypt() above. */
+
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	int rc;
+
+	// with rfc4543 the payload is authenticated only, not decrypted
+	areq_ctx->plaintext_authenticate_only = true;
+
+	/* No generated IV required */
+	areq_ctx->backup_iv = req->iv;
+	areq_ctx->backup_giv = NULL;
+
+	cc_proc_rfc4_gcm(req);
+	areq_ctx->is_gcm4543 = true;
+
+	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+	if (rc != -EINPROGRESS && rc != -EBUSY)
+		req->iv = areq_ctx->backup_iv;
+
+	return rc;
+}
+
+/* aead alg */
+static struct cc_alg_template aead_algs[] = {
+	{
+		.name = "authenc(hmac(sha1),cbc(aes))",
+		.driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_aead = {
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_AES,
+		.auth_mode = DRV_HASH_SHA1,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "authenc(hmac(sha1),cbc(des3_ede))",
+		.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.template_aead = {
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_DES,
+		.auth_mode = DRV_HASH_SHA1,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "authenc(hmac(sha256),cbc(aes))",
+		.driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_aead = {
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_AES,
+		.auth_mode = DRV_HASH_SHA256,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "authenc(hmac(sha256),cbc(des3_ede))",
+		.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.template_aead = {
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_DES,
+		.auth_mode = DRV_HASH_SHA256,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "authenc(xcbc(aes),cbc(aes))",
+		.driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_aead = {
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_AES,
+		.auth_mode = DRV_HASH_XCBC_MAC,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
+		.blocksize = 1,
+		.template_aead = {
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CTR,
+		.flow_mode = S_DIN_to_AES,
+		.auth_mode = DRV_HASH_SHA1,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
+		.blocksize = 1,
+		.template_aead = {
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CTR,
+		.flow_mode = S_DIN_to_AES,
+		.auth_mode = DRV_HASH_SHA256,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
+		.blocksize = 1,
+		.template_aead = {
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_aead_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CTR,
+		.flow_mode = S_DIN_to_AES,
+		.auth_mode = DRV_HASH_XCBC_MAC,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "ccm(aes)",
+		.driver_name = "ccm-aes-ccree",
+		.blocksize = 1,
+		.template_aead = {
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_ccm_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CCM,
+		.flow_mode = S_DIN_to_AES,
+		.auth_mode = DRV_HASH_NULL,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "rfc4309(ccm(aes))",
+		.driver_name = "rfc4309-ccm-aes-ccree",
+		.blocksize = 1,
+		.template_aead = {
+			.setkey = cc_rfc4309_ccm_setkey,
+			.setauthsize = cc_rfc4309_ccm_setauthsize,
+			.encrypt = cc_rfc4309_ccm_encrypt,
+			.decrypt = cc_rfc4309_ccm_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
+			.ivsize = CCM_BLOCK_IV_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CCM,
+		.flow_mode = S_DIN_to_AES,
+		.auth_mode = DRV_HASH_NULL,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "gcm(aes)",
+		.driver_name = "gcm-aes-ccree",
+		.blocksize = 1,
+		.template_aead = {
+			.setkey = cc_aead_setkey,
+			.setauthsize = cc_gcm_setauthsize,
+			.encrypt = cc_aead_encrypt,
+			.decrypt = cc_aead_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
+			.ivsize = 12,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_GCTR,
+		.flow_mode = S_DIN_to_AES,
+		.auth_mode = DRV_HASH_NULL,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "rfc4106(gcm(aes))",
+		.driver_name = "rfc4106-gcm-aes-ccree",
+		.blocksize = 1,
+		.template_aead = {
+			.setkey = cc_rfc4106_gcm_setkey,
+			.setauthsize = cc_rfc4106_gcm_setauthsize,
+			.encrypt = cc_rfc4106_gcm_encrypt,
+			.decrypt = cc_rfc4106_gcm_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
+			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_GCTR,
+		.flow_mode = S_DIN_to_AES,
+		.auth_mode = DRV_HASH_NULL,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "rfc4543(gcm(aes))",
+		.driver_name = "rfc4543-gcm-aes-ccree",
+		.blocksize = 1,
+		.template_aead = {
+			.setkey = cc_rfc4543_gcm_setkey,
+			.setauthsize = cc_rfc4543_gcm_setauthsize,
+			.encrypt = cc_rfc4543_gcm_encrypt,
+			.decrypt = cc_rfc4543_gcm_decrypt,
+			.init = cc_aead_init,
+			.exit = cc_aead_exit,
+			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_GCTR,
+		.flow_mode = S_DIN_to_AES,
+		.auth_mode = DRV_HASH_NULL,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+};
+
+static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
+						struct device *dev)
+{
+	struct cc_crypto_alg *t_alg;
+	struct aead_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg)
+		return ERR_PTR(-ENOMEM);
+
+	alg = &tmpl->template_aead;
+
+	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 tmpl->driver_name);
+	alg->base.cra_module = THIS_MODULE;
+	alg->base.cra_priority = CC_CRA_PRIO;
+
+	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->init = cc_aead_init;
+	alg->exit = cc_aead_exit;
+
+	t_alg->aead_alg = *alg;
+
+	t_alg->cipher_mode = tmpl->cipher_mode;
+	t_alg->flow_mode = tmpl->flow_mode;
+	t_alg->auth_mode = tmpl->auth_mode;
+
+	return t_alg;
+}
+
+int cc_aead_free(struct cc_drvdata *drvdata)
+{
+	struct cc_crypto_alg *t_alg, *n;
+	struct cc_aead_handle *aead_handle =
+		(struct cc_aead_handle *)drvdata->aead_handle;
+
+	if (aead_handle) {
+		/* Remove registered algs */
+		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
+					 entry) {
+			crypto_unregister_aead(&t_alg->aead_alg);
+			list_del(&t_alg->entry);
+			kfree(t_alg);
+		}
+		kfree(aead_handle);
+		drvdata->aead_handle = NULL;
+	}
+
+	return 0;
+}
+
+int cc_aead_alloc(struct cc_drvdata *drvdata)
+{
+	struct cc_aead_handle *aead_handle;
+	struct cc_crypto_alg *t_alg;
+	int rc = -ENOMEM;
+	int alg;
+	struct device *dev = drvdata_to_dev(drvdata);
+
+	aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
+	if (!aead_handle) {
+		rc = -ENOMEM;
+		goto fail0;
+	}
+
+	INIT_LIST_HEAD(&aead_handle->aead_list);
+	drvdata->aead_handle = aead_handle;
+
+	aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
+							 MAX_HMAC_DIGEST_SIZE);
+
+	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
+		dev_err(dev, "SRAM pool exhausted\n");
+		rc = -ENOMEM;
+		goto fail1;
+	}
+
+	/* Linux crypto */
+	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
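+		/* skip algorithms not supported by this HW revision */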
+		if (aead_algs[alg].min_hw_rev > drvdata->hw_rev)
+			continue;
+
+		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
+		if (IS_ERR(t_alg)) {
+			rc = PTR_ERR(t_alg);
+			dev_err(dev, "%s alg allocation failed\n",
+				aead_algs[alg].driver_name);
+			goto fail1;
+		}
+		t_alg->drvdata = drvdata;
+		rc = crypto_register_aead(&t_alg->aead_alg);
+		if (rc) {
+			dev_err(dev, "%s alg registration failed\n",
+				t_alg->aead_alg.base.cra_driver_name);
+			goto fail2;
+		} else {
+			list_add_tail(&t_alg->entry, &aead_handle->aead_list);
+			dev_dbg(dev, "Registered %s\n",
+				t_alg->aead_alg.base.cra_driver_name);
+		}
+	}
+
+	return 0;
+
+fail2:
+	kfree(t_alg);
+fail1:
+	cc_aead_free(drvdata);
+fail0:
+	return rc;
+}
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
new file mode 100644
index 0000000..5edf3b3
--- /dev/null
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_aead.h
+ * ARM CryptoCell AEAD Crypto API
+ */
+
+#ifndef __CC_AEAD_H__
+#define __CC_AEAD_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include <crypto/ctr.h>
+
+/* mac_cmp - HW writes 8 B but all bytes hold the same value */
+#define ICV_CMP_SIZE 8
+#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
+#define MAX_MAC_SIZE SHA256_DIGEST_SIZE
+
+/* defines for AES GCM configuration buffer */
+#define GCM_BLOCK_LEN_SIZE 8
+
+#define GCM_BLOCK_RFC4_IV_OFFSET	4
+#define GCM_BLOCK_RFC4_IV_SIZE		8  /* IV size for RFCs 4106/4543 */
+#define GCM_BLOCK_RFC4_NONCE_OFFSET	0
+#define GCM_BLOCK_RFC4_NONCE_SIZE	4
+
+/* Offsets into AES CCM configuration buffer */
+#define CCM_B0_OFFSET 0
+#define CCM_A0_OFFSET 16
+#define CCM_CTR_COUNT_0_OFFSET 32
+/* CCM B0 and CTR_COUNT constants. */
+#define CCM_BLOCK_NONCE_OFFSET 1  /* Nonce offset inside B0 and CTR_COUNT */
+#define CCM_BLOCK_NONCE_SIZE   3  /* Nonce size inside B0 and CTR_COUNT */
+#define CCM_BLOCK_IV_OFFSET    4  /* IV offset inside B0 and CTR_COUNT */
+#define CCM_BLOCK_IV_SIZE      8  /* IV size inside B0 and CTR_COUNT */
+
+enum aead_ccm_header_size {
+	ccm_header_size_null = -1,
+	ccm_header_size_zero = 0,
+	ccm_header_size_2 = 2,
+	ccm_header_size_6 = 6,
+	ccm_header_size_max = S32_MAX
+};
+
+struct aead_req_ctx {
+	/* Allocate a full cache line although only 4 bytes are needed, to
+	 * ensure the next field starts on a cache-line boundary.
+	 * Used for both the digest HW compare and the CCM/GCM MAC value.
+	 */
+	u8 mac_buf[MAX_MAC_SIZE] ____cacheline_aligned;
+	u8 ctr_iv[AES_BLOCK_SIZE] ____cacheline_aligned;
+
+	//used in gcm
+	u8 gcm_iv_inc1[AES_BLOCK_SIZE] ____cacheline_aligned;
+	u8 gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
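+	/* GHASH subkey H, i.e. the zero block encrypted under the AES key */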
+	u8 hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
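+	/* final GHASH block holding the bit lengths of the AAD (len_a)
+	 * and of the ciphertext (len_c)
+	 */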
+	struct {
+		u8 len_a[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
+		u8 len_c[GCM_BLOCK_LEN_SIZE];
+	} gcm_len_block;
+
+	u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
+	/* HW actual size input */
+	unsigned int hw_iv_size ____cacheline_aligned;
+	/* used to work around cache coherency problems */
+	u8 backup_mac[MAX_MAC_SIZE];
+	u8 *backup_iv; /* store the request IV so it can be restored later */
+	u8 *backup_giv; /* non-NULL when an IV must be generated (ivgen flow) */
+	dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
+	/* buffer for internal ccm configurations */
+	dma_addr_t ccm_iv0_dma_addr;
+	dma_addr_t icv_dma_addr; /* Phys. address of ICV */
+
+	//used in gcm
+	/* buffer for internal gcm configurations */
+	dma_addr_t gcm_iv_inc1_dma_addr;
+	/* buffer for internal gcm configurations */
+	dma_addr_t gcm_iv_inc2_dma_addr;
+	dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
+	dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
+	bool is_gcm4543;
+
+	u8 *icv_virt_addr; /* Virt. address of ICV */
+	struct async_gen_req_ctx gen_ctx;
+	struct cc_mlli assoc;
+	struct cc_mlli src;
+	struct cc_mlli dst;
+	struct scatterlist *src_sgl;
+	struct scatterlist *dst_sgl;
+	unsigned int src_offset;
+	unsigned int dst_offset;
+	enum cc_req_dma_buf_type assoc_buff_type;
+	enum cc_req_dma_buf_type data_buff_type;
+	struct mlli_params mlli_params;
+	unsigned int cryptlen;
+	struct scatterlist ccm_adata_sg;
+	enum aead_ccm_header_size ccm_hdr_size;
+	unsigned int req_authsize;
+	enum drv_cipher_mode cipher_mode;
+	bool is_icv_fragmented;
+	bool is_single_pass;
+	bool plaintext_authenticate_only; //for gcm_rfc4543
+};
+
+int cc_aead_alloc(struct cc_drvdata *drvdata);
+int cc_aead_free(struct cc_drvdata *drvdata);
+
+#endif /*__CC_AEAD_H__*/
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
new file mode 100644
index 0000000..dd948e1
--- /dev/null
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -0,0 +1,1647 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <crypto/internal/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+
+#include "cc_buffer_mgr.h"
+#include "cc_lli_defs.h"
+#include "cc_cipher.h"
+#include "cc_hash.h"
+#include "cc_aead.h"
+
+enum dma_buffer_type {
+	DMA_NULL_TYPE = -1,
+	DMA_SGL_TYPE = 1,
+	DMA_BUFF_TYPE = 2,
+};
+
+struct buff_mgr_handle {
+	struct dma_pool *mlli_buffs_pool;
+};
+
+union buffer_array_entry {
+	struct scatterlist *sgl;
+	dma_addr_t buffer_dma;
+};
+
+struct buffer_array {
+	unsigned int num_of_buffers;
+	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
+	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
+	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
+	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
+	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
+	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
+	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
+};
+
+static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
+{
+	switch (type) {
+	case CC_DMA_BUF_NULL:
+		return "BUF_NULL";
+	case CC_DMA_BUF_DLLI:
+		return "BUF_DLLI";
+	case CC_DMA_BUF_MLLI:
+		return "BUF_MLLI";
+	default:
+		return "BUF_INVALID";
+	}
+}
+
+/**
+ * cc_copy_mac() - Copy MAC to temporary location
+ *
+ * @dev: device object
+ * @req: aead request object
+ * @dir: [IN] copy from/to sgl
+ */
+static void cc_copy_mac(struct device *dev, struct aead_request *req,
+			enum cc_sg_cpy_direct dir)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	u32 skip = req->assoclen + req->cryptlen;
+
+	if (areq_ctx->is_gcm4543)
+		skip += crypto_aead_ivsize(tfm);
+
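+	/* the MAC occupies the last req_authsize bytes ending at 'skip' */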
+	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
+			   (skip - areq_ctx->req_authsize), skip, dir);
+}
+
+/**
+ * cc_get_sgl_nents() - Get scatterlist number of entries.
+ *
+ * @dev: device object
+ * @sg_list: SG list
+ * @nbytes: [IN] Total SGL data bytes.
+ * @lbytes: [OUT] Returns the number of bytes in the last entry
+ * @is_chained: [OUT] Set to true if the SG list is chained (may be NULL)
+ */
+static unsigned int cc_get_sgl_nents(struct device *dev,
+				     struct scatterlist *sg_list,
+				     unsigned int nbytes, u32 *lbytes,
+				     bool *is_chained)
+{
+	unsigned int nents = 0;
+
+	while (nbytes && sg_list) {
+		if (sg_list->length) {
+			nents++;
+			/* get the number of bytes in the last entry */
+			*lbytes = nbytes;
+			nbytes -= (sg_list->length > nbytes) ?
+					nbytes : sg_list->length;
+			sg_list = sg_next(sg_list);
+		} else {
+			sg_list = (struct scatterlist *)sg_page(sg_list);
+			if (is_chained)
+				*is_chained = true;
+		}
+	}
+	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
+	return nents;
+}
+
+/**
+ * cc_zero_sgl() - Zero scatterlist data.
+ *
+ * @sgl: scatterlist to zero
+ * @data_len: number of data bytes to zero
+ */
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
+{
+	struct scatterlist *current_sg = sgl;
+	int sg_index = 0;
+
+	while (sg_index <= data_len) {
+		if (!current_sg) {
+			/* reached the end of the sgl --> just return back */
+			return;
+		}
+		memset(sg_virt(current_sg), 0, current_sg->length);
+		sg_index += current_sg->length;
+		current_sg = sg_next(current_sg);
+	}
+}
+
+/**
+ * cc_copy_sg_portion() - Copy scatterlist data, from to_skip to end,
+ * to dest and vice versa
+ *
+ * @dev: device object
+ * @dest: linear buffer to copy to/from
+ * @sg: scatterlist to copy from/to
+ * @to_skip: offset of the first byte to copy
+ * @end: offset up to which to copy
+ * @direct: copy direction (CC_SG_TO_BUF or CC_SG_FROM_BUF)
+ */
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
+{
+	u32 nents, lbytes;
+
+	nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
+		       (direct == CC_SG_TO_BUF));
+}
+
+static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
+				  u32 buff_size, u32 *curr_nents,
+				  u32 **mlli_entry_pp)
+{
+	u32 *mlli_entry_p = *mlli_entry_pp;
+	u32 new_nents;
+
+	/* Verify there is no memory overflow */
+	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
+	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
+		return -ENOMEM;
+
+	/* handle buffers longer than 64 KB */
+	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
+		cc_lli_set_addr(mlli_entry_p, buff_dma);
+		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
+		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
+			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
+			mlli_entry_p[LLI_WORD1_OFFSET]);
+		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
+		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
+		mlli_entry_p = mlli_entry_p + 2;
+		(*curr_nents)++;
+	}
+	/* Last entry */
+	cc_lli_set_addr(mlli_entry_p, buff_dma);
+	cc_lli_set_size(mlli_entry_p, buff_size);
+	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
+		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
+		mlli_entry_p[LLI_WORD1_OFFSET]);
+	mlli_entry_p = mlli_entry_p + 2;
+	*mlli_entry_pp = mlli_entry_p;
+	(*curr_nents)++;
+	return 0;
+}
+
+static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
+				u32 sgl_data_len, u32 sgl_offset,
+				u32 *curr_nents, u32 **mlli_entry_pp)
+{
+	struct scatterlist *curr_sgl = sgl;
+	u32 *mlli_entry_p = *mlli_entry_pp;
+	s32 rc = 0;
+
+	for ( ; (curr_sgl && sgl_data_len);
+	      curr_sgl = sg_next(curr_sgl)) {
+		u32 entry_data_len =
+			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
+				sg_dma_len(curr_sgl) - sgl_offset :
+				sgl_data_len;
+		sgl_data_len -= entry_data_len;
+		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
+					    sgl_offset, entry_data_len,
+					    curr_nents, &mlli_entry_p);
+		if (rc)
+			return rc;
+
+		sgl_offset = 0;
+	}
+	*mlli_entry_pp = mlli_entry_p;
+	return 0;
+}
+
+static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
+			    struct mlli_params *mlli_params, gfp_t flags)
+{
+	u32 *mlli_p;
+	u32 total_nents = 0, prev_total_nents = 0;
+	int rc = 0, i;
+
+	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
+
+	/* Allocate memory from the pointed pool */
+	mlli_params->mlli_virt_addr =
+		dma_pool_alloc(mlli_params->curr_pool, flags,
+			       &mlli_params->mlli_dma_addr);
+	if (!mlli_params->mlli_virt_addr) {
+		dev_err(dev, "dma_pool_alloc() failed\n");
+		rc = -ENOMEM;
+		goto build_mlli_exit;
+	}
+	/* Point to start of MLLI */
+	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
+	/* go over all SG's and link it to one MLLI table */
+	for (i = 0; i < sg_data->num_of_buffers; i++) {
+		union buffer_array_entry *entry = &sg_data->entry[i];
+		u32 tot_len = sg_data->total_data_len[i];
+		u32 offset = sg_data->offset[i];
+
+		if (sg_data->type[i] == DMA_SGL_TYPE)
+			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
+						  offset, &total_nents,
+						  &mlli_p);
+		else /*DMA_BUFF_TYPE*/
+			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
+						    tot_len, &total_nents,
+						    &mlli_p);
+		if (rc)
+			return rc;
+
+		/* set last bit in the current table */
+		if (sg_data->mlli_nents[i]) {
+			/* Calculate the current MLLI table length for the
+			 * length field in the descriptor
+			 */
+			*sg_data->mlli_nents[i] +=
+				(total_nents - prev_total_nents);
+			prev_total_nents = total_nents;
+		}
+	}
+
+	/* Set MLLI size for the bypass operation */
+	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
+
+	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
+		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
+		mlli_params->mlli_len);
+
+build_mlli_exit:
+	return rc;
+}
+
+static void cc_add_buffer_entry(struct device *dev,
+				struct buffer_array *sgl_data,
+				dma_addr_t buffer_dma, unsigned int buffer_len,
+				bool is_last_entry, u32 *mlli_nents)
+{
+	unsigned int index = sgl_data->num_of_buffers;
+
+	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
+		index, &buffer_dma, buffer_len, is_last_entry);
+	sgl_data->nents[index] = 1;
+	sgl_data->entry[index].buffer_dma = buffer_dma;
+	sgl_data->offset[index] = 0;
+	sgl_data->total_data_len[index] = buffer_len;
+	sgl_data->type[index] = DMA_BUFF_TYPE;
+	sgl_data->is_last[index] = is_last_entry;
+	sgl_data->mlli_nents[index] = mlli_nents;
+	if (sgl_data->mlli_nents[index])
+		*sgl_data->mlli_nents[index] = 0;
+	sgl_data->num_of_buffers++;
+}
+
+static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
+			    unsigned int nents, struct scatterlist *sgl,
+			    unsigned int data_len, unsigned int data_offset,
+			    bool is_last_table, u32 *mlli_nents)
+{
+	unsigned int index = sgl_data->num_of_buffers;
+
+	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
+		index, nents, sgl, data_len, is_last_table);
+	sgl_data->nents[index] = nents;
+	sgl_data->entry[index].sgl = sgl;
+	sgl_data->offset[index] = data_offset;
+	sgl_data->total_data_len[index] = data_len;
+	sgl_data->type[index] = DMA_SGL_TYPE;
+	sgl_data->is_last[index] = is_last_table;
+	sgl_data->mlli_nents[index] = mlli_nents;
+	if (sgl_data->mlli_nents[index])
+		*sgl_data->mlli_nents[index] = 0;
+	sgl_data->num_of_buffers++;
+}
+
+static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
+			 enum dma_data_direction direction)
+{
+	u32 i, j;
+	struct scatterlist *l_sg = sg;
+
+	for (i = 0; i < nents; i++) {
+		if (!l_sg)
+			break;
+		if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
+			dev_err(dev, "dma_map_page() sg buffer failed\n");
+			goto err;
+		}
+		l_sg = sg_next(l_sg);
+	}
+	return nents;
+
+err:
+	/* Restore mapped parts */
+	for (j = 0; j < i; j++) {
+		if (!sg)
+			break;
+		dma_unmap_sg(dev, sg, 1, direction);
+		sg = sg_next(sg);
+	}
+	return 0;
+}
+
+static int cc_map_sg(struct device *dev, struct scatterlist *sg,
+		     unsigned int nbytes, int direction, u32 *nents,
+		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
+{
+	bool is_chained = false;
+
+	if (sg_is_last(sg)) {
+		/* One entry only case - set to DLLI */
+		if (dma_map_sg(dev, sg, 1, direction) != 1) {
+			dev_err(dev, "dma_map_sg() single buffer failed\n");
+			return -ENOMEM;
+		}
+		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
+			sg->offset, sg->length);
+		*lbytes = nbytes;
+		*nents = 1;
+		*mapped_nents = 1;
+	} else {  /* !sg_is_last */
+		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
+					  &is_chained);
+		if (*nents > max_sg_nents) {
+			*nents = 0;
+			dev_err(dev, "Too many fragments. current %d max %d\n",
+				*nents, max_sg_nents);
+			return -ENOMEM;
+		}
+		if (!is_chained) {
+			/* When an IOMMU is present, the number of mapped
+			 * nents might differ from the original sgl nents
+			 */
+			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+			if (*mapped_nents == 0) {
+				*nents = 0;
+				dev_err(dev, "dma_map_sg() sg buffer failed\n");
+				return -ENOMEM;
+			}
+		} else {
+			/* In this case the driver maps entry by entry, so it
+			 * must have the same nents before and after mapping
+			 */
+			*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
+						      direction);
+			if (*mapped_nents != *nents) {
+				*nents = *mapped_nents;
+				dev_err(dev, "dma_map_sg() sg buffer failed\n");
+				return -ENOMEM;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int
+cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
+		     u8 *config_data, struct buffer_array *sg_data,
+		     unsigned int assoclen)
+{
+	dev_dbg(dev, " handle additional data config set to DLLI\n");
+	/* create sg for the current buffer */
+	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
+		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
+	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
+		dev_err(dev, "dma_map_sg() config buffer failed\n");
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+		&sg_dma_address(&areq_ctx->ccm_adata_sg),
+		sg_page(&areq_ctx->ccm_adata_sg),
+		sg_virt(&areq_ctx->ccm_adata_sg),
+		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
+	/* prepare for case of MLLI */
+	if (assoclen > 0) {
+		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
+				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
+				0, false, NULL);
+	}
+	return 0;
+}
+
+static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
+			   u8 *curr_buff, u32 curr_buff_cnt,
+			   struct buffer_array *sg_data)
+{
+	dev_dbg(dev, " handle curr buff %x set to   DLLI\n", curr_buff_cnt);
+	/* create sg for the current buffer */
+	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
+	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
+		dev_err(dev, "dma_map_sg() src buffer failed\n");
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
+		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
+		areq_ctx->buff_sg->length);
+	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+	areq_ctx->curr_sg = areq_ctx->buff_sg;
+	areq_ctx->in_nents = 0;
+	/* prepare for case of MLLI */
+	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
+			false, NULL);
+	return 0;
+}
+
+void cc_unmap_cipher_request(struct device *dev, void *ctx,
+				unsigned int ivsize, struct scatterlist *src,
+				struct scatterlist *dst)
+{
+	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
+
+	if (req_ctx->gen_ctx.iv_dma_addr) {
+		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
+			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
+		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
+				 ivsize, DMA_TO_DEVICE);
+	}
+	/* Release pool */
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
+	    req_ctx->mlli_params.mlli_virt_addr) {
+		dma_pool_free(req_ctx->mlli_params.curr_pool,
+			      req_ctx->mlli_params.mlli_virt_addr,
+			      req_ctx->mlli_params.mlli_dma_addr);
+	}
+
+	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+
+	if (src != dst) {
+		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+	}
+}
+
+int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
+			  unsigned int ivsize, unsigned int nbytes,
+			  void *info, struct scatterlist *src,
+			  struct scatterlist *dst, gfp_t flags)
+{
+	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
+	struct mlli_params *mlli_params = &req_ctx->mlli_params;
+	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+	struct device *dev = drvdata_to_dev(drvdata);
+	struct buffer_array sg_data;
+	u32 dummy = 0;
+	int rc = 0;
+	u32 mapped_nents = 0;
+
+	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
+	mlli_params->curr_pool = NULL;
+	sg_data.num_of_buffers = 0;
+
+	/* Map IV buffer */
+	if (ivsize) {
+		dump_byte_array("iv", (u8 *)info, ivsize);
+		req_ctx->gen_ctx.iv_dma_addr =
+			dma_map_single(dev, (void *)info,
+				       ivsize, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
+			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+				ivsize, info);
+			return -ENOMEM;
+		}
+		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
+	} else {
+		req_ctx->gen_ctx.iv_dma_addr = 0;
+	}
+
+	/* Map the src SGL */
+	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+	if (rc) {
+		rc = -ENOMEM;
+		goto cipher_exit;
+	}
+	if (mapped_nents > 1)
+		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+	if (src == dst) {
+		/* Handle inplace operation */
+		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+			req_ctx->out_nents = 0;
+			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+					nbytes, 0, true,
+					&req_ctx->in_mlli_nents);
+		}
+	} else {
+		/* Map the dst sg */
+		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			      &dummy, &mapped_nents)) {
+			rc = -ENOMEM;
+			goto cipher_exit;
+		}
+		if (mapped_nents > 1)
+			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+					nbytes, 0, true,
+					&req_ctx->in_mlli_nents);
+			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
+					nbytes, 0, true,
+					&req_ctx->out_mlli_nents);
+		}
+	}
+
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
+			goto cipher_exit;
+	}
+
+	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
+		cc_dma_buf_type(req_ctx->dma_buf_type));
+
+	return 0;
+
+cipher_exit:
+	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+	return rc;
+}
+
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+	u32 dummy;
+	bool chained;
+	u32 size_to_unmap = 0;
+
+	if (areq_ctx->mac_buf_dma_addr) {
+		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
+				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
+	}
+
+	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
+		if (areq_ctx->hkey_dma_addr) {
+			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
+					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
+		}
+
+		if (areq_ctx->gcm_block_len_dma_addr) {
+			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
+					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
+		}
+
+		if (areq_ctx->gcm_iv_inc1_dma_addr) {
+			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
+					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
+		}
+
+		if (areq_ctx->gcm_iv_inc2_dma_addr) {
+			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
+					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
+		}
+	}
+
+	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+		if (areq_ctx->ccm_iv0_dma_addr) {
+			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
+					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
+		}
+
+		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
+	}
+	if (areq_ctx->gen_ctx.iv_dma_addr) {
+		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
+				 hw_iv_size, DMA_BIDIRECTIONAL);
+	}
+
+	/* If a pool was set, a table was
+	 * allocated and should be released
+	 */
+	if (areq_ctx->mlli_params.curr_pool) {
+		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+			&areq_ctx->mlli_params.mlli_dma_addr,
+			areq_ctx->mlli_params.mlli_virt_addr);
+		dma_pool_free(areq_ctx->mlli_params.curr_pool,
+			      areq_ctx->mlli_params.mlli_virt_addr,
+			      areq_ctx->mlli_params.mlli_dma_addr);
+	}
+
+	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
+		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
+		req->assoclen, req->cryptlen);
+	size_to_unmap = req->assoclen + req->cryptlen;
+	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
+		size_to_unmap += areq_ctx->req_authsize;
+	if (areq_ctx->is_gcm4543)
+		size_to_unmap += crypto_aead_ivsize(tfm);
+
+	dma_unmap_sg(dev, req->src,
+		     cc_get_sgl_nents(dev, req->src, size_to_unmap,
+				      &dummy, &chained),
+		     DMA_BIDIRECTIONAL);
+	if (req->src != req->dst) {
+		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
+			sg_virt(req->dst));
+		dma_unmap_sg(dev, req->dst,
+			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
+					      &dummy, &chained),
+			     DMA_BIDIRECTIONAL);
+	}
+	if (drvdata->coherent &&
+	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+	    req->src == req->dst) {
+		/* copy the MAC back from its temporary location to deal
+		 * with possible overwriting of data caused by cache
+		 * coherency problems.
+		 */
+		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
+	}
+}
+
+static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
+				 unsigned int sgl_nents, unsigned int authsize,
+				 u32 last_entry_data_size,
+				 bool *is_icv_fragmented)
+{
+	unsigned int icv_max_size = 0;
+	unsigned int icv_required_size = authsize > last_entry_data_size ?
+					(authsize - last_entry_data_size) :
+					authsize;
+	unsigned int nents;
+	unsigned int i;
+
+	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
+		*is_icv_fragmented = false;
+		return 0;
+	}
+
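+	/* advance to the last MAX_ICV_NENTS_SUPPORTED entries; icv_max_size
+	 * is the length of the first of them
+	 */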
+	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
+		if (!sgl)
+			break;
+		sgl = sg_next(sgl);
+	}
+
+	if (sgl)
+		icv_max_size = sgl->length;
+
+	if (last_entry_data_size > authsize) {
+		/* ICV attached to data in last entry (not fragmented!) */
+		nents = 0;
+		*is_icv_fragmented = false;
+	} else if (last_entry_data_size == authsize) {
+		/* ICV placed in whole last entry (not fragmented!) */
+		nents = 1;
+		*is_icv_fragmented = false;
+	} else if (icv_max_size > icv_required_size) {
+		nents = 1;
+		*is_icv_fragmented = true;
+	} else if (icv_max_size == icv_required_size) {
+		nents = 2;
+		*is_icv_fragmented = true;
+	} else {
+		dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
+			MAX_ICV_NENTS_SUPPORTED);
+		nents = -1; /*unsupported*/
+	}
+	dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
+		(*is_icv_fragmented ? "true" : "false"), nents);
+
+	return nents;
+}
+
+static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
+			    struct aead_request *req,
+			    struct buffer_array *sg_data,
+			    bool is_last, bool do_chain)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
+	struct device *dev = drvdata_to_dev(drvdata);
+	int rc = 0;
+
+	if (!req->iv) {
+		areq_ctx->gen_ctx.iv_dma_addr = 0;
+		goto chain_iv_exit;
+	}
+
+	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
+						       hw_iv_size,
+						       DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
+		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+			hw_iv_size, req->iv);
+		rc = -ENOMEM;
+		goto chain_iv_exit;
+	}
+
+	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
+	// TODO: what about CTR?? ask Ron
+	if (do_chain && areq_ctx->plaintext_authenticate_only) {
+		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
+		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
+		/* Chain to given list */
+		cc_add_buffer_entry(dev, sg_data,
+				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
+				    iv_size_to_authenc, is_last,
+				    &areq_ctx->assoc.mlli_nents);
+		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
+	}
+
+chain_iv_exit:
+	return rc;
+}
+
+static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
+			       struct aead_request *req,
+			       struct buffer_array *sg_data,
+			       bool is_last, bool do_chain)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	int rc = 0;
+	u32 mapped_nents = 0;
+	struct scatterlist *current_sg = req->src;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	unsigned int sg_index = 0;
+	u32 size_of_assoc = req->assoclen;
+	struct device *dev = drvdata_to_dev(drvdata);
+
+	if (areq_ctx->is_gcm4543)
+		size_of_assoc += crypto_aead_ivsize(tfm);
+
+	if (!sg_data) {
+		rc = -EINVAL;
+		goto chain_assoc_exit;
+	}
+
+	if (req->assoclen == 0) {
+		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
+		areq_ctx->assoc.nents = 0;
+		areq_ctx->assoc.mlli_nents = 0;
+		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
+			cc_dma_buf_type(areq_ctx->assoc_buff_type),
+			areq_ctx->assoc.nents);
+		goto chain_assoc_exit;
+	}
+
+	//iterate over the sgl to see how many entries hold the associated data
+	//it is assumed that if we reach here, the sgl is already mapped
+	sg_index = current_sg->length;
+	//check whether the first entry in the sgl holds all the associated data
+	if (sg_index > size_of_assoc) {
+		mapped_nents++;
+	} else {
+		while (sg_index <= size_of_assoc) {
+			current_sg = sg_next(current_sg);
+			/* if we have reached the end of the sgl, this is
+			 * unexpected
+			 */
+			if (!current_sg) {
+				dev_err(dev, "reached end of sg list. unexpected\n");
+				return -EINVAL;
+			}
+			sg_index += current_sg->length;
+			mapped_nents++;
+		}
+	}
+	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
+		dev_err(dev, "Too many fragments. current %d max %d\n",
+			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+		return -ENOMEM;
+	}
+	areq_ctx->assoc.nents = mapped_nents;
+
+	/* in CCM case we have additional entry for
+	 * ccm header configurations
+	 */
+	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
+			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
+				(areq_ctx->assoc.nents + 1),
+				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+			rc = -ENOMEM;
+			goto chain_assoc_exit;
+		}
+	}
+
+	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
+		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
+	else
+		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
+
+	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
+		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
+			cc_dma_buf_type(areq_ctx->assoc_buff_type),
+			areq_ctx->assoc.nents);
+		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
+				req->assoclen, 0, is_last,
+				&areq_ctx->assoc.mlli_nents);
+		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
+	}
+
+chain_assoc_exit:
+	return rc;
+}
+
+static void cc_prepare_aead_data_dlli(struct aead_request *req,
+				      u32 *src_last_bytes, u32 *dst_last_bytes)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+	unsigned int authsize = areq_ctx->req_authsize;
+
+	areq_ctx->is_icv_fragmented = false;
+	if (req->src == req->dst) {
+		/*INPLACE*/
+		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
+			(*src_last_bytes - authsize);
+		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
+			(*src_last_bytes - authsize);
+	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+		/*NON-INPLACE and DECRYPT*/
+		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
+			(*src_last_bytes - authsize);
+		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
+			(*src_last_bytes - authsize);
+	} else {
+		/*NON-INPLACE and ENCRYPT*/
+		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
+			(*dst_last_bytes - authsize);
+		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
+			(*dst_last_bytes - authsize);
+	}
+}
+
+static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+				     struct aead_request *req,
+				     struct buffer_array *sg_data,
+				     u32 *src_last_bytes, u32 *dst_last_bytes,
+				     bool is_last_table)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+	unsigned int authsize = areq_ctx->req_authsize;
+	int rc = 0, icv_nents;
+	struct device *dev = drvdata_to_dev(drvdata);
+	struct scatterlist *sg;
+
+	if (req->src == req->dst) {
+		/*INPLACE*/
+		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+				areq_ctx->src_sgl, areq_ctx->cryptlen,
+				areq_ctx->src_offset, is_last_table,
+				&areq_ctx->src.mlli_nents);
+
+		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+						  areq_ctx->src.nents,
+						  authsize, *src_last_bytes,
+						  &areq_ctx->is_icv_fragmented);
+		if (icv_nents < 0) {
+			rc = -ENOTSUPP;
+			goto prepare_data_mlli_exit;
+		}
+
+		if (areq_ctx->is_icv_fragmented) {
+			/* Backup happens only when ICV is fragmented, ICV
+			 * verification is made by CPU compare in order to
+			 * simplify MAC verification upon request completion
+			 */
+			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+				/* On coherent platforms (e.g. ACP) the
+				 * ICV is already copied for any in-place
+				 * decrypt operation, hence this copy
+				 * must be skipped here.
+				 */
+				if (!drvdata->coherent)
+					cc_copy_mac(dev, req, CC_SG_TO_BUF);
+
+				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
+			} else {
+				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
+				areq_ctx->icv_dma_addr =
+					areq_ctx->mac_buf_dma_addr;
+			}
+		} else { /* Contig. ICV */
+			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
+			/* should handle a non-contiguous sg as well */
+			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
+				(*src_last_bytes - authsize);
+			areq_ctx->icv_virt_addr = sg_virt(sg) +
+				(*src_last_bytes - authsize);
+		}
+
+	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+		/*NON-INPLACE and DECRYPT*/
+		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+				areq_ctx->src_sgl, areq_ctx->cryptlen,
+				areq_ctx->src_offset, is_last_table,
+				&areq_ctx->src.mlli_nents);
+		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+				areq_ctx->dst_sgl, areq_ctx->cryptlen,
+				areq_ctx->dst_offset, is_last_table,
+				&areq_ctx->dst.mlli_nents);
+
+		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+						  areq_ctx->src.nents,
+						  authsize, *src_last_bytes,
+						  &areq_ctx->is_icv_fragmented);
+		if (icv_nents < 0) {
+			rc = -ENOTSUPP;
+			goto prepare_data_mlli_exit;
+		}
+
+		/* Backup happens only when ICV is fragmented, ICV
+		 * verification is made by CPU compare in order to simplify
+		 * MAC verification upon request completion
+		 */
+		if (areq_ctx->is_icv_fragmented) {
+			cc_copy_mac(dev, req, CC_SG_TO_BUF);
+			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
+
+		} else { /* Contig. ICV */
+			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
+			/* should handle a non-contiguous sg as well */
+			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
+				(*src_last_bytes - authsize);
+			areq_ctx->icv_virt_addr = sg_virt(sg) +
+				(*src_last_bytes - authsize);
+		}
+
+	} else {
+		/*NON-INPLACE and ENCRYPT*/
+		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+				areq_ctx->dst_sgl, areq_ctx->cryptlen,
+				areq_ctx->dst_offset, is_last_table,
+				&areq_ctx->dst.mlli_nents);
+		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+				areq_ctx->src_sgl, areq_ctx->cryptlen,
+				areq_ctx->src_offset, is_last_table,
+				&areq_ctx->src.mlli_nents);
+
+		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
+						  areq_ctx->dst.nents,
+						  authsize, *dst_last_bytes,
+						  &areq_ctx->is_icv_fragmented);
+		if (icv_nents < 0) {
+			rc = -ENOTSUPP;
+			goto prepare_data_mlli_exit;
+		}
+
+		if (!areq_ctx->is_icv_fragmented) {
+			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
+			/* Contig. ICV */
+			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
+				(*dst_last_bytes - authsize);
+			areq_ctx->icv_virt_addr = sg_virt(sg) +
+				(*dst_last_bytes - authsize);
+		} else {
+			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
+			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
+		}
+	}
+
+prepare_data_mlli_exit:
+	return rc;
+}
+
+static int cc_aead_chain_data(struct cc_drvdata *drvdata,
+			      struct aead_request *req,
+			      struct buffer_array *sg_data,
+			      bool is_last_table, bool do_chain)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	struct device *dev = drvdata_to_dev(drvdata);
+	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+	unsigned int authsize = areq_ctx->req_authsize;
+	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
+	int rc = 0;
+	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
+	u32 offset = 0;
+	/* non-inplace mode */
+	unsigned int size_for_map = req->assoclen + req->cryptlen;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	u32 sg_index = 0;
+	bool chained = false;
+	bool is_gcm4543 = areq_ctx->is_gcm4543;
+	u32 size_to_skip = req->assoclen;
+
+	if (is_gcm4543)
+		size_to_skip += crypto_aead_ivsize(tfm);
+
+	offset = size_to_skip;
+
+	if (!sg_data)
+		return -EINVAL;
+
+	areq_ctx->src_sgl = req->src;
+	areq_ctx->dst_sgl = req->dst;
+
+	if (is_gcm4543)
+		size_for_map += crypto_aead_ivsize(tfm);
+
+	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+			authsize : 0;
+	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
+					    &src_last_bytes, &chained);
+	sg_index = areq_ctx->src_sgl->length;
+	//check where the data starts
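+	//skip over the assoc data (and the gcm4543 IV): after this loop
+	//src_sgl points at the first entry holding cipher data and offset is
+	//the data offset within that entry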
+	while (sg_index <= size_to_skip) {
+		offset -= areq_ctx->src_sgl->length;
+		areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
+		//if we have reached the end of the sgl, this is unexpected
+		if (!areq_ctx->src_sgl) {
+			dev_err(dev, "reached end of sg list. unexpected\n");
+			return -EINVAL;
+		}
+		sg_index += areq_ctx->src_sgl->length;
+		src_mapped_nents--;
+	}
+	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
+		dev_err(dev, "Too many fragments. current %d max %d\n",
+			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+		return -ENOMEM;
+	}
+
+	areq_ctx->src.nents = src_mapped_nents;
+
+	areq_ctx->src_offset = offset;
+
+	if (req->src != req->dst) {
+		size_for_map = req->assoclen + req->cryptlen;
+		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+				authsize : 0;
+		if (is_gcm4543)
+			size_for_map += crypto_aead_ivsize(tfm);
+
+		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+			       &areq_ctx->dst.nents,
+			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
+			       &dst_mapped_nents);
+		if (rc) {
+			rc = -ENOMEM;
+			goto chain_data_exit;
+		}
+	}
+
+	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
+					    &dst_last_bytes, &chained);
+	sg_index = areq_ctx->dst_sgl->length;
+	offset = size_to_skip;
+
+	//check where the data starts
+	while (sg_index <= size_to_skip) {
+		offset -= areq_ctx->dst_sgl->length;
+		areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
+		//if we have reached the end of the sgl, this is unexpected
+		if (!areq_ctx->dst_sgl) {
+			dev_err(dev, "reached end of sg list. unexpected\n");
+			return -EINVAL;
+		}
+		sg_index += areq_ctx->dst_sgl->length;
+		dst_mapped_nents--;
+	}
+	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
+		dev_err(dev, "Too many fragments. current %d max %d\n",
+			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+		return -ENOMEM;
+	}
+	areq_ctx->dst.nents = dst_mapped_nents;
+	areq_ctx->dst_offset = offset;
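+	/* A single mapped entry on each side can be referenced directly
+	 * (DLLI); multiple fragments, or an explicit chaining request,
+	 * need an MLLI table instead.
+	 */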
+	if (src_mapped_nents > 1 ||
+	    dst_mapped_nents > 1 ||
+	    do_chain) {
+		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
+		rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+					       &src_last_bytes,
+					       &dst_last_bytes, is_last_table);
+	} else {
+		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
+		cc_prepare_aead_data_dlli(req, &src_last_bytes,
+					  &dst_last_bytes);
+	}
+
+chain_data_exit:
+	return rc;
+}
+
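+/*
+ * Assign the SRAM addresses of the MLLI tables: the assoc table (if any)
+ * comes first, followed by the src/dst data tables. In the double-pass
+ * flow the data MLLI entries are also counted as part of the assoc chain.
+ */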
+static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
+				      struct aead_request *req)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	u32 curr_mlli_size = 0;
+
+	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
+		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
+		curr_mlli_size = areq_ctx->assoc.mlli_nents *
+						LLI_ENTRY_BYTE_SIZE;
+	}
+
+	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
+		/* In-place case: dst nents are equal to src nents */
+		if (req->src == req->dst) {
+			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
+			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
+								curr_mlli_size;
+			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
+			if (!areq_ctx->is_single_pass)
+				areq_ctx->assoc.mlli_nents +=
+					areq_ctx->src.mlli_nents;
+		} else {
+			if (areq_ctx->gen_ctx.op_type ==
+					DRV_CRYPTO_DIRECTION_DECRYPT) {
+				areq_ctx->src.sram_addr =
+						drvdata->mlli_sram_addr +
+								curr_mlli_size;
+				areq_ctx->dst.sram_addr =
+						areq_ctx->src.sram_addr +
+						areq_ctx->src.mlli_nents *
+						LLI_ENTRY_BYTE_SIZE;
+				if (!areq_ctx->is_single_pass)
+					areq_ctx->assoc.mlli_nents +=
+						areq_ctx->src.mlli_nents;
+			} else {
+				areq_ctx->dst.sram_addr =
+						drvdata->mlli_sram_addr +
+								curr_mlli_size;
+				areq_ctx->src.sram_addr =
+						areq_ctx->dst.sram_addr +
+						areq_ctx->dst.mlli_nents *
+						LLI_ENTRY_BYTE_SIZE;
+				if (!areq_ctx->is_single_pass)
+					areq_ctx->assoc.mlli_nents +=
+						areq_ctx->dst.mlli_nents;
+			}
+		}
+	}
+}
+
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
+{
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+	struct device *dev = drvdata_to_dev(drvdata);
+	struct buffer_array sg_data;
+	unsigned int authsize = areq_ctx->req_authsize;
+	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+	int rc = 0;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	bool is_gcm4543 = areq_ctx->is_gcm4543;
+	dma_addr_t dma_addr;
+	u32 mapped_nents = 0;
+	u32 dummy = 0; /*used for the assoc data fragments */
+	u32 size_to_map = 0;
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	mlli_params->curr_pool = NULL;
+	sg_data.num_of_buffers = 0;
+
+	/* Copy the MAC to a temporary location to deal with possible
+	 * data memory overwrites caused by cache coherency problems.
+	 */
+	if (drvdata->coherent &&
+	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+	    req->src == req->dst)
+		cc_copy_mac(dev, req, CC_SG_TO_BUF);
+
+	/* Calculate the cipher data size (the ICV is removed on decrypt) */
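+	/* e.g. with a 16 byte tag and req->cryptlen of 160, only 144 bytes
+	 * of cipher data are processed on decrypt
+	 */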
+	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
+				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+				req->cryptlen :
+				(req->cryptlen - authsize);
+
+	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
+				  DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, dma_addr)) {
+		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+			MAX_MAC_SIZE, areq_ctx->mac_buf);
+		rc = -ENOMEM;
+		goto aead_map_failure;
+	}
+	areq_ctx->mac_buf_dma_addr = dma_addr;
+
+	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
+
+		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
+					  DMA_TO_DEVICE);
+
+		if (dma_mapping_error(dev, dma_addr)) {
+			dev_err(dev, "Mapping ccm_iv0 %u B at va=%pK for DMA failed\n",
+				AES_BLOCK_SIZE, addr);
+			areq_ctx->ccm_iv0_dma_addr = 0;
+			rc = -ENOMEM;
+			goto aead_map_failure;
+		}
+		areq_ctx->ccm_iv0_dma_addr = dma_addr;
+
+		if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+					 &sg_data, req->assoclen)) {
+			rc = -ENOMEM;
+			goto aead_map_failure;
+		}
+	}
+
+	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
+		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
+					  DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(dev, dma_addr)) {
+			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
+				AES_BLOCK_SIZE, areq_ctx->hkey);
+			rc = -ENOMEM;
+			goto aead_map_failure;
+		}
+		areq_ctx->hkey_dma_addr = dma_addr;
+
+		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
+					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_addr)) {
+			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
+				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
+			rc = -ENOMEM;
+			goto aead_map_failure;
+		}
+		areq_ctx->gcm_block_len_dma_addr = dma_addr;
+
+		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
+					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
+
+		if (dma_mapping_error(dev, dma_addr)) {
+			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
+				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
+			areq_ctx->gcm_iv_inc1_dma_addr = 0;
+			rc = -ENOMEM;
+			goto aead_map_failure;
+		}
+		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
+
+		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
+					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
+
+		if (dma_mapping_error(dev, dma_addr)) {
+			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
+				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
+			areq_ctx->gcm_iv_inc2_dma_addr = 0;
+			rc = -ENOMEM;
+			goto aead_map_failure;
+		}
+		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
+	}
+
+	size_to_map = req->cryptlen + req->assoclen;
+	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
+		size_to_map += authsize;
+
+	if (is_gcm4543)
+		size_to_map += crypto_aead_ivsize(tfm);
+	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+		       &areq_ctx->src.nents,
+		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
+			LLI_MAX_NUM_OF_DATA_ENTRIES),
+		       &dummy, &mapped_nents);
+	if (rc) {
+		rc = -ENOMEM;
+		goto aead_map_failure;
+	}
+
+	if (areq_ctx->is_single_pass) {
+		/*
+		 * Create MLLI table for:
+		 *   (1) Assoc. data
+		 *   (2) Src/Dst SGLs
+		 *   Note: IV is a contiguous buffer (not an SGL)
+		 */
+		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
+		if (rc)
+			goto aead_map_failure;
+		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
+		if (rc)
+			goto aead_map_failure;
+		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
+		if (rc)
+			goto aead_map_failure;
+	} else { /* DOUBLE-PASS flow */
+		/*
+		 * Prepare MLLI table(s) in this order:
+		 *
+		 * If ENCRYPT/DECRYPT (inplace):
+		 *   (1) MLLI table for assoc
+		 *   (2) IV entry (chained right after end of assoc)
+		 *   (3) MLLI for src/dst (inplace operation)
+		 *
+		 * If ENCRYPT (non-inplace)
+		 *   (1) MLLI table for assoc
+		 *   (2) IV entry (chained right after end of assoc)
+		 *   (3) MLLI for dst
+		 *   (4) MLLI for src
+		 *
+		 * If DECRYPT (non-inplace)
+		 *   (1) MLLI table for assoc
+		 *   (2) IV entry (chained right after end of assoc)
+		 *   (3) MLLI for src
+		 *   (4) MLLI for dst
+		 */
+		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
+		if (rc)
+			goto aead_map_failure;
+		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
+		if (rc)
+			goto aead_map_failure;
+		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
+		if (rc)
+			goto aead_map_failure;
+	}
+
+	/* MLLI support - start building the MLLI according to the above
+	 * results
+	 */
+	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
+		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
+			goto aead_map_failure;
+
+		cc_update_aead_mlli_nents(drvdata, req);
+		dev_dbg(dev, "assoc params mn %d\n",
+			areq_ctx->assoc.mlli_nents);
+		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
+		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
+	}
+	return 0;
+
+aead_map_failure:
+	cc_unmap_aead_request(dev, req);
+	return rc;
+}
+
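+/*
+ * Map the data for a final hash operation: any bytes still held in the
+ * context buffer are mapped first, followed by the new src data (if any).
+ * A single flat src entry with no buffered data is handled as DLLI;
+ * anything else gets an MLLI table.
+ */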
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+			      struct scatterlist *src, unsigned int nbytes,
+			      bool do_update, gfp_t flags)
+{
+	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+	struct device *dev = drvdata_to_dev(drvdata);
+	u8 *curr_buff = cc_hash_buf(areq_ctx);
+	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+	struct buffer_array sg_data;
+	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+	u32 dummy = 0;
+	u32 mapped_nents = 0;
+
+	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+	/* Init the type of the dma buffer */
+	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+	mlli_params->curr_pool = NULL;
+	sg_data.num_of_buffers = 0;
+	areq_ctx->in_nents = 0;
+
+	if (nbytes == 0 && *curr_buff_cnt == 0) {
+		/* nothing to do */
+		return 0;
+	}
+
+	/* TODO: copy the data in case the buffer is large enough for the operation */
+	/* map the previous buffer */
+	if (*curr_buff_cnt) {
+		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+				    &sg_data)) {
+			return -ENOMEM;
+		}
+	}
+
+	if (src && nbytes > 0 && do_update) {
+		if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+			      &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			      &dummy, &mapped_nents)) {
+			goto unmap_curr_buff;
+		}
+		if (src && mapped_nents == 1 &&
+		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+			memcpy(areq_ctx->buff_sg, src,
+			       sizeof(struct scatterlist));
+			areq_ctx->buff_sg->length = nbytes;
+			areq_ctx->curr_sg = areq_ctx->buff_sg;
+			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+		} else {
+			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
+		}
+	}
+
+	/* build MLLI */
+	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
+		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		/* add the src data to the sg_data */
+		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
+				0, true, &areq_ctx->mlli_nents);
+		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+			goto fail_unmap_din;
+	}
+	/* change the buffer index for the unmap function */
+	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
+	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
+		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
+	return 0;
+
+fail_unmap_din:
+	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+	if (*curr_buff_cnt)
+		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+	return -ENOMEM;
+}
+
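+/*
+ * Map the data for a hash update. Only whole blocks are handed to the HW:
+ * if the total of buffered plus new data is less than one block it is just
+ * copied into the context buffer (returning 1), otherwise the partial-block
+ * residue is copied into the second buffer for the next update.
+ */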
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+			       struct scatterlist *src, unsigned int nbytes,
+			       unsigned int block_size, gfp_t flags)
+{
+	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+	struct device *dev = drvdata_to_dev(drvdata);
+	u8 *curr_buff = cc_hash_buf(areq_ctx);
+	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+	u8 *next_buff = cc_next_buf(areq_ctx);
+	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
+	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+	unsigned int update_data_len;
+	u32 total_in_len = nbytes + *curr_buff_cnt;
+	struct buffer_array sg_data;
+	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+	unsigned int swap_index = 0;
+	u32 dummy = 0;
+	u32 mapped_nents = 0;
+
+	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+	/* Init the type of the dma buffer */
+	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+	mlli_params->curr_pool = NULL;
+	areq_ctx->curr_sg = NULL;
+	sg_data.num_of_buffers = 0;
+	areq_ctx->in_nents = 0;
+
+	if (total_in_len < block_size) {
+		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
+		areq_ctx->in_nents =
+			cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+		sg_copy_to_buffer(src, areq_ctx->in_nents,
+				  &curr_buff[*curr_buff_cnt], nbytes);
+		*curr_buff_cnt += nbytes;
+		return 1;
+	}
+
+	/* Calculate the residue size */
+	*next_buff_cnt = total_in_len & (block_size - 1);
+	/* update data len */
+	update_data_len = total_in_len - *next_buff_cnt;
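+	/* Hash block sizes are powers of two, so the masking above leaves
+	 * the partial-block residue; e.g. block_size 64 and total_in_len 100
+	 * give *next_buff_cnt = 36 and update_data_len = 64.
+	 */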
+
+	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
+		*next_buff_cnt, update_data_len);
+
+	/* Copy the new residue to next buffer */
+	if (*next_buff_cnt) {
+		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+			next_buff, (update_data_len - *curr_buff_cnt),
+			*next_buff_cnt);
+		cc_copy_sg_portion(dev, next_buff, src,
+				   (update_data_len - *curr_buff_cnt),
+				   nbytes, CC_SG_TO_BUF);
+		/* change the buffer index for next operation */
+		swap_index = 1;
+	}
+
+	if (*curr_buff_cnt) {
+		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+				    &sg_data)) {
+			return -ENOMEM;
+		}
+		/* change the buffer index for next operation */
+		swap_index = 1;
+	}
+
+	if (update_data_len > *curr_buff_cnt) {
+		if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+			      DMA_TO_DEVICE, &areq_ctx->in_nents,
+			      LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+			      &mapped_nents)) {
+			goto unmap_curr_buff;
+		}
+		if (mapped_nents == 1 &&
+		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+			/* only one entry in the SG and no previous data */
+			memcpy(areq_ctx->buff_sg, src,
+			       sizeof(struct scatterlist));
+			areq_ctx->buff_sg->length = update_data_len;
+			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+			areq_ctx->curr_sg = areq_ctx->buff_sg;
+		} else {
+			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
+		}
+	}
+
+	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
+		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		/* add the src data to the sg_data */
+		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
+				(update_data_len - *curr_buff_cnt), 0, true,
+				&areq_ctx->mlli_nents);
+		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+			goto fail_unmap_din;
+	}
+	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
+
+	return 0;
+
+fail_unmap_din:
+	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+	if (*curr_buff_cnt)
+		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+	return -ENOMEM;
+}
+
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+			   struct scatterlist *src, bool do_revert)
+{
+	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+	u32 *prev_len = cc_next_buf_cnt(areq_ctx);
+
+	/* If a pool was set, a table was
+	 * allocated and should be released
+	 */
+	if (areq_ctx->mlli_params.curr_pool) {
+		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+			&areq_ctx->mlli_params.mlli_dma_addr,
+			areq_ctx->mlli_params.mlli_virt_addr);
+		dma_pool_free(areq_ctx->mlli_params.curr_pool,
+			      areq_ctx->mlli_params.mlli_virt_addr,
+			      areq_ctx->mlli_params.mlli_dma_addr);
+	}
+
+	if (src && areq_ctx->in_nents) {
+		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
+		dma_unmap_sg(dev, src,
+			     areq_ctx->in_nents, DMA_TO_DEVICE);
+	}
+
+	if (*prev_len) {
+		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+			sg_virt(areq_ctx->buff_sg),
+			&sg_dma_address(areq_ctx->buff_sg),
+			sg_dma_len(areq_ctx->buff_sg));
+		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+		if (!do_revert) {
+			/* clean the previous data length for update
+			 * operation
+			 */
+			*prev_len = 0;
+		} else {
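+			/* revert the buffer index swap done at map time */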
+			areq_ctx->buff_index ^= 1;
+		}
+	}
+}
+
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
+{
+	struct buff_mgr_handle *buff_mgr_handle;
+	struct device *dev = drvdata_to_dev(drvdata);
+
+	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
+	if (!buff_mgr_handle)
+		return -ENOMEM;
+
+	drvdata->buff_mgr_handle = buff_mgr_handle;
+
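+	/* Pool of buffers big enough to hold a maximum-size MLLI table */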
+	buff_mgr_handle->mlli_buffs_pool =
+		dma_pool_create("dx_single_mlli_tables", dev,
+				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
+				LLI_ENTRY_BYTE_SIZE,
+				MLLI_TABLE_MIN_ALIGNMENT, 0);
+
+	if (!buff_mgr_handle->mlli_buffs_pool)
+		goto error;
+
+	return 0;
+
+error:
+	cc_buffer_mgr_fini(drvdata);
+	return -ENOMEM;
+}
+
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
+{
+	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
+
+	if (buff_mgr_handle) {
+		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
+		kfree(drvdata->buff_mgr_handle);
+		drvdata->buff_mgr_handle = NULL;
+	}
+	return 0;
+}
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
new file mode 100644
index 0000000..3ec4b4d
--- /dev/null
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_buffer_mgr.h
+ * Buffer Manager
+ */
+
+#ifndef __CC_BUFFER_MGR_H__
+#define __CC_BUFFER_MGR_H__
+
+#include <crypto/algapi.h>
+
+#include "cc_driver.h"
+
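+/*
+ * How a request's data is described to the DMA engine:
+ * DLLI - a single contiguous buffer referenced directly from the HW
+ *        descriptor.
+ * MLLI - a table of link-list entries, loaded into SRAM, used when the
+ *        data is fragmented.
+ */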
+enum cc_req_dma_buf_type {
+	CC_DMA_BUF_NULL = 0,
+	CC_DMA_BUF_DLLI,
+	CC_DMA_BUF_MLLI
+};
+
+enum cc_sg_cpy_direct {
+	CC_SG_TO_BUF = 0,
+	CC_SG_FROM_BUF = 1
+};
+
+struct cc_mlli {
+	cc_sram_addr_t sram_addr;
+	unsigned int nents; //sg nents
+	unsigned int mlli_nents; //mlli nents might be different than the above
+};
+
+struct mlli_params {
+	struct dma_pool *curr_pool;
+	u8 *mlli_virt_addr;
+	dma_addr_t mlli_dma_addr;
+	u32 mlli_len;
+};
+
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata);
+
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);
+
+int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
+			  unsigned int ivsize, unsigned int nbytes,
+			  void *info, struct scatterlist *src,
+			  struct scatterlist *dst, gfp_t flags);
+
+void cc_unmap_cipher_request(struct device *dev, void *ctx, unsigned int ivsize,
+			     struct scatterlist *src, struct scatterlist *dst);
+
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req);
+
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req);
+
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+			      struct scatterlist *src, unsigned int nbytes,
+			      bool do_update, gfp_t flags);
+
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+			       struct scatterlist *src, unsigned int nbytes,
+			       unsigned int block_size, gfp_t flags);
+
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+			   struct scatterlist *src, bool do_revert);
+
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);
+
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
+
+#endif /* __CC_BUFFER_MGR_H__ */
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
new file mode 100644
index 0000000..7623b29
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -0,0 +1,1436 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/des.h>
+#include <crypto/xts.h>
+#include <crypto/scatterwalk.h>
+
+#include "cc_driver.h"
+#include "cc_lli_defs.h"
+#include "cc_buffer_mgr.h"
+#include "cc_cipher.h"
+#include "cc_request_mgr.h"
+
+#define MAX_ABLKCIPHER_SEQ_LEN 6
+
+#define template_skcipher	template_u.skcipher
+
+struct cc_cipher_handle {
+	struct list_head alg_list;
+};
+
+struct cc_user_key_info {
+	u8 *key;
+	dma_addr_t key_dma_addr;
+};
+
+struct cc_hw_key_info {
+	enum cc_hw_crypto_key key1_slot;
+	enum cc_hw_crypto_key key2_slot;
+};
+
+struct cc_cipher_ctx {
+	struct cc_drvdata *drvdata;
+	int keylen;
+	int key_round_number;
+	int cipher_mode;
+	int flow_mode;
+	unsigned int flags;
+	bool hw_key;
+	struct cc_user_key_info user;
+	struct cc_hw_key_info hw;
+	struct crypto_shash *shash_tfm;
+};
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
+
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+
+	return ctx_p->hw_key;
+}
+
+static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
+{
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		switch (size) {
+		case CC_AES_128_BIT_KEY_SIZE:
+		case CC_AES_192_BIT_KEY_SIZE:
+			if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
+			    ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
+			    ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
+				return 0;
+			break;
+		case CC_AES_256_BIT_KEY_SIZE:
+			return 0;
+		case (CC_AES_192_BIT_KEY_SIZE * 2):
+		case (CC_AES_256_BIT_KEY_SIZE * 2):
+			if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+			    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+			    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
+				return 0;
+			break;
+		default:
+			break;
+		}
+		break;
+	case S_DIN_to_DES:
+		if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
+			return 0;
+		break;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int validate_data_size(struct cc_cipher_ctx *ctx_p,
+			      unsigned int size)
+{
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		switch (ctx_p->cipher_mode) {
+		case DRV_CIPHER_XTS:
+			if (size >= AES_BLOCK_SIZE &&
+			    IS_ALIGNED(size, AES_BLOCK_SIZE))
+				return 0;
+			break;
+		case DRV_CIPHER_CBC_CTS:
+			if (size >= AES_BLOCK_SIZE)
+				return 0;
+			break;
+		case DRV_CIPHER_OFB:
+		case DRV_CIPHER_CTR:
+			return 0;
+		case DRV_CIPHER_ECB:
+		case DRV_CIPHER_CBC:
+		case DRV_CIPHER_ESSIV:
+		case DRV_CIPHER_BITLOCKER:
+			if (IS_ALIGNED(size, AES_BLOCK_SIZE))
+				return 0;
+			break;
+		default:
+			break;
+		}
+		break;
+	case S_DIN_to_DES:
+		if (IS_ALIGNED(size, DES_BLOCK_SIZE))
+			return 0;
+		break;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int cc_cipher_init(struct crypto_tfm *tfm)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct cc_crypto_alg *cc_alg =
+			container_of(tfm->__crt_alg, struct cc_crypto_alg,
+				     skcipher_alg.base);
+	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
+	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+	int rc = 0;
+
+	dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
+		crypto_tfm_alg_name(tfm));
+
+	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+				    sizeof(struct cipher_req_ctx));
+
+	ctx_p->cipher_mode = cc_alg->cipher_mode;
+	ctx_p->flow_mode = cc_alg->flow_mode;
+	ctx_p->drvdata = cc_alg->drvdata;
+
+	/* Allocate key buffer, cache line aligned */
+	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
+	if (!ctx_p->user.key)
+		return -ENOMEM;
+
+	dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
+		ctx_p->user.key);
+
+	/* Map key buffer */
+	ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
+						  max_key_buf_size,
+						  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
+		dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+			max_key_buf_size, ctx_p->user.key);
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+		max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* Alloc hash tfm for essiv */
+		ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
+		if (IS_ERR(ctx_p->shash_tfm)) {
+			dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
+			return PTR_ERR(ctx_p->shash_tfm);
+		}
+	}
+
+	return rc;
+}
+
+static void cc_cipher_exit(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct cc_crypto_alg *cc_alg =
+			container_of(alg, struct cc_crypto_alg,
+				     skcipher_alg.base);
+	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+
+	dev_dbg(dev, "Clearing context @%p for %s\n",
+		crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* Free hash tfm for essiv */
+		crypto_free_shash(ctx_p->shash_tfm);
+		ctx_p->shash_tfm = NULL;
+	}
+
+	/* Unmap key buffer */
+	dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
+			 DMA_TO_DEVICE);
+	dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n",
+		&ctx_p->user.key_dma_addr);
+
+	/* Free key buffer in context */
+	kzfree(ctx_p->user.key);
+	dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
+}
+
+struct tdes_keys {
+	u8	key1[DES_KEY_SIZE];
+	u8	key2[DES_KEY_SIZE];
+	u8	key3[DES_KEY_SIZE];
+};
+
+static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
+{
+	switch (slot_num) {
+	case 0:
+		return KFDE0_KEY;
+	case 1:
+		return KFDE1_KEY;
+	case 2:
+		return KFDE2_KEY;
+	case 3:
+		return KFDE3_KEY;
+	}
+	return END_OF_KEYS;
+}
+
+static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
+			     unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	struct cc_hkey_info hki;
+
+	dev_dbg(dev, "Setting HW key in context @%p for %s. keylen=%u\n",
+		ctx_p, crypto_tfm_alg_name(tfm), keylen);
+	dump_byte_array("key", (u8 *)key, keylen);
+
+	/* STAT_PHASE_0: Init and sanity checks */
+
+	/* This checks the size of the hardware key token */
+	if (keylen != sizeof(hki)) {
+		dev_err(dev, "Unsupported HW key size %d.\n", keylen);
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if (ctx_p->flow_mode != S_DIN_to_AES) {
+		dev_err(dev, "HW key not supported for non-AES flows\n");
+		return -EINVAL;
+	}
+
+	memcpy(&hki, key, keylen);
+
+	/* The real key len for crypto op is the size of the HW key
+	 * referenced by the HW key slot, not the hardware key token
+	 */
+	keylen = hki.keylen;
+
+	if (validate_keys_sizes(ctx_p, keylen)) {
+		dev_err(dev, "Unsupported key size %d.\n", keylen);
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
+	if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+		dev_err(dev, "Unsupported hw key1 number (%d)\n", hki.hw_key1);
+		return -EINVAL;
+	}
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+	    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+	    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+		if (hki.hw_key1 == hki.hw_key2) {
+			dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+				hki.hw_key1, hki.hw_key2);
+			return -EINVAL;
+		}
+		ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
+		if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+			dev_err(dev, "Unsupported hw key2 number (%d)\n",
+				hki.hw_key2);
+			return -EINVAL;
+		}
+	}
+
+	ctx_p->keylen = keylen;
+	ctx_p->hw_key = true;
+	dev_dbg(dev, "cc_is_hw_key ret 0\n");
+
+	return 0;
+}
+
+static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
+			    unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	u32 tmp[DES3_EDE_EXPKEY_WORDS];
+	struct cc_crypto_alg *cc_alg =
+			container_of(tfm->__crt_alg, struct cc_crypto_alg,
+				     skcipher_alg.base);
+	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+
+	dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
+		ctx_p, crypto_tfm_alg_name(tfm), keylen);
+	dump_byte_array("key", (u8 *)key, keylen);
+
+	/* STAT_PHASE_0: Init and sanity checks */
+
+	if (validate_keys_sizes(ctx_p, keylen)) {
+		dev_err(dev, "Unsupported key size %d.\n", keylen);
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx_p->hw_key = false;
+
+	/*
+	 * Verify DES weak keys
+	 * Note that we're dropping the expanded key since the
+	 * HW does the expansion on its own.
+	 */
+	if (ctx_p->flow_mode == S_DIN_to_DES) {
+		if (keylen == DES3_EDE_KEY_SIZE &&
+		    __des3_ede_setkey(tmp, &tfm->crt_flags, key,
+				      DES3_EDE_KEY_SIZE)) {
+			dev_dbg(dev, "weak 3DES key");
+			return -EINVAL;
+		} else if (!des_ekey(tmp, key) &&
+		    (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+			dev_dbg(dev, "weak DES key");
+			return -EINVAL;
+		}
+	}
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
+	    xts_check_key(tfm, key, keylen)) {
+		dev_dbg(dev, "weak XTS key");
+		return -EINVAL;
+	}
+
+	/* STAT_PHASE_1: Copy key to ctx */
+	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
+				max_key_buf_size, DMA_TO_DEVICE);
+
+	memcpy(ctx_p->user.key, key, keylen);
+	if (keylen == 24)
+		memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* sha256 for key2 - use sw implementation */
+		int key_len = keylen >> 1;
+		int err;
+
+		SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
+
+		desc->tfm = ctx_p->shash_tfm;
+
+		err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
+					  ctx_p->user.key + key_len);
+		if (err) {
+			dev_err(dev, "Failed to hash ESSIV key.\n");
+			return err;
+		}
+	}
+	dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
+				   max_key_buf_size, DMA_TO_DEVICE);
+	ctx_p->keylen = keylen;
+
+	dev_dbg(dev, "return safely\n");
+	return 0;
+}
+
+static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+				 struct cipher_req_ctx *req_ctx,
+				 unsigned int ivsize, unsigned int nbytes,
+				 struct cc_hw_desc desc[],
+				 unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	int cipher_mode = ctx_p->cipher_mode;
+	int flow_mode = ctx_p->flow_mode;
+	int direction = req_ctx->gen_ctx.op_type;
+	dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+	unsigned int key_len = ctx_p->keylen;
+	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+	unsigned int du_size = nbytes;
+
+	struct cc_crypto_alg *cc_alg =
+		container_of(tfm->__crt_alg, struct cc_crypto_alg,
+			     skcipher_alg.base);
+
+	if (cc_alg->data_unit)
+		du_size = cc_alg->data_unit;
+
+	switch (cipher_mode) {
+	case DRV_CIPHER_CBC:
+	case DRV_CIPHER_CBC_CTS:
+	case DRV_CIPHER_CTR:
+	case DRV_CIPHER_OFB:
+		/* Load cipher state */
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
+			     NS_BIT);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		if (cipher_mode == DRV_CIPHER_CTR ||
+		    cipher_mode == DRV_CIPHER_OFB) {
+			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+		} else {
+			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
+		}
+		(*seq_size)++;
+		/*FALLTHROUGH*/
+	case DRV_CIPHER_ECB:
+		/* Load key */
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		if (flow_mode == S_DIN_to_AES) {
+			if (cc_is_hw_key(tfm)) {
+				set_hw_crypto_key(&desc[*seq_size],
+						  ctx_p->hw.key1_slot);
+			} else {
+				set_din_type(&desc[*seq_size], DMA_DLLI,
+					     key_dma_addr, ((key_len == 24) ?
+							    AES_MAX_KEY_SIZE :
+							    key_len), NS_BIT);
+			}
+			set_key_size_aes(&desc[*seq_size], key_len);
+		} else {
+			/*des*/
+			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+				     key_len, NS_BIT);
+			set_key_size_des(&desc[*seq_size], key_len);
+		}
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+		(*seq_size)++;
+		break;
+	case DRV_CIPHER_XTS:
+	case DRV_CIPHER_ESSIV:
+	case DRV_CIPHER_BITLOCKER:
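+		/* The key buffer holds two half-length keys: the cipher key
+		 * followed by the second half used as the XEX key
+		 */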
+		/* Load AES key */
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		if (cc_is_hw_key(tfm)) {
+			set_hw_crypto_key(&desc[*seq_size],
+					  ctx_p->hw.key1_slot);
+		} else {
+			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+				     (key_len / 2), NS_BIT);
+		}
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+		(*seq_size)++;
+
+		/* load XEX key */
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		if (cc_is_hw_key(tfm)) {
+			set_hw_crypto_key(&desc[*seq_size],
+					  ctx_p->hw.key2_slot);
+		} else {
+			set_din_type(&desc[*seq_size], DMA_DLLI,
+				     (key_dma_addr + (key_len / 2)),
+				     (key_len / 2), NS_BIT);
+		}
+		set_xex_data_unit_size(&desc[*seq_size], du_size);
+		set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
+		(*seq_size)++;
+
+		/* Set state */
+		hw_desc_init(&desc[*seq_size]);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
+			     CC_AES_BLOCK_SIZE, NS_BIT);
+		(*seq_size)++;
+		break;
+	default:
+		dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+	}
+}
+
+static void cc_setup_cipher_data(struct crypto_tfm *tfm,
+				 struct cipher_req_ctx *req_ctx,
+				 struct scatterlist *dst,
+				 struct scatterlist *src, unsigned int nbytes,
+				 void *areq, struct cc_hw_desc desc[],
+				 unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	unsigned int flow_mode = ctx_p->flow_mode;
+
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		flow_mode = DIN_AES_DOUT;
+		break;
+	case S_DIN_to_DES:
+		flow_mode = DIN_DES_DOUT;
+		break;
+	default:
+		dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
+		return;
+	}
+	/* Process */
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
+		dev_dbg(dev, " data params addr %pad length 0x%X\n",
+			&sg_dma_address(src), nbytes);
+		dev_dbg(dev, " data params addr %pad length 0x%X\n",
+			&sg_dma_address(dst), nbytes);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+			     nbytes, NS_BIT);
+		set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+			      nbytes, NS_BIT, (!areq ? 0 : 1));
+		if (areq)
+			set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		(*seq_size)++;
+	} else {
+		/* bypass: first DMA the MLLI table into SRAM, then let the
+		 * data descriptor reference it there
+		 */
+		dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
+			&req_ctx->mlli_params.mlli_dma_addr,
+			req_ctx->mlli_params.mlli_len,
+			(unsigned int)ctx_p->drvdata->mlli_sram_addr);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI,
+			     req_ctx->mlli_params.mlli_dma_addr,
+			     req_ctx->mlli_params.mlli_len, NS_BIT);
+		set_dout_sram(&desc[*seq_size],
+			      ctx_p->drvdata->mlli_sram_addr,
+			      req_ctx->mlli_params.mlli_len);
+		set_flow_mode(&desc[*seq_size], BYPASS);
+		(*seq_size)++;
+
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_MLLI,
+			     ctx_p->drvdata->mlli_sram_addr,
+			     req_ctx->in_mlli_nents, NS_BIT);
+		if (req_ctx->out_nents == 0) {
+			dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr);
+			set_dout_mlli(&desc[*seq_size],
+				      ctx_p->drvdata->mlli_sram_addr,
+				      req_ctx->in_mlli_nents, NS_BIT,
+				      (!areq ? 0 : 1));
+		} else {
+			dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr +
+				(u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
+			set_dout_mlli(&desc[*seq_size],
+				      (ctx_p->drvdata->mlli_sram_addr +
+				       (LLI_ENTRY_BYTE_SIZE *
+					req_ctx->in_mlli_nents)),
+				      req_ctx->out_mlli_nents, NS_BIT,
+				      (!areq ? 0 : 1));
+		}
+		if (areq)
+			set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		(*seq_size)++;
+	}
+}
+
+/*
+ * Update a CTR-AES 128 bit counter
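+ *
+ * The counter is big-endian: when the buffer is suitably aligned it is
+ * handled as two 64-bit big-endian words, adding the increment to the low
+ * word and carrying into the high word on overflow; otherwise it falls
+ * back to byte-wise increments.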
+ */
+static void cc_update_ctr(u8 *ctr, unsigned int increment)
+{
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+	    IS_ALIGNED((unsigned long)ctr, 8)) {
+
+		__be64 *high_be = (__be64 *)ctr;
+		__be64 *low_be = high_be + 1;
+		u64 orig_low = __be64_to_cpu(*low_be);
+		u64 new_low = orig_low + (u64)increment;
+
+		*low_be = __cpu_to_be64(new_low);
+
+		if (new_low < orig_low)
+			*high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
+	} else {
+		u8 *pos = (ctr + AES_BLOCK_SIZE);
+		u8 val;
+		unsigned int size;
+
+		for (; increment; increment--)
+			for (size = AES_BLOCK_SIZE; size; size--) {
+				val = *--pos + 1;
+				*pos = val;
+				if (val)
+					break;
+			}
+	}
+}
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
+{
+	struct skcipher_request *req = (struct skcipher_request *)cc_req;
+	struct scatterlist *dst = req->dst;
+	struct scatterlist *src = req->src;
+	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+	unsigned int len;
+
+	switch (ctx_p->cipher_mode) {
+	case DRV_CIPHER_CBC:
+		/*
+		 * The crypto API expects us to set the req->iv to the last
+		 * ciphertext block. For encrypt, simply copy from the result.
+		 * For decrypt, we must copy from a saved buffer since this
+		 * could be an in-place decryption operation and the src is
+		 * lost by this point.
+		 */
+		if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+			memcpy(req->iv, req_ctx->backup_info, ivsize);
+			kzfree(req_ctx->backup_info);
+		} else if (!err) {
+			len = req->cryptlen - ivsize;
+			scatterwalk_map_and_copy(req->iv, req->dst, len,
+						 ivsize, 0);
+		}
+		break;
+
+	case DRV_CIPHER_CTR:
+		/* Advance the IV counter by the number of processed blocks */
+		len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
+		cc_update_ctr((u8 *)req->iv, len);
+		break;
+
+	default:
+		break;
+	}
+
+	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+	kzfree(req_ctx->iv);
+
+	skcipher_request_complete(req, err);
+}
+
+static int cc_cipher_process(struct skcipher_request *req,
+			     enum drv_crypto_direction direction)
+{
+	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+	struct scatterlist *dst = req->dst;
+	struct scatterlist *src = req->src;
+	unsigned int nbytes = req->cryptlen;
+	void *iv = req->iv;
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
+	struct cc_crypto_req cc_req = {};
+	int rc;
+	unsigned int seq_len = 0;
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	dev_dbg(dev, "%s req=%p iv=%p nbytes=%d\n",
+		((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+		"Encrypt" : "Decrypt"), req, iv, nbytes);
+
+	/* STAT_PHASE_0: Init and sanity checks */
+
+	/* TODO: check data length according to mode */
+	if (validate_data_size(ctx_p, nbytes)) {
+		dev_err(dev, "Unsupported data size %d.\n", nbytes);
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+		rc = -EINVAL;
+		goto exit_process;
+	}
+	if (nbytes == 0) {
+		/* A zero-length request is valid; nothing to process */
+		rc = 0;
+		goto exit_process;
+	}
+
+	/* The IV we are handed may be allocated from the stack, so
+	 * we must copy it to a DMAable buffer before use.
+	 */
+	req_ctx->iv = kmemdup(iv, ivsize, flags);
+	if (!req_ctx->iv) {
+		rc = -ENOMEM;
+		goto exit_process;
+	}
+
+	/* Setup request structure */
+	cc_req.user_cb = (void *)cc_cipher_complete;
+	cc_req.user_arg = (void *)req;
+
+	/* Setup request context */
+	req_ctx->gen_ctx.op_type = direction;
+
+	/* STAT_PHASE_1: Map buffers */
+
+	rc = cc_map_cipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
+				      req_ctx->iv, src, dst, flags);
+	if (rc) {
+		dev_err(dev, "map_request() failed\n");
+		goto exit_process;
+	}
+
+	/* STAT_PHASE_2: Create sequence */
+
+	/* Setup processing */
+	cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+	/* Data processing */
+	cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
+			     &seq_len);
+
+	/* STAT_PHASE_3: Lock HW and push sequence */
+
+	rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
+			     &req->base);
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		/* Failed to send the request or request completed
+		 * synchronously
+		 */
+		cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+	}
+
+exit_process:
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		kzfree(req_ctx->backup_info);
+		kzfree(req_ctx->iv);
+	}
+
+	return rc;
+}
+
+static int cc_cipher_encrypt(struct skcipher_request *req)
+{
+	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+
+	memset(req_ctx, 0, sizeof(*req_ctx));
+
+	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+}
+
+static int cc_cipher_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+	gfp_t flags = cc_gfp_flags(&req->base);
+	unsigned int len;
+
+	memset(req_ctx, 0, sizeof(*req_ctx));
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
+
+		/* Allocate and save the last IV sized bytes of the source,
+		 * which will be lost in case of in-place decryption.
+		 */
+		req_ctx->backup_info = kzalloc(ivsize, flags);
+		if (!req_ctx->backup_info)
+			return -ENOMEM;
+
+		len = req->cryptlen - ivsize;
+		scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
+					 ivsize, 0);
+	}
+
+	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+}
+
+/* Block cipher alg */
+static const struct cc_alg_template skcipher_algs[] = {
+	{
+		.name = "xts(paes)",
+		.driver_name = "xts-paes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "xts512(paes)",
+		.driver_name = "xts-paes-du512-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 512,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "xts4096(paes)",
+		.driver_name = "xts-paes-du4096-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 4096,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "essiv(paes)",
+		.driver_name = "essiv-paes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "essiv512(paes)",
+		.driver_name = "essiv-paes-du512-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 512,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "essiv4096(paes)",
+		.driver_name = "essiv-paes-du4096-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 4096,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "bitlocker(paes)",
+		.driver_name = "bitlocker-paes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "bitlocker512(paes)",
+		.driver_name = "bitlocker-paes-du512-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 512,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "bitlocker4096(paes)",
+		.driver_name = "bitlocker-paes-du4096-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 4096,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "ecb(paes)",
+		.driver_name = "ecb-paes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = 0,
+			},
+		.cipher_mode = DRV_CIPHER_ECB,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "cbc(paes)",
+		.driver_name = "cbc-paes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "ofb(paes)",
+		.driver_name = "ofb-paes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_OFB,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "cts(cbc(paes))",
+		.driver_name = "cts-cbc-paes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC_CTS,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "ctr(paes)",
+		.driver_name = "ctr-paes-ccree",
+		.blocksize = 1,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CTR,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "xts(aes)",
+		.driver_name = "xts-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "xts512(aes)",
+		.driver_name = "xts-aes-du512-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 512,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "xts4096(aes)",
+		.driver_name = "xts-aes-du4096-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 4096,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "essiv(aes)",
+		.driver_name = "essiv-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "essiv512(aes)",
+		.driver_name = "essiv-aes-du512-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 512,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "essiv4096(aes)",
+		.driver_name = "essiv-aes-du4096-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 4096,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "bitlocker(aes)",
+		.driver_name = "bitlocker-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "bitlocker512(aes)",
+		.driver_name = "bitlocker-aes-du512-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 512,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "bitlocker4096(aes)",
+		.driver_name = "bitlocker-aes-du4096-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+		.data_unit = 4096,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "ecb(aes)",
+		.driver_name = "ecb-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = 0,
+			},
+		.cipher_mode = DRV_CIPHER_ECB,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "cbc(aes)",
+		.driver_name = "cbc-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "ofb(aes)",
+		.driver_name = "ofb-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_OFB,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "cts(cbc(aes))",
+		.driver_name = "cts-cbc-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC_CTS,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "ctr(aes)",
+		.driver_name = "ctr-aes-ccree",
+		.blocksize = 1,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CTR,
+		.flow_mode = S_DIN_to_AES,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "cbc(des3_ede)",
+		.driver_name = "cbc-3des-ccree",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_DES,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "ecb(des3_ede)",
+		.driver_name = "ecb-3des-ccree",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = 0,
+			},
+		.cipher_mode = DRV_CIPHER_ECB,
+		.flow_mode = S_DIN_to_DES,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "cbc(des)",
+		.driver_name = "cbc-des-ccree",
+		.blocksize = DES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_DES,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "ecb(des)",
+		.driver_name = "ecb-des-ccree",
+		.blocksize = DES_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = 0,
+			},
+		.cipher_mode = DRV_CIPHER_ECB,
+		.flow_mode = S_DIN_to_DES,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+};
+
+static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
+					   struct device *dev)
+{
+	struct cc_crypto_alg *t_alg;
+	struct skcipher_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg)
+		return ERR_PTR(-ENOMEM);
+
+	alg = &t_alg->skcipher_alg;
+
+	memcpy(alg, &tmpl->template_skcipher, sizeof(*alg));
+
+	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 tmpl->driver_name);
+	alg->base.cra_module = THIS_MODULE;
+	alg->base.cra_priority = CC_CRA_PRIO;
+	alg->base.cra_blocksize = tmpl->blocksize;
+	alg->base.cra_alignmask = 0;
+	alg->base.cra_ctxsize = sizeof(struct cc_cipher_ctx);
+
+	alg->base.cra_init = cc_cipher_init;
+	alg->base.cra_exit = cc_cipher_exit;
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+	t_alg->cipher_mode = tmpl->cipher_mode;
+	t_alg->flow_mode = tmpl->flow_mode;
+	t_alg->data_unit = tmpl->data_unit;
+
+	return t_alg;
+}
+
+int cc_cipher_free(struct cc_drvdata *drvdata)
+{
+	struct cc_crypto_alg *t_alg, *n;
+	struct cc_cipher_handle *cipher_handle = drvdata->cipher_handle;
+
+	if (cipher_handle) {
+		/* Remove registered algs */
+		list_for_each_entry_safe(t_alg, n, &cipher_handle->alg_list,
+					 entry) {
+			crypto_unregister_skcipher(&t_alg->skcipher_alg);
+			list_del(&t_alg->entry);
+			kfree(t_alg);
+		}
+		kfree(cipher_handle);
+		drvdata->cipher_handle = NULL;
+	}
+	return 0;
+}
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata)
+{
+	struct cc_cipher_handle *cipher_handle;
+	struct cc_crypto_alg *t_alg;
+	struct device *dev = drvdata_to_dev(drvdata);
+	int rc = -ENOMEM;
+	int alg;
+
+	cipher_handle = kmalloc(sizeof(*cipher_handle), GFP_KERNEL);
+	if (!cipher_handle)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&cipher_handle->alg_list);
+	drvdata->cipher_handle = cipher_handle;
+
+	/* Linux crypto */
+	dev_dbg(dev, "Number of algorithms = %zu\n",
+		ARRAY_SIZE(skcipher_algs));
+	for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
+		if (skcipher_algs[alg].min_hw_rev > drvdata->hw_rev)
+			continue;
+
+		dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
+		t_alg = cc_create_alg(&skcipher_algs[alg], dev);
+		if (IS_ERR(t_alg)) {
+			rc = PTR_ERR(t_alg);
+			dev_err(dev, "%s alg allocation failed\n",
+				skcipher_algs[alg].driver_name);
+			goto fail0;
+		}
+		t_alg->drvdata = drvdata;
+
+		dev_dbg(dev, "registering %s\n",
+			skcipher_algs[alg].driver_name);
+		rc = crypto_register_skcipher(&t_alg->skcipher_alg);
+		dev_dbg(dev, "%s alg registration rc = %x\n",
+			t_alg->skcipher_alg.base.cra_driver_name, rc);
+		if (rc) {
+			dev_err(dev, "%s alg registration failed\n",
+				t_alg->skcipher_alg.base.cra_driver_name);
+			kfree(t_alg);
+			goto fail0;
+		} else {
+			list_add_tail(&t_alg->entry,
+				      &cipher_handle->alg_list);
+			dev_dbg(dev, "Registered %s\n",
+				t_alg->skcipher_alg.base.cra_driver_name);
+		}
+	}
+	return 0;
+
+fail0:
+	cc_cipher_free(drvdata);
+	return rc;
+}
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
new file mode 100644
index 0000000..4dbc0a1
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_cipher.h
+ * ARM CryptoCell Cipher Crypto API
+ */
+
+#ifndef __CC_CIPHER_H__
+#define __CC_CIPHER_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+
+struct cipher_req_ctx {
+	struct async_gen_req_ctx gen_ctx;
+	enum cc_req_dma_buf_type dma_buf_type;
+	u32 in_nents;
+	u32 in_mlli_nents;
+	u32 out_nents;
+	u32 out_mlli_nents;
+	u8 *backup_info; /* store IV for the generated-IV flow */
+	u8 *iv;
+	struct mlli_params mlli_params;
+};
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata);
+
+int cc_cipher_free(struct cc_drvdata *drvdata);
+
+struct cc_hkey_info {
+	u16 keylen;
+	u8 hw_key1;
+	u8 hw_key2;
+} __packed;
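+/*
+ * Note: being __packed, struct cc_hkey_info occupies exactly 4 bytes
+ * (a u16 keylen plus two u8 HW key tokens), so CC_HW_KEY_SIZE below
+ * evaluates to 4.
+ */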
+
+#define CC_HW_KEY_SIZE sizeof(struct cc_hkey_info)
+
+#endif /*__CC_CIPHER_H__*/
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
new file mode 100644
index 0000000..e032544
--- /dev/null
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef _CC_CRYPTO_CTX_H_
+#define _CC_CRYPTO_CTX_H_
+
+#include <linux/types.h>
+
+#define CC_DRV_DES_IV_SIZE 8
+#define CC_DRV_DES_BLOCK_SIZE 8
+
+#define CC_DRV_DES_ONE_KEY_SIZE 8
+#define CC_DRV_DES_DOUBLE_KEY_SIZE 16
+#define CC_DRV_DES_TRIPLE_KEY_SIZE 24
+#define CC_DRV_DES_KEY_SIZE_MAX CC_DRV_DES_TRIPLE_KEY_SIZE
+
+#define CC_AES_IV_SIZE 16
+#define CC_AES_IV_SIZE_WORDS (CC_AES_IV_SIZE >> 2)
+
+#define CC_AES_BLOCK_SIZE 16
+#define CC_AES_BLOCK_SIZE_WORDS 4
+
+#define CC_AES_128_BIT_KEY_SIZE 16
+#define CC_AES_128_BIT_KEY_SIZE_WORDS	(CC_AES_128_BIT_KEY_SIZE >> 2)
+#define CC_AES_192_BIT_KEY_SIZE 24
+#define CC_AES_192_BIT_KEY_SIZE_WORDS	(CC_AES_192_BIT_KEY_SIZE >> 2)
+#define CC_AES_256_BIT_KEY_SIZE 32
+#define CC_AES_256_BIT_KEY_SIZE_WORDS	(CC_AES_256_BIT_KEY_SIZE >> 2)
+#define CC_AES_KEY_SIZE_MAX			CC_AES_256_BIT_KEY_SIZE
+#define CC_AES_KEY_SIZE_WORDS_MAX		(CC_AES_KEY_SIZE_MAX >> 2)
+
+#define CC_MD5_DIGEST_SIZE	16
+#define CC_SHA1_DIGEST_SIZE	20
+#define CC_SHA224_DIGEST_SIZE	28
+#define CC_SHA256_DIGEST_SIZE	32
+#define CC_SHA256_DIGEST_SIZE_IN_WORDS 8
+#define CC_SHA384_DIGEST_SIZE	48
+#define CC_SHA512_DIGEST_SIZE	64
+
+#define CC_SHA1_BLOCK_SIZE 64
+#define CC_SHA1_BLOCK_SIZE_IN_WORDS 16
+#define CC_MD5_BLOCK_SIZE 64
+#define CC_MD5_BLOCK_SIZE_IN_WORDS 16
+#define CC_SHA224_BLOCK_SIZE 64
+#define CC_SHA256_BLOCK_SIZE 64
+#define CC_SHA256_BLOCK_SIZE_IN_WORDS 16
+#define CC_SHA1_224_256_BLOCK_SIZE 64
+#define CC_SHA384_BLOCK_SIZE 128
+#define CC_SHA512_BLOCK_SIZE 128
+
+#define CC_DIGEST_SIZE_MAX CC_SHA512_DIGEST_SIZE
+#define CC_HASH_BLOCK_SIZE_MAX CC_SHA512_BLOCK_SIZE /* 1024 bits */
+
+#define CC_HMAC_BLOCK_SIZE_MAX CC_HASH_BLOCK_SIZE_MAX
+
+#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
+
+enum drv_engine_type {
+	DRV_ENGINE_NULL = 0,
+	DRV_ENGINE_AES = 1,
+	DRV_ENGINE_DES = 2,
+	DRV_ENGINE_HASH = 3,
+	DRV_ENGINE_RC4 = 4,
+	DRV_ENGINE_DOUT = 5,
+	DRV_ENGINE_RESERVE32B = S32_MAX,
+};
+
+enum drv_crypto_alg {
+	DRV_CRYPTO_ALG_NULL = -1,
+	DRV_CRYPTO_ALG_AES  = 0,
+	DRV_CRYPTO_ALG_DES  = 1,
+	DRV_CRYPTO_ALG_HASH = 2,
+	DRV_CRYPTO_ALG_C2   = 3,
+	DRV_CRYPTO_ALG_HMAC = 4,
+	DRV_CRYPTO_ALG_AEAD = 5,
+	DRV_CRYPTO_ALG_BYPASS = 6,
+	DRV_CRYPTO_ALG_NUM = 7,
+	DRV_CRYPTO_ALG_RESERVE32B = S32_MAX
+};
+
+enum drv_crypto_direction {
+	DRV_CRYPTO_DIRECTION_NULL = -1,
+	DRV_CRYPTO_DIRECTION_ENCRYPT = 0,
+	DRV_CRYPTO_DIRECTION_DECRYPT = 1,
+	DRV_CRYPTO_DIRECTION_DECRYPT_ENCRYPT = 3,
+	DRV_CRYPTO_DIRECTION_RESERVE32B = S32_MAX
+};
+
+enum drv_cipher_mode {
+	DRV_CIPHER_NULL_MODE = -1,
+	DRV_CIPHER_ECB = 0,
+	DRV_CIPHER_CBC = 1,
+	DRV_CIPHER_CTR = 2,
+	DRV_CIPHER_CBC_MAC = 3,
+	DRV_CIPHER_XTS = 4,
+	DRV_CIPHER_XCBC_MAC = 5,
+	DRV_CIPHER_OFB = 6,
+	DRV_CIPHER_CMAC = 7,
+	DRV_CIPHER_CCM = 8,
+	DRV_CIPHER_CBC_CTS = 11,
+	DRV_CIPHER_GCTR = 12,
+	DRV_CIPHER_ESSIV = 13,
+	DRV_CIPHER_BITLOCKER = 14,
+	DRV_CIPHER_RESERVE32B = S32_MAX
+};
+
+enum drv_hash_mode {
+	DRV_HASH_NULL = -1,
+	DRV_HASH_SHA1 = 0,
+	DRV_HASH_SHA256 = 1,
+	DRV_HASH_SHA224 = 2,
+	DRV_HASH_SHA512 = 3,
+	DRV_HASH_SHA384 = 4,
+	DRV_HASH_MD5 = 5,
+	DRV_HASH_CBC_MAC = 6,
+	DRV_HASH_XCBC_MAC = 7,
+	DRV_HASH_CMAC = 8,
+	DRV_HASH_MODE_NUM = 9,
+	DRV_HASH_RESERVE32B = S32_MAX
+};
+
+enum drv_hash_hw_mode {
+	DRV_HASH_HW_MD5 = 0,
+	DRV_HASH_HW_SHA1 = 1,
+	DRV_HASH_HW_SHA256 = 2,
+	DRV_HASH_HW_SHA224 = 10,
+	DRV_HASH_HW_SHA512 = 4,
+	DRV_HASH_HW_SHA384 = 12,
+	DRV_HASH_HW_GHASH = 6,
+	DRV_HASH_HW_RESERVE32B = S32_MAX
+};
+
+#endif /* _CC_CRYPTO_CTX_H_ */
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
new file mode 100644
index 0000000..5ca184e
--- /dev/null
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/stringify.h>
+#include "cc_driver.h"
+#include "cc_crypto_ctx.h"
+#include "cc_debugfs.h"
+
+struct cc_debugfs_ctx {
+	struct dentry *dir;
+};
+
+#define CC_DEBUG_REG(_X) {	\
+	.name = __stringify(_X),\
+	.offset = CC_REG(_X)	\
+	}
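+/*
+ * For example, CC_DEBUG_REG(HOST_IRR) expands to
+ * { .name = "HOST_IRR", .offset = CC_HOST_IRR_REG_OFFSET }.
+ */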
+
+/*
+ * Dentry of the top-level debugfs ccree/ directory. It is shared by
+ * all CryptoCell instances rather than being tied to a specific one,
+ * hence it is a global variable.
+ */
+static struct dentry *cc_debugfs_dir;
+
+static struct debugfs_reg32 debug_regs[] = {
+	{ .name = "SIGNATURE" }, /* Must be 0th */
+	{ .name = "VERSION" }, /* Must be 1st */
+	CC_DEBUG_REG(HOST_IRR),
+	CC_DEBUG_REG(HOST_POWER_DOWN_EN),
+	CC_DEBUG_REG(AXIM_MON_ERR),
+	CC_DEBUG_REG(DSCRPTR_QUEUE_CONTENT),
+	CC_DEBUG_REG(HOST_IMR),
+	CC_DEBUG_REG(AXIM_CFG),
+	CC_DEBUG_REG(AXIM_CACHE_PARAMS),
+	CC_DEBUG_REG(GPR_HOST),
+	CC_DEBUG_REG(AXIM_MON_COMP),
+};
+
+int __init cc_debugfs_global_init(void)
+{
+	cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
+
+	return !cc_debugfs_dir;
+}
+
+void __exit cc_debugfs_global_fini(void)
+{
+	debugfs_remove(cc_debugfs_dir);
+}
+
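+/*
+ * Per-instance debugfs setup. With debugfs typically mounted at
+ * /sys/kernel/debug, the resulting (illustrative) layout is:
+ *
+ *   /sys/kernel/debug/ccree/<platform device name>/regs
+ *   /sys/kernel/debug/ccree/<platform device name>/coherent
+ */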
+int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+	struct device *dev = drvdata_to_dev(drvdata);
+	struct cc_debugfs_ctx *ctx;
+	struct debugfs_regset32 *regset;
+	struct dentry *file;
+
+	debug_regs[0].offset = drvdata->sig_offset;
+	debug_regs[1].offset = drvdata->ver_offset;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+	if (!regset)
+		return -ENOMEM;
+
+	regset->regs = debug_regs;
+	regset->nregs = ARRAY_SIZE(debug_regs);
+	regset->base = drvdata->cc_base;
+
+	ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
+	if (!ctx->dir)
+		return -ENFILE;
+
+	file = debugfs_create_regset32("regs", 0400, ctx->dir, regset);
+	if (!file) {
+		debugfs_remove(ctx->dir);
+		return -ENFILE;
+	}
+
+	file = debugfs_create_bool("coherent", 0400, ctx->dir,
+				   &drvdata->coherent);
+
+	if (!file) {
+		debugfs_remove_recursive(ctx->dir);
+		return -ENFILE;
+	}
+
+	drvdata->debugfs = ctx;
+
+	return 0;
+}
+
+void cc_debugfs_fini(struct cc_drvdata *drvdata)
+{
+	struct cc_debugfs_ctx *ctx = (struct cc_debugfs_ctx *)drvdata->debugfs;
+
+	debugfs_remove_recursive(ctx->dir);
+}
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
new file mode 100644
index 0000000..5b5320e
--- /dev/null
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_DEBUGFS_H__
+#define __CC_DEBUGFS_H__
+
+#ifdef CONFIG_DEBUG_FS
+int cc_debugfs_global_init(void);
+void cc_debugfs_global_fini(void);
+
+int cc_debugfs_init(struct cc_drvdata *drvdata);
+void cc_debugfs_fini(struct cc_drvdata *drvdata);
+
+#else
+
+static inline int cc_debugfs_global_init(void)
+{
+	return 0;
+}
+
+static inline void cc_debugfs_global_fini(void) {}
+
+static inline int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+	return 0;
+}
+
+static inline void cc_debugfs_fini(struct cc_drvdata *drvdata) {}
+
+#endif
+
+#endif /* __CC_DEBUGFS_H__ */
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
new file mode 100644
index 0000000..1ff229c
--- /dev/null
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/crypto.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_debugfs.h"
+#include "cc_cipher.h"
+#include "cc_aead.h"
+#include "cc_hash.h"
+#include "cc_ivgen.h"
+#include "cc_sram_mgr.h"
+#include "cc_pm.h"
+#include "cc_fips.h"
+
+bool cc_dump_desc;
+module_param_named(dump_desc, cc_dump_desc, bool, 0600);
+MODULE_PARM_DESC(dump_desc, "Dump descriptors to kernel log as a debugging aid");
+
+bool cc_dump_bytes;
+module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
+MODULE_PARM_DESC(dump_bytes, "Dump buffers to kernel log as a debugging aid");
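+/*
+ * Both parameters may also be flipped at runtime, e.g. (assuming the
+ * driver is built as the ccree module):
+ *
+ *   echo 1 > /sys/module/ccree/parameters/dump_desc
+ *   echo 1 > /sys/module/ccree/parameters/dump_bytes
+ */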
+
+struct cc_hw_data {
+	char *name;
+	enum cc_hw_rev rev;
+	u32 sig;
+};
+
+/* Hardware revisions defs. */
+
+static const struct cc_hw_data cc712_hw = {
+	.name = "712", .rev = CC_HW_REV_712, .sig =  0xDCC71200U
+};
+
+static const struct cc_hw_data cc710_hw = {
+	.name = "710", .rev = CC_HW_REV_710, .sig =  0xDCC63200U
+};
+
+static const struct cc_hw_data cc630p_hw = {
+	.name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U
+};
+
+static const struct of_device_id arm_ccree_dev_of_match[] = {
+	{ .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw },
+	{ .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw },
+	{ .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw },
+	{}
+};
+MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
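+/*
+ * Illustrative device tree node matched by the table above; the unit
+ * address and values are made up, only the property names follow what
+ * the probe code below expects:
+ *
+ *	crypto@90000000 {
+ *		compatible = "arm,cryptocell-712-ree";
+ *		reg = <0x90000000 0x10000>;
+ *		interrupts = <...>;
+ *		clocks = <&cc_clk>;	/* optional */
+ *		dma-coherent;		/* optional */
+ *	};
+ */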
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len)
+{
+	char prefix[64];
+
+	if (!buf)
+		return;
+
+	snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);
+
+	print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
+		       len, false);
+}
+
+static irqreturn_t cc_isr(int irq, void *dev_id)
+{
+	struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
+	struct device *dev = drvdata_to_dev(drvdata);
+	u32 irr;
+	u32 imr;
+
+	/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
+
+	/* read the interrupt status */
+	irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
+	dev_dbg(dev, "Got IRR=0x%08X\n", irr);
+	if (irr == 0) { /* Probably shared interrupt line */
+		dev_err(dev, "Got interrupt with empty IRR\n");
+		return IRQ_NONE;
+	}
+	imr = cc_ioread(drvdata, CC_REG(HOST_IMR));
+
+	/* clear interrupt - must be before processing events */
+	cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);
+
+	drvdata->irq = irr;
+	/* Completion interrupt - most probable */
+	if (irr & CC_COMP_IRQ_MASK) {
+		/* Mask AXI completion interrupt - will be unmasked in
+		 * Deferred service handler
+		 */
+		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
+		irr &= ~CC_COMP_IRQ_MASK;
+		complete_request(drvdata);
+	}
+#ifdef CONFIG_CRYPTO_FIPS
+	/* TEE FIPS interrupt */
+	if (irr & CC_GPR0_IRQ_MASK) {
+		/* Mask interrupt - will be unmasked in Deferred service
+		 * handler
+		 */
+		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
+		irr &= ~CC_GPR0_IRQ_MASK;
+		fips_handler(drvdata);
+	}
+#endif
+	/* AXI error interrupt */
+	if (irr & CC_AXI_ERR_IRQ_MASK) {
+		u32 axi_err;
+
+		/* Read the AXI error ID */
+		axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
+		dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
+			axi_err);
+
+		irr &= ~CC_AXI_ERR_IRQ_MASK;
+	}
+
+	if (irr) {
+		dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n",
+				    irr);
+		/* Just warning */
+	}
+
+	return IRQ_HANDLED;
+}
+
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
+{
+	unsigned int val, cache_params;
+	struct device *dev = drvdata_to_dev(drvdata);
+
+	/* Unmask all AXI interrupt sources in the AXIM_CFG register */
+	val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
+	cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
+	dev_dbg(dev, "AXIM_CFG=0x%08X\n",
+		cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+
+	/* Clear all pending interrupts */
+	val = cc_ioread(drvdata, CC_REG(HOST_IRR));
+	dev_dbg(dev, "IRR=0x%08X\n", val);
+	cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
+
+	/* Unmask relevant interrupt cause */
+	val = CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK;
+
+	if (drvdata->hw_rev >= CC_HW_REV_712)
+		val |= CC_GPR0_IRQ_MASK;
+
+	cc_iowrite(drvdata, CC_REG(HOST_IMR), ~val);
+
+	cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);
+
+	val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
+
+	if (is_probe)
+		dev_dbg(dev, "Cache params previous: 0x%08X\n", val);
+
+	cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), cache_params);
+	val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
+
+	if (is_probe)
+		dev_dbg(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n",
+			val, cache_params);
+
+	return 0;
+}
+
+static int init_cc_resources(struct platform_device *plat_dev)
+{
+	struct resource *req_mem_cc_regs = NULL;
+	struct cc_drvdata *new_drvdata;
+	struct device *dev = &plat_dev->dev;
+	struct device_node *np = dev->of_node;
+	u32 signature_val;
+	u64 dma_mask;
+	const struct cc_hw_data *hw_rev;
+	const struct of_device_id *dev_id;
+	struct clk *clk;
+	int rc = 0;
+
+	new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
+	if (!new_drvdata)
+		return -ENOMEM;
+
+	dev_id = of_match_node(arm_ccree_dev_of_match, np);
+	if (!dev_id)
+		return -ENODEV;
+
+	hw_rev = (struct cc_hw_data *)dev_id->data;
+	new_drvdata->hw_rev_name = hw_rev->name;
+	new_drvdata->hw_rev = hw_rev->rev;
+
+	if (hw_rev->rev >= CC_HW_REV_712) {
+		new_drvdata->hash_len_sz = HASH_LEN_SIZE_712;
+		new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
+		new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712);
+		new_drvdata->ver_offset = CC_REG(HOST_VERSION_712);
+	} else {
+		new_drvdata->hash_len_sz = HASH_LEN_SIZE_630;
+		new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
+		new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630);
+		new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
+	}
+
+	platform_set_drvdata(plat_dev, new_drvdata);
+	new_drvdata->plat_dev = plat_dev;
+
+	clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(clk))
+		switch (PTR_ERR(clk)) {
+		/* Clock is optional so this might be fine */
+		case -ENOENT:
+			break;
+
+		/* Clock not available, let's try again soon */
+		case -EPROBE_DEFER:
+			return -EPROBE_DEFER;
+
+		default:
+			dev_err(dev, "Error getting clock: %ld\n",
+				PTR_ERR(clk));
+			return PTR_ERR(clk);
+		}
+	new_drvdata->clk = clk;
+
+	new_drvdata->coherent = of_dma_is_coherent(np);
+
+	/* Get device resources */
+	/* First CC registers space */
+	req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+	/* Map registers space */
+	new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+	if (IS_ERR(new_drvdata->cc_base)) {
+		dev_err(dev, "Failed to ioremap registers");
+		return PTR_ERR(new_drvdata->cc_base);
+	}
+
+	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
+		req_mem_cc_regs);
+	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
+		&req_mem_cc_regs->start, new_drvdata->cc_base);
+
+	/* Then IRQ */
+	new_drvdata->irq = platform_get_irq(plat_dev, 0);
+	if (new_drvdata->irq < 0) {
+		dev_err(dev, "Failed getting IRQ resource\n");
+		return new_drvdata->irq;
+	}
+
+	rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
+			      IRQF_SHARED, "ccree", new_drvdata);
+	if (rc) {
+		dev_err(dev, "Could not register to interrupt %d\n",
+			new_drvdata->irq);
+		return rc;
+	}
+	dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
+
+	init_completion(&new_drvdata->hw_queue_avail);
+
+	if (!plat_dev->dev.dma_mask)
+		plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;
+
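+	/*
+	 * Negotiate the widest DMA mask the platform accepts: start from
+	 * the IP maximum (48 bits, i.e. DMA_BIT_MASK(48) ==
+	 * 0x0000ffffffffffff) and drop one bit on each failed attempt;
+	 * the narrowest mask tried is the 32-bit one.
+	 */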
+	dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
+	while (dma_mask > 0x7fffffffUL) {
+		if (dma_supported(&plat_dev->dev, dma_mask)) {
+			rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
+			if (!rc)
+				break;
+		}
+		dma_mask >>= 1;
+	}
+
+	if (rc) {
+		dev_err(dev, "Failed in dma_set_mask, mask=%llx\n", dma_mask);
+		return rc;
+	}
+
+	rc = cc_clk_on(new_drvdata);
+	if (rc) {
+		dev_err(dev, "Failed to enable clock");
+		return rc;
+	}
+
+	/* Verify correct mapping */
+	signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
+	if (signature_val != hw_rev->sig) {
+		dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
+			signature_val, hw_rev->sig);
+		rc = -EINVAL;
+		goto post_clk_err;
+	}
+	dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
+
+	/* Display HW versions */
+	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
+		 hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset),
+		 DRV_MODULE_VERSION);
+
+	rc = init_cc_regs(new_drvdata, true);
+	if (rc) {
+		dev_err(dev, "init_cc_regs failed\n");
+		goto post_clk_err;
+	}
+
+	rc = cc_debugfs_init(new_drvdata);
+	if (rc) {
+		dev_err(dev, "Failed registering debugfs interface\n");
+		goto post_regs_err;
+	}
+
+	rc = cc_fips_init(new_drvdata);
+	if (rc) {
+		dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
+		goto post_debugfs_err;
+	}
+	rc = cc_sram_mgr_init(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_sram_mgr_init failed\n");
+		goto post_fips_init_err;
+	}
+
+	new_drvdata->mlli_sram_addr =
+		cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
+	if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
+		dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
+		rc = -ENOMEM;
+		goto post_sram_mgr_err;
+	}
+
+	rc = cc_req_mgr_init(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_req_mgr_init failed\n");
+		goto post_sram_mgr_err;
+	}
+
+	rc = cc_buffer_mgr_init(new_drvdata);
+	if (rc) {
+		dev_err(dev, "buffer_mgr_init failed\n");
+		goto post_req_mgr_err;
+	}
+
+	rc = cc_pm_init(new_drvdata);
+	if (rc) {
+		dev_err(dev, "ssi_power_mgr_init failed\n");
+		goto post_buf_mgr_err;
+	}
+
+	rc = cc_ivgen_init(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_ivgen_init failed\n");
+		goto post_power_mgr_err;
+	}
+
+	/* Allocate crypto algs */
+	rc = cc_cipher_alloc(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_cipher_alloc failed\n");
+		goto post_ivgen_err;
+	}
+
+	/* hash must be allocated before aead since hash exports APIs */
+	rc = cc_hash_alloc(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_hash_alloc failed\n");
+		goto post_cipher_err;
+	}
+
+	rc = cc_aead_alloc(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_aead_alloc failed\n");
+		goto post_hash_err;
+	}
+
+	/* If we got here and FIPS mode is enabled it means all FIPS
+	 * tests passed, so let the TEE know we're good.
+	 */
+	cc_set_ree_fips_status(new_drvdata, true);
+
+	return 0;
+
+post_hash_err:
+	cc_hash_free(new_drvdata);
+post_cipher_err:
+	cc_cipher_free(new_drvdata);
+post_ivgen_err:
+	cc_ivgen_fini(new_drvdata);
+post_power_mgr_err:
+	cc_pm_fini(new_drvdata);
+post_buf_mgr_err:
+	cc_buffer_mgr_fini(new_drvdata);
+post_req_mgr_err:
+	cc_req_mgr_fini(new_drvdata);
+post_sram_mgr_err:
+	cc_sram_mgr_fini(new_drvdata);
+post_fips_init_err:
+	cc_fips_fini(new_drvdata);
+post_debugfs_err:
+	cc_debugfs_fini(new_drvdata);
+post_regs_err:
+	fini_cc_regs(new_drvdata);
+post_clk_err:
+	cc_clk_off(new_drvdata);
+	return rc;
+}
+
+void fini_cc_regs(struct cc_drvdata *drvdata)
+{
+	/* Mask all interrupts */
+	cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
+}
+
+static void cleanup_cc_resources(struct platform_device *plat_dev)
+{
+	struct cc_drvdata *drvdata =
+		(struct cc_drvdata *)platform_get_drvdata(plat_dev);
+
+	cc_aead_free(drvdata);
+	cc_hash_free(drvdata);
+	cc_cipher_free(drvdata);
+	cc_ivgen_fini(drvdata);
+	cc_pm_fini(drvdata);
+	cc_buffer_mgr_fini(drvdata);
+	cc_req_mgr_fini(drvdata);
+	cc_sram_mgr_fini(drvdata);
+	cc_fips_fini(drvdata);
+	cc_debugfs_fini(drvdata);
+	fini_cc_regs(drvdata);
+	cc_clk_off(drvdata);
+}
+
+int cc_clk_on(struct cc_drvdata *drvdata)
+{
+	struct clk *clk = drvdata->clk;
+	int rc;
+
+	if (IS_ERR(clk))
+		/* Not all devices have a clock associated with CCREE */
+		return 0;
+
+	rc = clk_prepare_enable(clk);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+void cc_clk_off(struct cc_drvdata *drvdata)
+{
+	struct clk *clk = drvdata->clk;
+
+	if (IS_ERR(clk))
+		/* Not all devices have a clock associated with CCREE */
+		return;
+
+	clk_disable_unprepare(clk);
+}
+
+static int ccree_probe(struct platform_device *plat_dev)
+{
+	int rc;
+	struct device *dev = &plat_dev->dev;
+
+	/* Map registers space */
+	rc = init_cc_resources(plat_dev);
+	if (rc)
+		return rc;
+
+	dev_info(dev, "ARM ccree device initialized\n");
+
+	return 0;
+}
+
+static int ccree_remove(struct platform_device *plat_dev)
+{
+	struct device *dev = &plat_dev->dev;
+
+	dev_dbg(dev, "Releasing ccree resources...\n");
+
+	cleanup_cc_resources(plat_dev);
+
+	dev_info(dev, "ARM ccree device terminated\n");
+
+	return 0;
+}
+
+static struct platform_driver ccree_driver = {
+	.driver = {
+		   .name = "ccree",
+		   .of_match_table = arm_ccree_dev_of_match,
+#ifdef CONFIG_PM
+		   .pm = &ccree_pm,
+#endif
+	},
+	.probe = ccree_probe,
+	.remove = ccree_remove,
+};
+
+static int __init ccree_init(void)
+{
+	int ret;
+
+	cc_hash_global_init();
+
+	ret = cc_debugfs_global_init();
+	if (ret)
+		return ret;
+
+	return platform_driver_register(&ccree_driver);
+}
+module_init(ccree_init);
+
+static void __exit ccree_exit(void)
+{
+	platform_driver_unregister(&ccree_driver);
+	cc_debugfs_global_fini();
+}
+module_exit(ccree_exit);
+
+/* Module description */
+MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_AUTHOR("ARM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
new file mode 100644
index 0000000..d608a4f
--- /dev/null
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_driver.h
+ * ARM CryptoCell Linux Crypto Driver
+ */
+
+#ifndef __CC_DRIVER_H__
+#define __CC_DRIVER_H__
+
+#ifdef COMP_IN_WQ
+#include <linux/workqueue.h>
+#else
+#include <linux/interrupt.h>
+#endif
+#include <linux/dma-mapping.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+#include <linux/version.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+/* Registers definitions from shared/hw/ree_include */
+#include "cc_host_regs.h"
+#define CC_DEV_SHA_MAX 512
+#include "cc_crypto_ctx.h"
+#include "cc_hw_queue_defs.h"
+#include "cc_sram_mgr.h"
+
+extern bool cc_dump_desc;
+extern bool cc_dump_bytes;
+
+#define DRV_MODULE_VERSION "4.0"
+
+enum cc_hw_rev {
+	CC_HW_REV_630 = 630,
+	CC_HW_REV_710 = 710,
+	CC_HW_REV_712 = 712
+};
+
+#define CC_COHERENT_CACHE_PARAMS 0xEEE
+
+/* Maximum DMA mask supported by IP */
+#define DMA_BIT_MASK_LEN 48
+
+#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
+			  (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
+			  (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
+			  (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))
+
+#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
+
+#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+
+#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
+				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
+				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+
+/* Register name mangling macro */
+#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
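+/* For example, CC_REG(HOST_IRR) expands to CC_HOST_IRR_REG_OFFSET. */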
+
+/* TEE FIPS status interrupt */
+#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)
+
+#define CC_CRA_PRIO 400
+
+#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */
+
+#define MAX_REQUEST_QUEUE_SIZE 4096
+#define MAX_MLLI_BUFF_SIZE 2080
+#define MAX_ICV_NENTS_SUPPORTED 2
+
+/* Definitions for HW descriptors DIN/DOUT fields */
+#define NS_BIT 1
+#define AXI_ID 0
+/* AXI_ID is not actually the AXI ID of the transaction but the value of the
+ * AXI_ID field in the HW descriptor. The DMA engine adds 8 to that value.
+ */
+
+#define CC_MAX_IVGEN_DMA_ADDRESSES	3
+struct cc_crypto_req {
+	void (*user_cb)(struct device *dev, void *req, int err);
+	void *user_arg;
+	dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
+	/* For the first 'ivgen_dma_addr_len' addresses of this array,
+	 * the generated IV is placed there by send_request().
+	 * The same generated IV is used for all addresses!
+	 */
+	/* Number of 'ivgen_dma_addr' elements to be filled. */
+	unsigned int ivgen_dma_addr_len;
+	/* The required generated IV size, 8 or 16 bytes allowed. */
+	unsigned int ivgen_size;
+	struct completion seq_compl; /* request completion */
+};
+
+/**
+ * struct cc_drvdata - driver private data context
+ * @cc_base:	virt address of the CC registers
+ * @irq:	device IRQ number
+ * @irq_mask:	Interrupt mask shadow (1 for masked interrupts)
+ * @fw_ver:	SeP loaded firmware version
+ */
+struct cc_drvdata {
+	void __iomem *cc_base;
+	int irq;
+	u32 irq_mask;
+	u32 fw_ver;
+	struct completion hw_queue_avail; /* wait for HW queue availability */
+	struct platform_device *plat_dev;
+	cc_sram_addr_t mlli_sram_addr;
+	void *buff_mgr_handle;
+	void *cipher_handle;
+	void *hash_handle;
+	void *aead_handle;
+	void *request_mgr_handle;
+	void *fips_handle;
+	void *ivgen_handle;
+	void *sram_mgr_handle;
+	void *debugfs;
+	struct clk *clk;
+	bool coherent;
+	char *hw_rev_name;
+	enum cc_hw_rev hw_rev;
+	u32 hash_len_sz;
+	u32 axim_mon_offset;
+	u32 sig_offset;
+	u32 ver_offset;
+};
+
+struct cc_crypto_alg {
+	struct list_head entry;
+	int cipher_mode;
+	int flow_mode; /* Note: currently, refers to the cipher mode only. */
+	int auth_mode;
+	unsigned int data_unit;
+	struct cc_drvdata *drvdata;
+	struct skcipher_alg skcipher_alg;
+	struct aead_alg aead_alg;
+};
+
+struct cc_alg_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	union {
+		struct skcipher_alg skcipher;
+		struct aead_alg aead;
+	} template_u;
+	int cipher_mode;
+	int flow_mode; /* Note: currently, refers to the cipher mode only. */
+	int auth_mode;
+	u32 min_hw_rev;
+	unsigned int data_unit;
+	struct cc_drvdata *drvdata;
+};
+
+struct async_gen_req_ctx {
+	dma_addr_t iv_dma_addr;
+	enum drv_crypto_direction op_type;
+};
+
+static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
+{
+	return &drvdata->plat_dev->dev;
+}
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len);
+static inline void dump_byte_array(const char *name, const u8 *the_array,
+				   size_t size)
+{
+	if (cc_dump_bytes)
+		__dump_byte_array(name, the_array, size);
+}
+
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
+void fini_cc_regs(struct cc_drvdata *drvdata);
+int cc_clk_on(struct cc_drvdata *drvdata);
+void cc_clk_off(struct cc_drvdata *drvdata);
+
+static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
+{
+	iowrite32(val, (drvdata->cc_base + reg));
+}
+
+static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
+{
+	return ioread32(drvdata->cc_base + reg);
+}
+
+static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
+{
+	return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+			GFP_KERNEL : GFP_ATOMIC;
+}
+
+static inline void set_queue_last_ind(struct cc_drvdata *drvdata,
+				      struct cc_hw_desc *pdesc)
+{
+	if (drvdata->hw_rev >= CC_HW_REV_712)
+		set_queue_last_ind_bit(pdesc);
+}
+
+#endif /*__CC_DRIVER_H__*/
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
new file mode 100644
index 0000000..b4d0a6d
--- /dev/null
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/fips.h>
+
+#include "cc_driver.h"
+#include "cc_fips.h"
+
+static void fips_dsr(unsigned long devarg);
+
+struct cc_fips_handle {
+	struct tasklet_struct tasklet;
+};
+
+/* Called once at driver entry point to check
+ * whether a TEE FIPS error has occurred.
+ */
+static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
+{
+	u32 reg;
+
+	reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
+	return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
+}
+
+/*
+ * Push the REE FIPS library status towards the TEE library
+ * by writing the status to the HOST_GPR0 register.
+ */
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status)
+{
+	int val = CC_FIPS_SYNC_REE_STATUS;
+
+	if (drvdata->hw_rev < CC_HW_REV_712)
+		return;
+
+	val |= (status ? CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR);
+
+	cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
+}
+
+void cc_fips_fini(struct cc_drvdata *drvdata)
+{
+	struct cc_fips_handle *fips_h = drvdata->fips_handle;
+
+	if (drvdata->hw_rev < CC_HW_REV_712 || !fips_h)
+		return;
+
+	/* Kill tasklet */
+	tasklet_kill(&fips_h->tasklet);
+
+	kfree(fips_h);
+	drvdata->fips_handle = NULL;
+}
+
+void fips_handler(struct cc_drvdata *drvdata)
+{
+	struct cc_fips_handle *fips_handle_ptr = drvdata->fips_handle;
+
+	if (drvdata->hw_rev < CC_HW_REV_712)
+		return;
+
+	tasklet_schedule(&fips_handle_ptr->tasklet);
+}
+
+static inline void tee_fips_error(struct device *dev)
+{
+	if (fips_enabled)
+		panic("ccree: TEE reported cryptographic error in fips mode!\n");
+	else
+		dev_err(dev, "TEE reported error!\n");
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void fips_dsr(unsigned long devarg)
+{
+	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
+	struct device *dev = drvdata_to_dev(drvdata);
+	u32 irq, state, val;
+
+	irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
+
+	if (irq) {
+		state = cc_ioread(drvdata, CC_REG(GPR_HOST));
+
+		if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
+			tee_fips_error(dev);
+	}
+
+	/* after verifying that there is nothing to do,
+	 * unmask the AXI completion interrupt.
+	 */
+	val = (cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+	cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
+}
+
+/* Called once at driver entry point. */
+int cc_fips_init(struct cc_drvdata *p_drvdata)
+{
+	struct cc_fips_handle *fips_h;
+	struct device *dev = drvdata_to_dev(p_drvdata);
+
+	if (p_drvdata->hw_rev < CC_HW_REV_712)
+		return 0;
+
+	fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
+	if (!fips_h)
+		return -ENOMEM;
+
+	p_drvdata->fips_handle = fips_h;
+
+	dev_dbg(dev, "Initializing fips tasklet\n");
+	tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
+
+	if (!cc_get_tee_fips_status(p_drvdata))
+		tee_fips_error(dev);
+
+	return 0;
+}
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
new file mode 100644
index 0000000..645e096
--- /dev/null
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_FIPS_H__
+#define __CC_FIPS_H__
+
+#ifdef CONFIG_CRYPTO_FIPS
+
+enum cc_fips_status {
+	CC_FIPS_SYNC_MODULE_OK = 0x0,
+	CC_FIPS_SYNC_MODULE_ERROR = 0x1,
+	CC_FIPS_SYNC_REE_STATUS = 0x4,
+	CC_FIPS_SYNC_TEE_STATUS = 0x8,
+	CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX
+};
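+/*
+ * With this encoding, cc_set_ree_fips_status() writes either 0x4
+ * (REE_STATUS | MODULE_OK) or 0x5 (REE_STATUS | MODULE_ERROR) to
+ * HOST_GPR0, while a healthy TEE is expected to report 0x8
+ * (TEE_STATUS | MODULE_OK) back through GPR_HOST.
+ */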
+
+int cc_fips_init(struct cc_drvdata *p_drvdata);
+void cc_fips_fini(struct cc_drvdata *drvdata);
+void fips_handler(struct cc_drvdata *drvdata);
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+
+#else  /* CONFIG_CRYPTO_FIPS */
+
+static inline int cc_fips_init(struct cc_drvdata *p_drvdata)
+{
+	return 0;
+}
+
+static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
+static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
+					  bool ok) {}
+static inline void fips_handler(struct cc_drvdata *drvdata) {}
+
+#endif /* CONFIG_CRYPTO_FIPS */
+
+#endif  /*__CC_FIPS_H__*/
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
new file mode 100644
index 0000000..b931330
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -0,0 +1,2245 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/internal/hash.h>
+
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
+
+#define CC_MAX_HASH_SEQ_LEN 12
+#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
+
+struct cc_hash_handle {
+	cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
+	cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
+	struct list_head hash_list;
+};
+
+static const u32 digest_len_init[] = {
+	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
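+/*
+ * MD5's initial state words are numerically identical to SHA-1's
+ * H0..H3, so the SHA1_H* constants are reused below, listed in the
+ * same reversed word order as the other larval digest arrays.
+ */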
+static const u32 md5_init[] = {
+	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
+static const u32 sha1_init[] = {
+	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
+static const u32 sha224_init[] = {
+	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
+	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
+static const u32 sha256_init[] = {
+	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
+	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
+static const u32 digest_len_sha512_init[] = {
+	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
+static u64 sha384_init[] = {
+	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
+	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
+static u64 sha512_init[] = {
+	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
+	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+			  unsigned int *seq_size);
+
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+			  unsigned int *seq_size);
+
+static const void *cc_larval_digest(struct device *dev, u32 mode);
+
+struct cc_hash_alg {
+	struct list_head entry;
+	int hash_mode;
+	int hw_mode;
+	int inter_digestsize;
+	struct cc_drvdata *drvdata;
+	struct ahash_alg ahash_alg;
+};
+
+struct hash_key_req_ctx {
+	u32 keylen;
+	dma_addr_t key_dma_addr;
+};
+
+/* hash per-session context */
+struct cc_hash_ctx {
+	struct cc_drvdata *drvdata;
+	/* holds the original digest: the digest after "setkey" if HMAC,
+	 * the initial digest if HASH.
+	 */
+	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
+	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE]  ____cacheline_aligned;
+
+	dma_addr_t opad_tmp_keys_dma_addr  ____cacheline_aligned;
+	dma_addr_t digest_buff_dma_addr;
+	/* used for HMAC with a key larger than the mode block size */
+	struct hash_key_req_ctx key_params;
+	int hash_mode;
+	int hw_mode;
+	int inter_digestsize;
+	struct completion setkey_comp;
+	bool is_hmac;
+};
+
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
+			unsigned int flow_mode, struct cc_hw_desc desc[],
+			bool is_not_last_data, unsigned int *seq_size);
+
+static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
+{
+	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
+	    mode == DRV_HASH_SHA512) {
+		set_bytes_swap(desc, 1);
+	} else {
+		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+	}
+}
+
+static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
+			 unsigned int digestsize)
+{
+	state->digest_result_dma_addr =
+		dma_map_single(dev, state->digest_result_buff,
+			       digestsize, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
+		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
+			digestsize);
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
+		digestsize, state->digest_result_buff,
+		&state->digest_result_dma_addr);
+
+	return 0;
+}
+
+static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
+			struct cc_hash_ctx *ctx)
+{
+	bool is_hmac = ctx->is_hmac;
+
+	memset(state, 0, sizeof(*state));
+
+	if (is_hmac) {
+		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
+		    ctx->hw_mode != DRV_CIPHER_CMAC) {
+			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
+						ctx->inter_digestsize,
+						DMA_BIDIRECTIONAL);
+
+			memcpy(state->digest_buff, ctx->digest_buff,
+			       ctx->inter_digestsize);
+			if (ctx->hash_mode == DRV_HASH_SHA512 ||
+			    ctx->hash_mode == DRV_HASH_SHA384)
+				memcpy(state->digest_bytes_len,
+				       digest_len_sha512_init,
+				       ctx->drvdata->hash_len_sz);
+			else
+				memcpy(state->digest_bytes_len, digest_len_init,
+				       ctx->drvdata->hash_len_sz);
+		}
+
+		if (ctx->hash_mode != DRV_HASH_NULL) {
+			dma_sync_single_for_cpu(dev,
+						ctx->opad_tmp_keys_dma_addr,
+						ctx->inter_digestsize,
+						DMA_BIDIRECTIONAL);
+			memcpy(state->opad_digest_buff,
+			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
+		}
+	} else { /* hash */
+		/* Copy the initial digest for the plain hash flow. */
+		const void *larval = cc_larval_digest(dev, ctx->hash_mode);
+
+		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
+	}
+}
+
+static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
+		      struct cc_hash_ctx *ctx)
+{
+	bool is_hmac = ctx->is_hmac;
+
+	state->digest_buff_dma_addr =
+		dma_map_single(dev, state->digest_buff,
+			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
+		dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+			ctx->inter_digestsize, state->digest_buff);
+		return -EINVAL;
+	}
+	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+		ctx->inter_digestsize, state->digest_buff,
+		&state->digest_buff_dma_addr);
+
+	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
+		state->digest_bytes_len_dma_addr =
+			dma_map_single(dev, state->digest_bytes_len,
+				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
+			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
+				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
+			goto unmap_digest_buf;
+		}
+		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
+			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
+			&state->digest_bytes_len_dma_addr);
+	}
+
+	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
+		state->opad_digest_dma_addr =
+			dma_map_single(dev, state->opad_digest_buff,
+				       ctx->inter_digestsize,
+				       DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
+			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
+				ctx->inter_digestsize,
+				state->opad_digest_buff);
+			goto unmap_digest_len;
+		}
+		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
+			ctx->inter_digestsize, state->opad_digest_buff,
+			&state->opad_digest_dma_addr);
+	}
+
+	return 0;
+
+unmap_digest_len:
+	if (state->digest_bytes_len_dma_addr) {
+		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
+				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
+		state->digest_bytes_len_dma_addr = 0;
+	}
+unmap_digest_buf:
+	if (state->digest_buff_dma_addr) {
+		dma_unmap_single(dev, state->digest_buff_dma_addr,
+				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+		state->digest_buff_dma_addr = 0;
+	}
+
+	return -EINVAL;
+}
+
+static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
+			 struct cc_hash_ctx *ctx)
+{
+	if (state->digest_buff_dma_addr) {
+		dma_unmap_single(dev, state->digest_buff_dma_addr,
+				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+			&state->digest_buff_dma_addr);
+		state->digest_buff_dma_addr = 0;
+	}
+	if (state->digest_bytes_len_dma_addr) {
+		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
+				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
+			&state->digest_bytes_len_dma_addr);
+		state->digest_bytes_len_dma_addr = 0;
+	}
+	if (state->opad_digest_dma_addr) {
+		dma_unmap_single(dev, state->opad_digest_dma_addr,
+				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
+			&state->opad_digest_dma_addr);
+		state->opad_digest_dma_addr = 0;
+	}
+}
+
+static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
+			    unsigned int digestsize, u8 *result)
+{
+	if (state->digest_result_dma_addr) {
+		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
+				 DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
+			state->digest_result_buff,
+			&state->digest_result_dma_addr, digestsize);
+		memcpy(result, state->digest_result_buff, digestsize);
+	}
+	state->digest_result_dma_addr = 0;
+}
+
+static void cc_update_complete(struct device *dev, void *cc_req, int err)
+{
+	struct ahash_request *req = (struct ahash_request *)cc_req;
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	dev_dbg(dev, "req=%pK\n", req);
+
+	cc_unmap_hash_request(dev, state, req->src, false);
+	cc_unmap_req(dev, state, ctx);
+	req->base.complete(&req->base, err);
+}
+
+static void cc_digest_complete(struct device *dev, void *cc_req, int err)
+{
+	struct ahash_request *req = (struct ahash_request *)cc_req;
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+
+	dev_dbg(dev, "req=%pK\n", req);
+
+	cc_unmap_hash_request(dev, state, req->src, false);
+	cc_unmap_result(dev, state, digestsize, req->result);
+	cc_unmap_req(dev, state, ctx);
+	req->base.complete(&req->base, err);
+}
+
+static void cc_hash_complete(struct device *dev, void *cc_req, int err)
+{
+	struct ahash_request *req = (struct ahash_request *)cc_req;
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+
+	dev_dbg(dev, "req=%pK\n", req);
+
+	cc_unmap_hash_request(dev, state, req->src, false);
+	cc_unmap_result(dev, state, digestsize, req->result);
+	cc_unmap_req(dev, state, ctx);
+	req->base.complete(&req->base, err);
+}
+
+static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
+			 int idx)
+{
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+
+	/* Get final MAC result */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	/* TODO */
+	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
+		      NS_BIT, 1);
+	set_queue_last_ind(ctx->drvdata, &desc[idx]);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+	cc_set_endianity(ctx->hash_mode, &desc[idx]);
+	idx++;
+
+	return idx;
+}
+
+static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
+		       int idx)
+{
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+
+	/* store the hash digest result in the context */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
+		      NS_BIT, 0);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	cc_set_endianity(ctx->hash_mode, &desc[idx]);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	idx++;
+
+	/* Loading hash opad xor key state */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
+		     ctx->inter_digestsize, NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	idx++;
+
+	/* Load the hash current length */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_din_sram(&desc[idx],
+		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
+		     ctx->drvdata->hash_len_sz);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	idx++;
+
+	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
+	idx++;
+
+	/* Perform HASH update */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+		     digestsize, NS_BIT);
+	set_flow_mode(&desc[idx], DIN_HASH);
+	idx++;
+
+	return idx;
+}
+
+static int cc_hash_digest(struct ahash_request *req)
+{
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+	struct scatterlist *src = req->src;
+	unsigned int nbytes = req->nbytes;
+	u8 *result = req->result;
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	bool is_hmac = ctx->is_hmac;
+	struct cc_crypto_req cc_req = {};
+	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+	cc_sram_addr_t larval_digest_addr =
+		cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
+	int idx = 0;
+	int rc = 0;
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
+		nbytes);
+
+	cc_init_req(dev, state, ctx);
+
+	if (cc_map_req(dev, state, ctx)) {
+		dev_err(dev, "map_ahash_source() failed\n");
+		return -ENOMEM;
+	}
+
+	if (cc_map_result(dev, state, digestsize)) {
+		dev_err(dev, "map_ahash_digest() failed\n");
+		cc_unmap_req(dev, state, ctx);
+		return -ENOMEM;
+	}
+
+	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+				      flags)) {
+		dev_err(dev, "map_ahash_request_final() failed\n");
+		cc_unmap_result(dev, state, digestsize, result);
+		cc_unmap_req(dev, state, ctx);
+		return -ENOMEM;
+	}
+
+	/* Setup request structure */
+	cc_req.user_cb = cc_digest_complete;
+	cc_req.user_arg = req;
+
+	/* If HMAC then load hash IPAD xor key, if HASH then load initial
+	 * digest
+	 */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	if (is_hmac) {
+		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+			     ctx->inter_digestsize, NS_BIT);
+	} else {
+		set_din_sram(&desc[idx], larval_digest_addr,
+			     ctx->inter_digestsize);
+	}
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	idx++;
+
+	/* Load the hash current length */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+
+	if (is_hmac) {
+		set_din_type(&desc[idx], DMA_DLLI,
+			     state->digest_bytes_len_dma_addr,
+			     ctx->drvdata->hash_len_sz, NS_BIT);
+	} else {
+		set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+		if (nbytes)
+			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+		else
+			set_cipher_do(&desc[idx], DO_PAD);
+	}
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	idx++;
+
+	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+	if (is_hmac) {
+		/* HW last hash block padding (aka. "DO_PAD") */
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+			      ctx->drvdata->hash_len_sz, NS_BIT, 0);
+		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+		set_cipher_do(&desc[idx], DO_PAD);
+		idx++;
+
+		idx = cc_fin_hmac(desc, req, idx);
+	}
+
+	idx = cc_fin_result(desc, req, idx);
+
+	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+		cc_unmap_hash_request(dev, state, src, true);
+		cc_unmap_result(dev, state, digestsize, result);
+		cc_unmap_req(dev, state, ctx);
+	}
+	return rc;
+}
+
+static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
+			   struct ahash_req_ctx *state, unsigned int idx)
+{
+	/* Restore hash digest */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+		     ctx->inter_digestsize, NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	idx++;
+
+	/* Restore hash current length */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
+		     ctx->drvdata->hash_len_sz, NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	idx++;
+
+	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+	return idx;
+}
+
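+/*
+ * Hash update: if the accumulated data does not yet require a hardware
+ * pass, cc_map_hash_request_update() buffers it and returns 1, so no
+ * descriptors are sent. Otherwise the saved hash state is restored, the
+ * data is streamed through the engine and the new intermediate digest and
+ * byte count are written back into the request context.
+ */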
+static int cc_hash_update(struct ahash_request *req)
+{
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+	struct scatterlist *src = req->src;
+	unsigned int nbytes = req->nbytes;
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	struct cc_crypto_req cc_req = {};
+	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+	u32 idx = 0;
+	int rc;
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
+		"hmac" : "hash", nbytes);
+
+	if (nbytes == 0) {
+		/* no real updates required */
+		return 0;
+	}
+
+	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
+					block_size, flags);
+	if (rc) {
+		if (rc == 1) {
+			dev_dbg(dev, "data size does not require HW update %x\n",
+				nbytes);
+			/* No hardware updates are required */
+			return 0;
+		}
+		dev_err(dev, "map_ahash_request_update() failed\n");
+		return -ENOMEM;
+	}
+
+	if (cc_map_req(dev, state, ctx)) {
+		dev_err(dev, "map_ahash_source() failed\n");
+		cc_unmap_hash_request(dev, state, src, true);
+		return -EINVAL;
+	}
+
+	/* Setup request structure */
+	cc_req.user_cb = cc_update_complete;
+	cc_req.user_arg = req;
+
+	idx = cc_restore_hash(desc, ctx, state, idx);
+
+	/* store the hash digest result in context */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+		      ctx->inter_digestsize, NS_BIT, 0);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	idx++;
+
+	/* store current hash length in context */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
+		      ctx->drvdata->hash_len_sz, NS_BIT, 1);
+	set_queue_last_ind(ctx->drvdata, &desc[idx]);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+	idx++;
+
+	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+		cc_unmap_hash_request(dev, state, src, true);
+		cc_unmap_req(dev, state, ctx);
+	}
+	return rc;
+}
+
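+/*
+ * Common implementation for final (update == false) and finup
+ * (update == true): restore the saved hash state, have the hardware pad
+ * the trailing block (DO_PAD), run the outer HMAC round when needed and
+ * write the resulting digest out to the request.
+ */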
+static int cc_do_finup(struct ahash_request *req, bool update)
+{
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+	struct scatterlist *src = req->src;
+	unsigned int nbytes = req->nbytes;
+	u8 *result = req->result;
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	bool is_hmac = ctx->is_hmac;
+	struct cc_crypto_req cc_req = {};
+	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+	unsigned int idx = 0;
+	int rc;
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
+		update ? "finup" : "final", nbytes);
+
+	if (cc_map_req(dev, state, ctx)) {
+		dev_err(dev, "map_ahash_source() failed\n");
+		return -EINVAL;
+	}
+
+	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
+				      flags)) {
+		dev_err(dev, "map_ahash_request_final() failed\n");
+		cc_unmap_req(dev, state, ctx);
+		return -ENOMEM;
+	}
+	if (cc_map_result(dev, state, digestsize)) {
+		dev_err(dev, "map_ahash_digest() failed\n");
+		cc_unmap_hash_request(dev, state, src, true);
+		cc_unmap_req(dev, state, ctx);
+		return -ENOMEM;
+	}
+
+	/* Setup request structure */
+	cc_req.user_cb = cc_hash_complete;
+	cc_req.user_arg = req;
+
+	idx = cc_restore_hash(desc, ctx, state, idx);
+
+	/* Pad the hash */
+	hw_desc_init(&desc[idx]);
+	set_cipher_do(&desc[idx], DO_PAD);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
+		      ctx->drvdata->hash_len_sz, NS_BIT, 0);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	idx++;
+
+	if (is_hmac)
+		idx = cc_fin_hmac(desc, req, idx);
+
+	idx = cc_fin_result(desc, req, idx);
+
+	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+		cc_unmap_hash_request(dev, state, src, true);
+		cc_unmap_result(dev, state, digestsize, result);
+		cc_unmap_req(dev, state, ctx);
+	}
+	return rc;
+}
+
+static int cc_hash_finup(struct ahash_request *req)
+{
+	return cc_do_finup(req, true);
+}
+
+
+static int cc_hash_final(struct ahash_request *req)
+{
+	return cc_do_finup(req, false);
+}
+
+static int cc_hash_init(struct ahash_request *req)
+{
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
+
+	cc_init_req(dev, state, ctx);
+
+	return 0;
+}
+
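+/*
+ * HMAC setkey: keys longer than the block size are first hashed down and
+ * zero padded to a full block, shorter keys are copied and zero padded,
+ * and a zero-length key yields an all-zero block. The block is then XORed
+ * with the ipad/opad constants and hashed once per constant to precompute
+ * the inner and outer intermediate digests used by the HMAC flows.
+ */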
+static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
+			  unsigned int keylen)
+{
+	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+	struct cc_crypto_req cc_req = {};
+	struct cc_hash_ctx *ctx = NULL;
+	int blocksize = 0;
+	int digestsize = 0;
+	int i, idx = 0, rc = 0;
+	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+	cc_sram_addr_t larval_addr;
+	struct device *dev;
+
+	ctx = crypto_ahash_ctx(ahash);
+	dev = drvdata_to_dev(ctx->drvdata);
+	dev_dbg(dev, "start keylen: %d", keylen);
+
+	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+	digestsize = crypto_ahash_digestsize(ahash);
+
+	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
+
+	/* The keylen value distinguishes HASH from HMAC: a keylen of ZERO
+	 * bytes means plain HASH, any NON-ZERO value selects the HMAC flow
+	 */
+	ctx->key_params.keylen = keylen;
+	ctx->key_params.key_dma_addr = 0;
+	ctx->is_hmac = true;
+
+	if (keylen) {
+		ctx->key_params.key_dma_addr =
+			dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
+			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+				key, keylen);
+			return -ENOMEM;
+		}
+		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+		if (keylen > blocksize) {
+			/* Load hash initial state */
+			hw_desc_init(&desc[idx]);
+			set_cipher_mode(&desc[idx], ctx->hw_mode);
+			set_din_sram(&desc[idx], larval_addr,
+				     ctx->inter_digestsize);
+			set_flow_mode(&desc[idx], S_DIN_to_HASH);
+			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+			idx++;
+
+			/* Load the hash current length */
+			hw_desc_init(&desc[idx]);
+			set_cipher_mode(&desc[idx], ctx->hw_mode);
+			set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+			set_flow_mode(&desc[idx], S_DIN_to_HASH);
+			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+			idx++;
+
+			hw_desc_init(&desc[idx]);
+			set_din_type(&desc[idx], DMA_DLLI,
+				     ctx->key_params.key_dma_addr, keylen,
+				     NS_BIT);
+			set_flow_mode(&desc[idx], DIN_HASH);
+			idx++;
+
+			/* Get hashed key */
+			hw_desc_init(&desc[idx]);
+			set_cipher_mode(&desc[idx], ctx->hw_mode);
+			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+				      digestsize, NS_BIT, 0);
+			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+			cc_set_endianity(ctx->hash_mode, &desc[idx]);
+			idx++;
+
+			hw_desc_init(&desc[idx]);
+			set_din_const(&desc[idx], 0, (blocksize - digestsize));
+			set_flow_mode(&desc[idx], BYPASS);
+			set_dout_dlli(&desc[idx],
+				      (ctx->opad_tmp_keys_dma_addr +
+				       digestsize),
+				      (blocksize - digestsize), NS_BIT, 0);
+			idx++;
+		} else {
+			hw_desc_init(&desc[idx]);
+			set_din_type(&desc[idx], DMA_DLLI,
+				     ctx->key_params.key_dma_addr, keylen,
+				     NS_BIT);
+			set_flow_mode(&desc[idx], BYPASS);
+			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+				      keylen, NS_BIT, 0);
+			idx++;
+
+			if ((blocksize - keylen)) {
+				hw_desc_init(&desc[idx]);
+				set_din_const(&desc[idx], 0,
+					      (blocksize - keylen));
+				set_flow_mode(&desc[idx], BYPASS);
+				set_dout_dlli(&desc[idx],
+					      (ctx->opad_tmp_keys_dma_addr +
+					       keylen), (blocksize - keylen),
+					      NS_BIT, 0);
+				idx++;
+			}
+		}
+	} else {
+		hw_desc_init(&desc[idx]);
+		set_din_const(&desc[idx], 0, blocksize);
+		set_flow_mode(&desc[idx], BYPASS);
+		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
+			      blocksize, NS_BIT, 0);
+		idx++;
+	}
+
+	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+	if (rc) {
+		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+		goto out;
+	}
+
+	/* calc derived HMAC key */
+	for (idx = 0, i = 0; i < 2; i++) {
+		/* Load hash initial state */
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+		idx++;
+
+		/* Load the hash current length */
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+		idx++;
+
+		/* Prepare ipad key */
+		hw_desc_init(&desc[idx]);
+		set_xor_val(&desc[idx], hmac_pad_const[i]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+		idx++;
+
+		/* Perform HASH update */
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
+			     blocksize, NS_BIT);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_xor_active(&desc[idx]);
+		set_flow_mode(&desc[idx], DIN_HASH);
+		idx++;
+
+		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
+		 * of the first HASH "update" state)
+		 */
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		if (i > 0) /* Not first iteration */
+			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+				      ctx->inter_digestsize, NS_BIT, 0);
+		else /* First iteration */
+			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
+				      ctx->inter_digestsize, NS_BIT, 0);
+		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+		idx++;
+	}
+
+	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+
+out:
+	if (rc)
+		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+	if (ctx->key_params.key_dma_addr) {
+		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
+				 ctx->key_params.keylen, DMA_TO_DEVICE);
+		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+	}
+	return rc;
+}
+
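+/*
+ * AES-XCBC setkey: the user key is loaded into the AES engine in ECB
+ * encrypt mode and the constants 0x01..01, 0x02..02 and 0x03..03 are
+ * encrypted to derive the K1, K2 and K3 subkeys, which are stored at the
+ * XCBC_MAC_K1/K2/K3_OFFSET slots of the opad_tmp_keys buffer.
+ */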
+static int cc_xcbc_setkey(struct crypto_ahash *ahash,
+			  const u8 *key, unsigned int keylen)
+{
+	struct cc_crypto_req cc_req = {};
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	int rc = 0;
+	unsigned int idx = 0;
+	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+
+	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_192:
+	case AES_KEYSIZE_256:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ctx->key_params.keylen = keylen;
+
+	ctx->key_params.key_dma_addr =
+		dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
+		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+			key, keylen);
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+	ctx->is_hmac = true;
+	/* 1. Load the AES key */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
+		     keylen, NS_BIT);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_key_size_aes(&desc[idx], keylen);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	idx++;
+
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
+	set_dout_dlli(&desc[idx],
+		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+	idx++;
+
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
+	set_dout_dlli(&desc[idx],
+		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
+		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+	idx++;
+
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
+	set_dout_dlli(&desc[idx],
+		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
+		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+	idx++;
+
+	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+
+	if (rc)
+		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
+			 ctx->key_params.keylen, DMA_TO_DEVICE);
+	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+	return rc;
+}
+
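+/*
+ * AES-CMAC setkey: no subkeys are derived here; the key is only copied
+ * into the pre-mapped opad_tmp_keys buffer (192-bit keys are zero padded
+ * up to the maximum AES key size) and later handed to the CMAC engine
+ * as-is, so no descriptors need to be sent.
+ */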
+static int cc_cmac_setkey(struct crypto_ahash *ahash,
+			  const u8 *key, unsigned int keylen)
+{
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
+
+	ctx->is_hmac = true;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_192:
+	case AES_KEYSIZE_256:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ctx->key_params.keylen = keylen;
+
+	/* STAT_PHASE_1: Copy key to ctx */
+
+	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
+				keylen, DMA_TO_DEVICE);
+
+	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
+	if (keylen == 24) {
+		memset(ctx->opad_tmp_keys_buff + 24, 0,
+		       CC_AES_KEY_SIZE_MAX - 24);
+	}
+
+	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
+				   keylen, DMA_TO_DEVICE);
+
+	ctx->key_params.keylen = keylen;
+
+	return 0;
+}
+
+static void cc_free_ctx(struct cc_hash_ctx *ctx)
+{
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	if (ctx->digest_buff_dma_addr) {
+		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
+				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+			&ctx->digest_buff_dma_addr);
+		ctx->digest_buff_dma_addr = 0;
+	}
+	if (ctx->opad_tmp_keys_dma_addr) {
+		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
+				 sizeof(ctx->opad_tmp_keys_buff),
+				 DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
+			&ctx->opad_tmp_keys_dma_addr);
+		ctx->opad_tmp_keys_dma_addr = 0;
+	}
+
+	ctx->key_params.keylen = 0;
+}
+
+static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
+{
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	ctx->key_params.keylen = 0;
+
+	ctx->digest_buff_dma_addr =
+		dma_map_single(dev, (void *)ctx->digest_buff,
+			       sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
+		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
+			sizeof(ctx->digest_buff), ctx->digest_buff);
+		goto fail;
+	}
+	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
+		sizeof(ctx->digest_buff), ctx->digest_buff,
+		&ctx->digest_buff_dma_addr);
+
+	ctx->opad_tmp_keys_dma_addr =
+		dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
+			       sizeof(ctx->opad_tmp_keys_buff),
+			       DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
+		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
+			sizeof(ctx->opad_tmp_keys_buff),
+			ctx->opad_tmp_keys_buff);
+		goto fail;
+	}
+	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
+		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
+		&ctx->opad_tmp_keys_dma_addr);
+
+	ctx->is_hmac = false;
+	return 0;
+
+fail:
+	cc_free_ctx(ctx);
+	return -ENOMEM;
+}
+
+static int cc_cra_init(struct crypto_tfm *tfm)
+{
+	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct hash_alg_common *hash_alg_common =
+		container_of(tfm->__crt_alg, struct hash_alg_common, base);
+	struct ahash_alg *ahash_alg =
+		container_of(hash_alg_common, struct ahash_alg, halg);
+	struct cc_hash_alg *cc_alg =
+			container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct ahash_req_ctx));
+
+	ctx->hash_mode = cc_alg->hash_mode;
+	ctx->hw_mode = cc_alg->hw_mode;
+	ctx->inter_digestsize = cc_alg->inter_digestsize;
+	ctx->drvdata = cc_alg->drvdata;
+
+	return cc_alloc_ctx(ctx);
+}
+
+static void cc_cra_exit(struct crypto_tfm *tfm)
+{
+	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	dev_dbg(dev, "cc_cra_exit");
+	cc_free_ctx(ctx);
+}
+
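+/*
+ * MAC (XCBC/CMAC) update: mirrors cc_hash_update() but runs the data
+ * through the AES flow; short trailing data is buffered (rc == 1) and the
+ * intermediate MAC block state is written back to the request context.
+ */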
+static int cc_mac_update(struct ahash_request *req)
+{
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+	struct cc_crypto_req cc_req = {};
+	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+	int rc;
+	u32 idx = 0;
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	if (req->nbytes == 0) {
+		/* no real updates required */
+		return 0;
+	}
+
+	state->xcbc_count++;
+
+	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
+					req->nbytes, block_size, flags);
+	if (rc) {
+		if (rc == 1) {
+			dev_dbg(dev, "data size does not require HW update %x\n",
+				req->nbytes);
+			/* No hardware updates are required */
+			return 0;
+		}
+		dev_err(dev, "map_ahash_request_update() failed\n");
+		return -ENOMEM;
+	}
+
+	if (cc_map_req(dev, state, ctx)) {
+		dev_err(dev, "map_ahash_source() failed\n");
+		return -EINVAL;
+	}
+
+	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
+		cc_setup_xcbc(req, desc, &idx);
+	else
+		cc_setup_cmac(req, desc, &idx);
+
+	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
+
+	/* store the hash digest result in context */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+		      ctx->inter_digestsize, NS_BIT, 1);
+	set_queue_last_ind(ctx->drvdata, &desc[idx]);
+	set_flow_mode(&desc[idx], S_AES_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	idx++;
+
+	/* Setup request structure */
+	cc_req.user_cb = (void *)cc_update_complete;
+	cc_req.user_arg = (void *)req;
+
+	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+		cc_unmap_hash_request(dev, state, req->src, true);
+		cc_unmap_req(dev, state, ctx);
+	}
+	return rc;
+}
+
+static int cc_mac_final(struct ahash_request *req)
+{
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	struct cc_crypto_req cc_req = {};
+	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+	int idx = 0;
+	int rc = 0;
+	u32 key_size, key_len;
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+	gfp_t flags = cc_gfp_flags(&req->base);
+	u32 rem_cnt = *cc_hash_buf_cnt(state);
+
+	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+		key_size = CC_AES_128_BIT_KEY_SIZE;
+		key_len  = CC_AES_128_BIT_KEY_SIZE;
+	} else {
+		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
+			ctx->key_params.keylen;
+		key_len =  ctx->key_params.keylen;
+	}
+
+	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
+
+	if (cc_map_req(dev, state, ctx)) {
+		dev_err(dev, "map_ahash_source() failed\n");
+		return -EINVAL;
+	}
+
+	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+				      req->nbytes, 0, flags)) {
+		dev_err(dev, "map_ahash_request_final() failed\n");
+		cc_unmap_req(dev, state, ctx);
+		return -ENOMEM;
+	}
+
+	if (cc_map_result(dev, state, digestsize)) {
+		dev_err(dev, "map_ahash_digest() failed\n");
+		cc_unmap_hash_request(dev, state, req->src, true);
+		cc_unmap_req(dev, state, ctx);
+		return -ENOMEM;
+	}
+
+	/* Setup request structure */
+	cc_req.user_cb = (void *)cc_hash_complete;
+	cc_req.user_arg = (void *)req;
+
+	if (state->xcbc_count && rem_cnt == 0) {
+		/* Load key for ECB decryption */
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+			     key_size, NS_BIT);
+		set_key_size_aes(&desc[idx], key_len);
+		set_flow_mode(&desc[idx], S_DIN_to_AES);
+		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+		idx++;
+
+		/* Initiate decryption of block state to previous
+		 * block_state-XOR-M[n]
+		 */
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+			     CC_AES_BLOCK_SIZE, NS_BIT);
+		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
+		set_flow_mode(&desc[idx], DIN_AES_DOUT);
+		idx++;
+
+		/* Memory Barrier: wait for axi write to complete */
+		hw_desc_init(&desc[idx]);
+		set_din_no_dma(&desc[idx], 0, 0xfffff0);
+		set_dout_no_dma(&desc[idx], 0, 0, 1);
+		idx++;
+	}
+
+	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
+		cc_setup_xcbc(req, desc, &idx);
+	else
+		cc_setup_cmac(req, desc, &idx);
+
+	if (state->xcbc_count == 0) {
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_key_size_aes(&desc[idx], key_len);
+		set_cmac_size0_mode(&desc[idx]);
+		set_flow_mode(&desc[idx], S_DIN_to_AES);
+		idx++;
+	} else if (rem_cnt > 0) {
+		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+	} else {
+		hw_desc_init(&desc[idx]);
+		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
+		set_flow_mode(&desc[idx], DIN_AES_DOUT);
+		idx++;
+	}
+
+	/* Get final MAC result */
+	hw_desc_init(&desc[idx]);
+	/* TODO */
+	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+		      digestsize, NS_BIT, 1);
+	set_queue_last_ind(ctx->drvdata, &desc[idx]);
+	set_flow_mode(&desc[idx], S_AES_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	idx++;
+
+	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+		cc_unmap_hash_request(dev, state, req->src, true);
+		cc_unmap_result(dev, state, digestsize, req->result);
+		cc_unmap_req(dev, state, ctx);
+	}
+	return rc;
+}
+
+static int cc_mac_finup(struct ahash_request *req)
+{
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	struct cc_crypto_req cc_req = {};
+	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+	int idx = 0;
+	int rc = 0;
+	u32 key_len = 0;
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
+	if (state->xcbc_count > 0 && req->nbytes == 0) {
+		dev_dbg(dev, "No data to update. Calling cc_mac_final\n");
+		return cc_mac_final(req);
+	}
+
+	if (cc_map_req(dev, state, ctx)) {
+		dev_err(dev, "map_ahash_source() failed\n");
+		return -EINVAL;
+	}
+
+	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+				      req->nbytes, 1, flags)) {
+		dev_err(dev, "map_ahash_request_final() failed\n");
+		cc_unmap_req(dev, state, ctx);
+		return -ENOMEM;
+	}
+	if (cc_map_result(dev, state, digestsize)) {
+		dev_err(dev, "map_ahash_digest() failed\n");
+		cc_unmap_hash_request(dev, state, req->src, true);
+		cc_unmap_req(dev, state, ctx);
+		return -ENOMEM;
+	}
+
+	/* Setup request structure */
+	cc_req.user_cb = (void *)cc_hash_complete;
+	cc_req.user_arg = (void *)req;
+
+	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+		key_len = CC_AES_128_BIT_KEY_SIZE;
+		cc_setup_xcbc(req, desc, &idx);
+	} else {
+		key_len = ctx->key_params.keylen;
+		cc_setup_cmac(req, desc, &idx);
+	}
+
+	if (req->nbytes == 0) {
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_key_size_aes(&desc[idx], key_len);
+		set_cmac_size0_mode(&desc[idx]);
+		set_flow_mode(&desc[idx], S_DIN_to_AES);
+		idx++;
+	} else {
+		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+	}
+
+	/* Get final MAC result */
+	hw_desc_init(&desc[idx]);
+	/* TODO */
+	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+		      digestsize, NS_BIT, 1);
+	set_queue_last_ind(ctx->drvdata, &desc[idx]);
+	set_flow_mode(&desc[idx], S_AES_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	idx++;
+
+	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+		cc_unmap_hash_request(dev, state, req->src, true);
+		cc_unmap_result(dev, state, digestsize, req->result);
+		cc_unmap_req(dev, state, ctx);
+	}
+	return rc;
+}
+
+static int cc_mac_digest(struct ahash_request *req)
+{
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+	struct cc_crypto_req cc_req = {};
+	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+	u32 key_len;
+	unsigned int idx = 0;
+	int rc;
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	dev_dbg(dev, "===== mac-digest (%d) ====\n", req->nbytes);
+
+	cc_init_req(dev, state, ctx);
+
+	if (cc_map_req(dev, state, ctx)) {
+		dev_err(dev, "map_ahash_source() failed\n");
+		return -ENOMEM;
+	}
+	if (cc_map_result(dev, state, digestsize)) {
+		dev_err(dev, "map_ahash_digest() failed\n");
+		cc_unmap_req(dev, state, ctx);
+		return -ENOMEM;
+	}
+
+	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+				      req->nbytes, 1, flags)) {
+		dev_err(dev, "map_ahash_request_final() failed\n");
+		cc_unmap_req(dev, state, ctx);
+		return -ENOMEM;
+	}
+
+	/* Setup request structure */
+	cc_req.user_cb = (void *)cc_digest_complete;
+	cc_req.user_arg = (void *)req;
+
+	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+		key_len = CC_AES_128_BIT_KEY_SIZE;
+		cc_setup_xcbc(req, desc, &idx);
+	} else {
+		key_len = ctx->key_params.keylen;
+		cc_setup_cmac(req, desc, &idx);
+	}
+
+	if (req->nbytes == 0) {
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_key_size_aes(&desc[idx], key_len);
+		set_cmac_size0_mode(&desc[idx]);
+		set_flow_mode(&desc[idx], S_DIN_to_AES);
+		idx++;
+	} else {
+		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+	}
+
+	/* Get final MAC result */
+	hw_desc_init(&desc[idx]);
+	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
+	set_queue_last_ind(ctx->drvdata, &desc[idx]);
+	set_flow_mode(&desc[idx], S_AES_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	idx++;
+
+	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+		cc_unmap_hash_request(dev, state, req->src, true);
+		cc_unmap_result(dev, state, digestsize, req->result);
+		cc_unmap_req(dev, state, ctx);
+	}
+	return rc;
+}
+
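+/*
+ * Export/import serialize the request state as: the CC_EXPORT_MAGIC
+ * marker, the intermediate digest, the running byte count, the number of
+ * buffered bytes and the buffered bytes themselves. Import validates the
+ * magic and the buffer count before restoring the state.
+ */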
+static int cc_hash_export(struct ahash_request *req, void *out)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	u8 *curr_buff = cc_hash_buf(state);
+	u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
+	const u32 tmp = CC_EXPORT_MAGIC;
+
+	memcpy(out, &tmp, sizeof(u32));
+	out += sizeof(u32);
+
+	memcpy(out, state->digest_buff, ctx->inter_digestsize);
+	out += ctx->inter_digestsize;
+
+	memcpy(out, state->digest_bytes_len, ctx->drvdata->hash_len_sz);
+	out += ctx->drvdata->hash_len_sz;
+
+	memcpy(out, &curr_buff_cnt, sizeof(u32));
+	out += sizeof(u32);
+
+	memcpy(out, curr_buff, curr_buff_cnt);
+
+	return 0;
+}
+
+static int cc_hash_import(struct ahash_request *req, const void *in)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	u32 tmp;
+
+	memcpy(&tmp, in, sizeof(u32));
+	if (tmp != CC_EXPORT_MAGIC)
+		return -EINVAL;
+	in += sizeof(u32);
+
+	cc_init_req(dev, state, ctx);
+
+	memcpy(state->digest_buff, in, ctx->inter_digestsize);
+	in += ctx->inter_digestsize;
+
+	memcpy(state->digest_bytes_len, in, ctx->drvdata->hash_len_sz);
+	in += ctx->drvdata->hash_len_sz;
+
+	/* Sanity check the data as much as possible */
+	memcpy(&tmp, in, sizeof(u32));
+	if (tmp > CC_MAX_HASH_BLCK_SIZE)
+		return -EINVAL;
+	in += sizeof(u32);
+
+	state->buf_cnt[0] = tmp;
+	memcpy(state->buffers[0], in, tmp);
+
+	return 0;
+}
+
+struct cc_hash_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char driver_name[CRYPTO_MAX_ALG_NAME];
+	char mac_name[CRYPTO_MAX_ALG_NAME];
+	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	bool synchronize;
+	struct ahash_alg template_ahash;
+	int hash_mode;
+	int hw_mode;
+	int inter_digestsize;
+	struct cc_drvdata *drvdata;
+	u32 min_hw_rev;
+};
+
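+/*
+ * Worst-case exported state size for a given digest size: digest + length
+ * field + one full block of buffered data + two u32s (the export magic and
+ * the buffered-byte count).
+ */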
+#define CC_STATE_SIZE(_x) \
+	((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
+
+/* hash descriptors */
+static struct cc_hash_template driver_hash[] = {
+	//Asynchronous hash templates
+	{
+		.name = "sha1",
+		.driver_name = "sha1-ccree",
+		.mac_name = "hmac(sha1)",
+		.mac_driver_name = "hmac-sha1-ccree",
+		.blocksize = SHA1_BLOCK_SIZE,
+		.synchronize = false,
+		.template_ahash = {
+			.init = cc_hash_init,
+			.update = cc_hash_update,
+			.final = cc_hash_final,
+			.finup = cc_hash_finup,
+			.digest = cc_hash_digest,
+			.export = cc_hash_export,
+			.import = cc_hash_import,
+			.setkey = cc_hash_setkey,
+			.halg = {
+				.digestsize = SHA1_DIGEST_SIZE,
+				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
+			},
+		},
+		.hash_mode = DRV_HASH_SHA1,
+		.hw_mode = DRV_HASH_HW_SHA1,
+		.inter_digestsize = SHA1_DIGEST_SIZE,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "sha256",
+		.driver_name = "sha256-ccree",
+		.mac_name = "hmac(sha256)",
+		.mac_driver_name = "hmac-sha256-ccree",
+		.blocksize = SHA256_BLOCK_SIZE,
+		.template_ahash = {
+			.init = cc_hash_init,
+			.update = cc_hash_update,
+			.final = cc_hash_final,
+			.finup = cc_hash_finup,
+			.digest = cc_hash_digest,
+			.export = cc_hash_export,
+			.import = cc_hash_import,
+			.setkey = cc_hash_setkey,
+			.halg = {
+				.digestsize = SHA256_DIGEST_SIZE,
+				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
+			},
+		},
+		.hash_mode = DRV_HASH_SHA256,
+		.hw_mode = DRV_HASH_HW_SHA256,
+		.inter_digestsize = SHA256_DIGEST_SIZE,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "sha224",
+		.driver_name = "sha224-ccree",
+		.mac_name = "hmac(sha224)",
+		.mac_driver_name = "hmac-sha224-ccree",
+		.blocksize = SHA224_BLOCK_SIZE,
+		.template_ahash = {
+			.init = cc_hash_init,
+			.update = cc_hash_update,
+			.final = cc_hash_final,
+			.finup = cc_hash_finup,
+			.digest = cc_hash_digest,
+			.export = cc_hash_export,
+			.import = cc_hash_import,
+			.setkey = cc_hash_setkey,
+			.halg = {
+				.digestsize = SHA224_DIGEST_SIZE,
+				.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
+			},
+		},
+		.hash_mode = DRV_HASH_SHA224,
+		.hw_mode = DRV_HASH_HW_SHA256,
+		.inter_digestsize = SHA256_DIGEST_SIZE,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.name = "sha384",
+		.driver_name = "sha384-ccree",
+		.mac_name = "hmac(sha384)",
+		.mac_driver_name = "hmac-sha384-ccree",
+		.blocksize = SHA384_BLOCK_SIZE,
+		.template_ahash = {
+			.init = cc_hash_init,
+			.update = cc_hash_update,
+			.final = cc_hash_final,
+			.finup = cc_hash_finup,
+			.digest = cc_hash_digest,
+			.export = cc_hash_export,
+			.import = cc_hash_import,
+			.setkey = cc_hash_setkey,
+			.halg = {
+				.digestsize = SHA384_DIGEST_SIZE,
+				.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
+			},
+		},
+		.hash_mode = DRV_HASH_SHA384,
+		.hw_mode = DRV_HASH_HW_SHA512,
+		.inter_digestsize = SHA512_DIGEST_SIZE,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "sha512",
+		.driver_name = "sha512-ccree",
+		.mac_name = "hmac(sha512)",
+		.mac_driver_name = "hmac-sha512-ccree",
+		.blocksize = SHA512_BLOCK_SIZE,
+		.template_ahash = {
+			.init = cc_hash_init,
+			.update = cc_hash_update,
+			.final = cc_hash_final,
+			.finup = cc_hash_finup,
+			.digest = cc_hash_digest,
+			.export = cc_hash_export,
+			.import = cc_hash_import,
+			.setkey = cc_hash_setkey,
+			.halg = {
+				.digestsize = SHA512_DIGEST_SIZE,
+				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
+			},
+		},
+		.hash_mode = DRV_HASH_SHA512,
+		.hw_mode = DRV_HASH_HW_SHA512,
+		.inter_digestsize = SHA512_DIGEST_SIZE,
+		.min_hw_rev = CC_HW_REV_712,
+	},
+	{
+		.name = "md5",
+		.driver_name = "md5-ccree",
+		.mac_name = "hmac(md5)",
+		.mac_driver_name = "hmac-md5-ccree",
+		.blocksize = MD5_HMAC_BLOCK_SIZE,
+		.template_ahash = {
+			.init = cc_hash_init,
+			.update = cc_hash_update,
+			.final = cc_hash_final,
+			.finup = cc_hash_finup,
+			.digest = cc_hash_digest,
+			.export = cc_hash_export,
+			.import = cc_hash_import,
+			.setkey = cc_hash_setkey,
+			.halg = {
+				.digestsize = MD5_DIGEST_SIZE,
+				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
+			},
+		},
+		.hash_mode = DRV_HASH_MD5,
+		.hw_mode = DRV_HASH_HW_MD5,
+		.inter_digestsize = MD5_DIGEST_SIZE,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.mac_name = "xcbc(aes)",
+		.mac_driver_name = "xcbc-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_ahash = {
+			.init = cc_hash_init,
+			.update = cc_mac_update,
+			.final = cc_mac_final,
+			.finup = cc_mac_finup,
+			.digest = cc_mac_digest,
+			.setkey = cc_xcbc_setkey,
+			.export = cc_hash_export,
+			.import = cc_hash_import,
+			.halg = {
+				.digestsize = AES_BLOCK_SIZE,
+				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
+			},
+		},
+		.hash_mode = DRV_HASH_NULL,
+		.hw_mode = DRV_CIPHER_XCBC_MAC,
+		.inter_digestsize = AES_BLOCK_SIZE,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+	{
+		.mac_name = "cmac(aes)",
+		.mac_driver_name = "cmac-aes-ccree",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_ahash = {
+			.init = cc_hash_init,
+			.update = cc_mac_update,
+			.final = cc_mac_final,
+			.finup = cc_mac_finup,
+			.digest = cc_mac_digest,
+			.setkey = cc_cmac_setkey,
+			.export = cc_hash_export,
+			.import = cc_hash_import,
+			.halg = {
+				.digestsize = AES_BLOCK_SIZE,
+				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
+			},
+		},
+		.hash_mode = DRV_HASH_NULL,
+		.hw_mode = DRV_CIPHER_CMAC,
+		.inter_digestsize = AES_BLOCK_SIZE,
+		.min_hw_rev = CC_HW_REV_630,
+	},
+};
+
+static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
+					     struct device *dev, bool keyed)
+{
+	struct cc_hash_alg *t_crypto_alg;
+	struct crypto_alg *alg;
+	struct ahash_alg *halg;
+
+	t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
+	if (!t_crypto_alg)
+		return ERR_PTR(-ENOMEM);
+
+	t_crypto_alg->ahash_alg = template->template_ahash;
+	halg = &t_crypto_alg->ahash_alg;
+	alg = &halg->halg.base;
+
+	if (keyed) {
+		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->mac_name);
+		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->mac_driver_name);
+	} else {
+		halg->setkey = NULL;
+		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->name);
+		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->driver_name);
+	}
+	alg->cra_module = THIS_MODULE;
+	alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
+	alg->cra_priority = CC_CRA_PRIO;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_exit = cc_cra_exit;
+
+	alg->cra_init = cc_cra_init;
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+	t_crypto_alg->hash_mode = template->hash_mode;
+	t_crypto_alg->hw_mode = template->hw_mode;
+	t_crypto_alg->inter_digestsize = template->inter_digestsize;
+
+	return t_crypto_alg;
+}
+
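+/*
+ * Copy the digest-length constants and the larval (initial) digest values
+ * of each supported hash into consecutive SRAM offsets; the SHA-384/512
+ * constants are only loaded on HW rev 712 and later. The resulting layout
+ * is what cc_digest_len_addr() and cc_larval_digest_addr() index into.
+ */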
+int cc_init_hash_sram(struct cc_drvdata *drvdata)
+{
+	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
+	cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
+	unsigned int larval_seq_len = 0;
+	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
+	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
+	int rc = 0;
+
+	/* Copy-to-sram digest-len */
+	cc_set_sram_desc(digest_len_init, sram_buff_ofs,
+			 ARRAY_SIZE(digest_len_init), larval_seq,
+			 &larval_seq_len);
+	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+	if (rc)
+		goto init_digest_const_err;
+
+	sram_buff_ofs += sizeof(digest_len_init);
+	larval_seq_len = 0;
+
+	if (large_sha_supported) {
+		/* Copy-to-sram digest-len for sha384/512 */
+		cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
+				 ARRAY_SIZE(digest_len_sha512_init),
+				 larval_seq, &larval_seq_len);
+		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+		if (rc)
+			goto init_digest_const_err;
+
+		sram_buff_ofs += sizeof(digest_len_sha512_init);
+		larval_seq_len = 0;
+	}
+
+	/* The initial digests offset */
+	hash_handle->larval_digest_sram_addr = sram_buff_ofs;
+
+	/* Copy-to-sram initial SHA* digests */
+	cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
+			 larval_seq, &larval_seq_len);
+	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+	if (rc)
+		goto init_digest_const_err;
+	sram_buff_ofs += sizeof(md5_init);
+	larval_seq_len = 0;
+
+	cc_set_sram_desc(sha1_init, sram_buff_ofs,
+			 ARRAY_SIZE(sha1_init), larval_seq,
+			 &larval_seq_len);
+	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+	if (rc)
+		goto init_digest_const_err;
+	sram_buff_ofs += sizeof(sha1_init);
+	larval_seq_len = 0;
+
+	cc_set_sram_desc(sha224_init, sram_buff_ofs,
+			 ARRAY_SIZE(sha224_init), larval_seq,
+			 &larval_seq_len);
+	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+	if (rc)
+		goto init_digest_const_err;
+	sram_buff_ofs += sizeof(sha224_init);
+	larval_seq_len = 0;
+
+	cc_set_sram_desc(sha256_init, sram_buff_ofs,
+			 ARRAY_SIZE(sha256_init), larval_seq,
+			 &larval_seq_len);
+	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+	if (rc)
+		goto init_digest_const_err;
+	sram_buff_ofs += sizeof(sha256_init);
+	larval_seq_len = 0;
+
+	if (large_sha_supported) {
+		cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
+				 (ARRAY_SIZE(sha384_init) * 2), larval_seq,
+				 &larval_seq_len);
+		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+		if (rc)
+			goto init_digest_const_err;
+		sram_buff_ofs += sizeof(sha384_init);
+		larval_seq_len = 0;
+
+		cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
+				 (ARRAY_SIZE(sha512_init) * 2), larval_seq,
+				 &larval_seq_len);
+		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+		if (rc)
+			goto init_digest_const_err;
+	}
+
+init_digest_const_err:
+	return rc;
+}
+
+static void __init cc_swap_dwords(u32 *buf, unsigned long size)
+{
+	int i;
+	u32 tmp;
+
+	for (i = 0; i < size; i += 2) {
+		tmp = buf[i];
+		buf[i] = buf[i + 1];
+		buf[i + 1] = tmp;
+	}
+}
+
+/*
+ * Due to the way the HW works we need to swap every
+ * double word in the SHA384 and SHA512 larval hashes
+ */
+void __init cc_hash_global_init(void)
+{
+	cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
+	cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
+}
+
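+/*
+ * Allocate the SRAM area for the hash constants, program it via
+ * cc_init_hash_sram() and register the ahash algorithms: a keyed
+ * (HMAC/XCBC/CMAC) variant for every template and an unkeyed hash variant
+ * for the non-MAC ones, skipping templates that require a newer HW
+ * revision than the one detected.
+ */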
+int cc_hash_alloc(struct cc_drvdata *drvdata)
+{
+	struct cc_hash_handle *hash_handle;
+	cc_sram_addr_t sram_buff;
+	u32 sram_size_to_alloc;
+	struct device *dev = drvdata_to_dev(drvdata);
+	int rc = 0;
+	int alg;
+
+	hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
+	if (!hash_handle)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&hash_handle->hash_list);
+	drvdata->hash_handle = hash_handle;
+
+	sram_size_to_alloc = sizeof(digest_len_init) +
+			sizeof(md5_init) +
+			sizeof(sha1_init) +
+			sizeof(sha224_init) +
+			sizeof(sha256_init);
+
+	if (drvdata->hw_rev >= CC_HW_REV_712)
+		sram_size_to_alloc += sizeof(digest_len_sha512_init) +
+			sizeof(sha384_init) + sizeof(sha512_init);
+
+	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
+	if (sram_buff == NULL_SRAM_ADDR) {
+		dev_err(dev, "SRAM pool exhausted\n");
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	/* The initial digest-len offset */
+	hash_handle->digest_len_sram_addr = sram_buff;
+
+	/* Must be set before the alg registration as it is being used there */
+	rc = cc_init_hash_sram(drvdata);
+	if (rc) {
+		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
+		goto fail;
+	}
+
+	/* ahash registration */
+	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
+		struct cc_hash_alg *t_alg;
+		int hw_mode = driver_hash[alg].hw_mode;
+
+		/* We either support both HASH and MAC or none */
+		if (driver_hash[alg].min_hw_rev > drvdata->hw_rev)
+			continue;
+
+		/* register hmac version */
+		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
+		if (IS_ERR(t_alg)) {
+			rc = PTR_ERR(t_alg);
+			dev_err(dev, "%s alg allocation failed\n",
+				driver_hash[alg].driver_name);
+			goto fail;
+		}
+		t_alg->drvdata = drvdata;
+
+		rc = crypto_register_ahash(&t_alg->ahash_alg);
+		if (rc) {
+			dev_err(dev, "%s alg registration failed\n",
+				driver_hash[alg].driver_name);
+			kfree(t_alg);
+			goto fail;
+		} else {
+			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+		}
+
+		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
+		    hw_mode == DRV_CIPHER_CMAC)
+			continue;
+
+		/* register hash version */
+		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
+		if (IS_ERR(t_alg)) {
+			rc = PTR_ERR(t_alg);
+			dev_err(dev, "%s alg allocation failed\n",
+				driver_hash[alg].driver_name);
+			goto fail;
+		}
+		t_alg->drvdata = drvdata;
+
+		rc = crypto_register_ahash(&t_alg->ahash_alg);
+		if (rc) {
+			dev_err(dev, "%s alg registration failed\n",
+				driver_hash[alg].driver_name);
+			kfree(t_alg);
+			goto fail;
+		} else {
+			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+		}
+	}
+
+	return 0;
+
+fail:
+	kfree(drvdata->hash_handle);
+	drvdata->hash_handle = NULL;
+	return rc;
+}
+
+int cc_hash_free(struct cc_drvdata *drvdata)
+{
+	struct cc_hash_alg *t_hash_alg, *hash_n;
+	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
+
+	if (hash_handle) {
+		list_for_each_entry_safe(t_hash_alg, hash_n,
+					 &hash_handle->hash_list, entry) {
+			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+			list_del(&t_hash_alg->entry);
+			kfree(t_hash_alg);
+		}
+
+		kfree(hash_handle);
+		drvdata->hash_handle = NULL;
+	}
+	return 0;
+}
+
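+/*
+ * Load the XCBC subkeys into the AES engine: K1 as the cipher key (KEY0),
+ * K2 and K3 into STATE1/STATE2, and the current MAC block state into
+ * STATE0.
+ */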
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+			  unsigned int *seq_size)
+{
+	unsigned int idx = *seq_size;
+	struct ahash_req_ctx *state = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	/* Setup XCBC MAC K1 */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
+					    XCBC_MAC_K1_OFFSET),
+		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	idx++;
+
+	/* Setup XCBC MAC K2 */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
+		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	idx++;
+
+	/* Setup XCBC MAC K3 */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
+		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	idx++;
+
+	/* Loading MAC state */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+		     CC_AES_BLOCK_SIZE, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	idx++;
+	*seq_size = idx;
+}
+
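+/*
+ * Load the stored CMAC key (padded to the maximum AES key size for 192-bit
+ * keys) into KEY0 and the current MAC block state into STATE0 of the AES
+ * engine.
+ */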
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+			  unsigned int *seq_size)
+{
+	unsigned int idx = *seq_size;
+	struct ahash_req_ctx *state = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	/* Setup CMAC Key */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
+		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
+		      ctx->key_params.keylen), NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	idx++;
+
+	/* Load MAC state */
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+		     CC_AES_BLOCK_SIZE, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	idx++;
+	*seq_size = idx;
+}
+
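+/*
+ * Queue the data-in descriptors for the mapped source: a single DLLI
+ * descriptor when the data is contiguous, or a BYPASS descriptor that
+ * copies the MLLI table into SRAM followed by an MLLI descriptor when it
+ * is scattered; CC_DMA_BUF_NULL means there is nothing to process. When
+ * is_not_last_data is set, the last descriptor gets the not-last-data
+ * indication.
+ */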
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
+			struct cc_hash_ctx *ctx, unsigned int flow_mode,
+			struct cc_hw_desc desc[], bool is_not_last_data,
+			unsigned int *seq_size)
+{
+	unsigned int idx = *seq_size;
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     sg_dma_address(areq_ctx->curr_sg),
+			     areq_ctx->curr_sg->length, NS_BIT);
+		set_flow_mode(&desc[idx], flow_mode);
+		idx++;
+	} else {
+		if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+			dev_dbg(dev, " NULL mode\n");
+			/* nothing to build */
+			return;
+		}
+		/* bypass */
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     areq_ctx->mlli_params.mlli_dma_addr,
+			     areq_ctx->mlli_params.mlli_len, NS_BIT);
+		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
+			      areq_ctx->mlli_params.mlli_len);
+		set_flow_mode(&desc[idx], BYPASS);
+		idx++;
+		/* process */
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_MLLI,
+			     ctx->drvdata->mlli_sram_addr,
+			     areq_ctx->mlli_nents, NS_BIT);
+		set_flow_mode(&desc[idx], flow_mode);
+		idx++;
+	}
+	if (is_not_last_data)
+		set_din_not_last_indication(&desc[(idx - 1)]);
+	/* return updated desc sequence size */
+	*seq_size = idx;
+}
+
+static const void *cc_larval_digest(struct device *dev, u32 mode)
+{
+	switch (mode) {
+	case DRV_HASH_MD5:
+		return md5_init;
+	case DRV_HASH_SHA1:
+		return sha1_init;
+	case DRV_HASH_SHA224:
+		return sha224_init;
+	case DRV_HASH_SHA256:
+		return sha256_init;
+	case DRV_HASH_SHA384:
+		return sha384_init;
+	case DRV_HASH_SHA512:
+		return sha512_init;
+	default:
+		dev_err(dev, "Invalid hash mode (%d)\n", mode);
+		return md5_init;
+	}
+}
+
+/*!
+ * Gets the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ *             MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest in SRAM
+ */
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
+{
+	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
+	struct device *dev = drvdata_to_dev(_drvdata);
+
+	switch (mode) {
+	case DRV_HASH_NULL:
+		break; /*Ignore*/
+	case DRV_HASH_MD5:
+		return (hash_handle->larval_digest_sram_addr);
+	case DRV_HASH_SHA1:
+		return (hash_handle->larval_digest_sram_addr +
+			sizeof(md5_init));
+	case DRV_HASH_SHA224:
+		return (hash_handle->larval_digest_sram_addr +
+			sizeof(md5_init) +
+			sizeof(sha1_init));
+	case DRV_HASH_SHA256:
+		return (hash_handle->larval_digest_sram_addr +
+			sizeof(md5_init) +
+			sizeof(sha1_init) +
+			sizeof(sha224_init));
+	case DRV_HASH_SHA384:
+		return (hash_handle->larval_digest_sram_addr +
+			sizeof(md5_init) +
+			sizeof(sha1_init) +
+			sizeof(sha224_init) +
+			sizeof(sha256_init));
+	case DRV_HASH_SHA512:
+		return (hash_handle->larval_digest_sram_addr +
+			sizeof(md5_init) +
+			sizeof(sha1_init) +
+			sizeof(sha224_init) +
+			sizeof(sha256_init) +
+			sizeof(sha384_init));
+	default:
+		dev_err(dev, "Invalid hash mode (%d)\n", mode);
+	}
+
+	/* Return a valid (though incorrect) address to avoid a kernel crash */
+	return hash_handle->larval_digest_sram_addr;
+}
+
+cc_sram_addr_t
+cc_digest_len_addr(void *drvdata, u32 mode)
+{
+	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
+	cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
+
+	switch (mode) {
+	case DRV_HASH_SHA1:
+	case DRV_HASH_SHA224:
+	case DRV_HASH_SHA256:
+	case DRV_HASH_MD5:
+		return digest_len_addr;
+#if (CC_DEV_SHA_MAX > 256)
+	case DRV_HASH_SHA384:
+	case DRV_HASH_SHA512:
+		return  digest_len_addr + sizeof(digest_len_init);
+#endif
+	default:
+		return digest_len_addr; /* to avoid a kernel crash */
+	}
+}
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
new file mode 100644
index 0000000..2e5bf8b
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_hash.h
+ * ARM CryptoCell Hash Crypto API
+ */
+
+#ifndef __CC_HASH_H__
+#define __CC_HASH_H__
+
+#include "cc_buffer_mgr.h"
+
+#define HMAC_IPAD_CONST	0x36363636
+#define HMAC_OPAD_CONST	0x5C5C5C5C
+#define HASH_LEN_SIZE_712 16
+#define HASH_LEN_SIZE_630 8
+#define HASH_MAX_LEN_SIZE HASH_LEN_SIZE_712
+#define CC_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
+#define CC_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
+
+#define XCBC_MAC_K1_OFFSET 0
+#define XCBC_MAC_K2_OFFSET 16
+#define XCBC_MAC_K3_OFFSET 32
+
+#define CC_EXPORT_MAGIC 0xC2EE1070U
+
+/* this struct was taken from drivers/crypto/nx/nx-aes-xcbc.c and it is used
+ * for xcbc/cmac statesize
+ */
+struct aeshash_state {
+	u8 state[AES_BLOCK_SIZE];
+	unsigned int count;
+	u8 buffer[AES_BLOCK_SIZE];
+};
+
+/* ahash state */
+struct ahash_req_ctx {
+	u8 buffers[2][CC_MAX_HASH_BLCK_SIZE] ____cacheline_aligned;
+	u8 digest_result_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+	u8 opad_digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+	u8 digest_bytes_len[HASH_MAX_LEN_SIZE] ____cacheline_aligned;
+	struct async_gen_req_ctx gen_ctx ____cacheline_aligned;
+	enum cc_req_dma_buf_type data_dma_buf_type;
+	dma_addr_t opad_digest_dma_addr;
+	dma_addr_t digest_buff_dma_addr;
+	dma_addr_t digest_bytes_len_dma_addr;
+	dma_addr_t digest_result_dma_addr;
+	u32 buf_cnt[2];
+	u32 buff_index;
+	u32 xcbc_count; /* count xcbc update operations */
+	struct scatterlist buff_sg[2];
+	struct scatterlist *curr_sg;
+	u32 in_nents;
+	u32 mlli_nents;
+	struct mlli_params mlli_params;
+};
+
+static inline u32 *cc_hash_buf_cnt(struct ahash_req_ctx *state)
+{
+	return &state->buf_cnt[state->buff_index];
+}
+
+static inline u8 *cc_hash_buf(struct ahash_req_ctx *state)
+{
+	return state->buffers[state->buff_index];
+}
+
+static inline u32 *cc_next_buf_cnt(struct ahash_req_ctx *state)
+{
+	return &state->buf_cnt[state->buff_index ^ 1];
+}
+
+static inline u8 *cc_next_buf(struct ahash_req_ctx *state)
+{
+	return state->buffers[state->buff_index ^ 1];
+}
+
+int cc_hash_alloc(struct cc_drvdata *drvdata);
+int cc_init_hash_sram(struct cc_drvdata *drvdata);
+int cc_hash_free(struct cc_drvdata *drvdata);
+
+/*!
+ * Gets the address of the initial digest length value in SRAM
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ *             MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 returns the address of the initial digest length in SRAM
+ */
+cc_sram_addr_t
+cc_digest_len_addr(void *drvdata, u32 mode);
+
+/*!
+ * Gets the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ *             MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest in SRAM
+ */
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
+
+void cc_hash_global_init(void);
+
+#endif /*__CC_HASH_H__*/
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
new file mode 100644
index 0000000..616b2e1
--- /dev/null
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_HOST_H__
+#define __CC_HOST_H__
+
+// --------------------------------------
+// BLOCK: HOST_P
+// --------------------------------------
+#define CC_HOST_IRR_REG_OFFSET	0xA00UL
+#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT	0x2UL
+#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT	0x8UL
+#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_GPR0_BIT_SHIFT	0xBUL
+#define CC_HOST_IRR_GPR0_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT	0x13UL
+#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT	0x17UL
+#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE	0x1UL
+#define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET	0xA10UL
+#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT	0x0UL
+#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE	0xCUL
+#define CC_HOST_IMR_REG_OFFSET	0xA04UL
+#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT	0x1UL
+#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT	0x2UL
+#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT	0x8UL
+#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_GPR0_BIT_SHIFT	0xBUL
+#define CC_HOST_IMR_GPR0_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT	0x13UL
+#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT	0x17UL
+#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_ICR_REG_OFFSET	0xA08UL
+#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT	0x2UL
+#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE	0x1UL
+#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT	0x8UL
+#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE	0x1UL
+#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT	0xBUL
+#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE	0x1UL
+#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT	0x13UL
+#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE	0x1UL
+#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT	0x17UL
+#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE	0x1UL
+#define CC_HOST_SIGNATURE_712_REG_OFFSET	0xA24UL
+#define CC_HOST_SIGNATURE_630_REG_OFFSET	0xAC8UL
+#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT	0x0UL
+#define CC_HOST_SIGNATURE_VALUE_BIT_SIZE	0x20UL
+#define CC_HOST_BOOT_REG_OFFSET	0xA28UL
+#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT	0x0UL
+#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT	0x1UL
+#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT	0x2UL
+#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT	0x3UL
+#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT	0x5UL
+#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT	0x6UL
+#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE	0x3UL
+#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT	0x9UL
+#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT	0xAUL
+#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT	0xBUL
+#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT	0xCUL
+#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT	0xDUL
+#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT	0xEUL
+#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT	0xFUL
+#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT	0x10UL
+#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT	0x11UL
+#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT	0x12UL
+#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT	0x13UL
+#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT	0x14UL
+#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT	0x15UL
+#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT	0x16UL
+#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT	0x17UL
+#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT	0x18UL
+#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT	0x19UL
+#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT	0x1AUL
+#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT	0x1BUL
+#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT	0x1CUL
+#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT	0x1DUL
+#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT	0x1EUL
+#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define CC_HOST_VERSION_712_REG_OFFSET	0xA40UL
+#define CC_HOST_VERSION_630_REG_OFFSET	0xAD8UL
+#define CC_HOST_VERSION_VALUE_BIT_SHIFT	0x0UL
+#define CC_HOST_VERSION_VALUE_BIT_SIZE	0x20UL
+#define CC_HOST_KFDE0_VALID_REG_OFFSET	0xA60UL
+#define CC_HOST_KFDE0_VALID_VALUE_BIT_SHIFT	0x0UL
+#define CC_HOST_KFDE0_VALID_VALUE_BIT_SIZE	0x1UL
+#define CC_HOST_KFDE1_VALID_REG_OFFSET	0xA64UL
+#define CC_HOST_KFDE1_VALID_VALUE_BIT_SHIFT	0x0UL
+#define CC_HOST_KFDE1_VALID_VALUE_BIT_SIZE	0x1UL
+#define CC_HOST_KFDE2_VALID_REG_OFFSET	0xA68UL
+#define CC_HOST_KFDE2_VALID_VALUE_BIT_SHIFT	0x0UL
+#define CC_HOST_KFDE2_VALID_VALUE_BIT_SIZE	0x1UL
+#define CC_HOST_KFDE3_VALID_REG_OFFSET	0xA6CUL
+#define CC_HOST_KFDE3_VALID_VALUE_BIT_SHIFT	0x0UL
+#define CC_HOST_KFDE3_VALID_VALUE_BIT_SIZE	0x1UL
+#define CC_HOST_GPR0_REG_OFFSET	0xA70UL
+#define CC_HOST_GPR0_VALUE_BIT_SHIFT	0x0UL
+#define CC_HOST_GPR0_VALUE_BIT_SIZE	0x20UL
+#define CC_GPR_HOST_REG_OFFSET	0xA74UL
+#define CC_GPR_HOST_VALUE_BIT_SHIFT	0x0UL
+#define CC_GPR_HOST_VALUE_BIT_SIZE	0x20UL
+#define CC_HOST_POWER_DOWN_EN_REG_OFFSET	0xA78UL
+#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT	0x0UL
+#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE	0x1UL
+// --------------------------------------
+// BLOCK: HOST_SRAM
+// --------------------------------------
+#define CC_SRAM_DATA_REG_OFFSET	0xF00UL
+#define CC_SRAM_DATA_VALUE_BIT_SHIFT	0x0UL
+#define CC_SRAM_DATA_VALUE_BIT_SIZE	0x20UL
+#define CC_SRAM_ADDR_REG_OFFSET	0xF04UL
+#define CC_SRAM_ADDR_VALUE_BIT_SHIFT	0x0UL
+#define CC_SRAM_ADDR_VALUE_BIT_SIZE	0xFUL
+#define CC_SRAM_DATA_READY_REG_OFFSET	0xF08UL
+#define CC_SRAM_DATA_READY_VALUE_BIT_SHIFT	0x0UL
+#define CC_SRAM_DATA_READY_VALUE_BIT_SIZE	0x1UL
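+
+/*
+ * The *_BIT_SHIFT/*_BIT_SIZE pairs above describe sub-fields of the 32-bit
+ * registers. For example (illustrative only; "irr" is a placeholder for a
+ * value read from the IRR register), the AXI error bit can be extracted as:
+ *
+ *	axi_err = (irr >> CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT) &
+ *		  ((1UL << CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE) - 1);
+ */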
+
+#endif //__CC_HOST_H__
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
new file mode 100644
index 0000000..a091ae5
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -0,0 +1,576 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_HW_QUEUE_DEFS_H__
+#define __CC_HW_QUEUE_DEFS_H__
+
+#include <linux/types.h>
+
+#include "cc_kernel_regs.h"
+#include <linux/bitfield.h>
+
+/******************************************************************************
+ *				DEFINITIONS
+ ******************************************************************************/
+
+#define HW_DESC_SIZE_WORDS		6
+/* Define max. available slots in HW queue */
+#define HW_QUEUE_SLOTS_MAX              15
+
+#define CC_REG_LOW(word, name)  \
+	(CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT)
+
+#define CC_REG_HIGH(word, name) \
+	(CC_REG_LOW(word, name) + \
+	 CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1)
+
+#define CC_GENMASK(word, name) \
+	GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
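+
+/*
+ * For example (values taken from cc_kernel_regs.h), the WORD1 DIN_SIZE field
+ * has shift 0x2 and size 0x18 bits, so:
+ *
+ *	CC_GENMASK(1, DIN_SIZE) == GENMASK(25, 2)
+ *
+ * i.e. a mask covering bits [25:2] of descriptor word 1.
+ */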
+
+#define WORD0_VALUE		CC_GENMASK(0, VALUE)
+#define WORD1_DIN_CONST_VALUE	CC_GENMASK(1, DIN_CONST_VALUE)
+#define WORD1_DIN_DMA_MODE	CC_GENMASK(1, DIN_DMA_MODE)
+#define WORD1_DIN_SIZE		CC_GENMASK(1, DIN_SIZE)
+#define WORD1_NOT_LAST		CC_GENMASK(1, NOT_LAST)
+#define WORD1_NS_BIT		CC_GENMASK(1, NS_BIT)
+#define WORD2_VALUE		CC_GENMASK(2, VALUE)
+#define WORD3_DOUT_DMA_MODE	CC_GENMASK(3, DOUT_DMA_MODE)
+#define WORD3_DOUT_LAST_IND	CC_GENMASK(3, DOUT_LAST_IND)
+#define WORD3_DOUT_SIZE		CC_GENMASK(3, DOUT_SIZE)
+#define WORD3_HASH_XOR_BIT	CC_GENMASK(3, HASH_XOR_BIT)
+#define WORD3_NS_BIT		CC_GENMASK(3, NS_BIT)
+#define WORD3_QUEUE_LAST_IND	CC_GENMASK(3, QUEUE_LAST_IND)
+#define WORD4_ACK_NEEDED	CC_GENMASK(4, ACK_NEEDED)
+#define WORD4_AES_SEL_N_HASH	CC_GENMASK(4, AES_SEL_N_HASH)
+#define WORD4_BYTES_SWAP	CC_GENMASK(4, BYTES_SWAP)
+#define WORD4_CIPHER_CONF0	CC_GENMASK(4, CIPHER_CONF0)
+#define WORD4_CIPHER_CONF1	CC_GENMASK(4, CIPHER_CONF1)
+#define WORD4_CIPHER_CONF2	CC_GENMASK(4, CIPHER_CONF2)
+#define WORD4_CIPHER_DO		CC_GENMASK(4, CIPHER_DO)
+#define WORD4_CIPHER_MODE	CC_GENMASK(4, CIPHER_MODE)
+#define WORD4_CMAC_SIZE0	CC_GENMASK(4, CMAC_SIZE0)
+#define WORD4_DATA_FLOW_MODE	CC_GENMASK(4, DATA_FLOW_MODE)
+#define WORD4_KEY_SIZE		CC_GENMASK(4, KEY_SIZE)
+#define WORD4_SETUP_OPERATION	CC_GENMASK(4, SETUP_OPERATION)
+#define WORD5_DIN_ADDR_HIGH	CC_GENMASK(5, DIN_ADDR_HIGH)
+#define WORD5_DOUT_ADDR_HIGH	CC_GENMASK(5, DOUT_ADDR_HIGH)
+
+/******************************************************************************
+ *				TYPE DEFINITIONS
+ ******************************************************************************/
+
+struct cc_hw_desc {
+	union {
+		u32 word[HW_DESC_SIZE_WORDS];
+		u16 hword[HW_DESC_SIZE_WORDS * 2];
+	};
+};
+
+enum cc_axi_sec {
+	AXI_SECURE = 0,
+	AXI_NOT_SECURE = 1
+};
+
+enum cc_desc_direction {
+	DESC_DIRECTION_ILLEGAL = -1,
+	DESC_DIRECTION_ENCRYPT_ENCRYPT = 0,
+	DESC_DIRECTION_DECRYPT_DECRYPT = 1,
+	DESC_DIRECTION_DECRYPT_ENCRYPT = 3,
+	DESC_DIRECTION_END = S32_MAX,
+};
+
+enum cc_dma_mode {
+	DMA_MODE_NULL		= -1,
+	NO_DMA			= 0,
+	DMA_SRAM		= 1,
+	DMA_DLLI		= 2,
+	DMA_MLLI		= 3,
+	DMA_MODE_END		= S32_MAX,
+};
+
+enum cc_flow_mode {
+	FLOW_MODE_NULL		= -1,
+	/* data flows */
+	BYPASS			= 0,
+	DIN_AES_DOUT		= 1,
+	AES_to_HASH		= 2,
+	AES_and_HASH		= 3,
+	DIN_DES_DOUT		= 4,
+	DES_to_HASH		= 5,
+	DES_and_HASH		= 6,
+	DIN_HASH		= 7,
+	DIN_HASH_and_BYPASS	= 8,
+	AESMAC_and_BYPASS	= 9,
+	AES_to_HASH_and_DOUT	= 10,
+	DIN_RC4_DOUT		= 11,
+	DES_to_HASH_and_DOUT	= 12,
+	AES_to_AES_to_HASH_and_DOUT	= 13,
+	AES_to_AES_to_HASH	= 14,
+	AES_to_HASH_and_AES	= 15,
+	DIN_AES_AESMAC		= 17,
+	HASH_to_DOUT		= 18,
+	/* setup flows */
+	S_DIN_to_AES		= 32,
+	S_DIN_to_AES2		= 33,
+	S_DIN_to_DES		= 34,
+	S_DIN_to_RC4		= 35,
+	S_DIN_to_HASH		= 37,
+	S_AES_to_DOUT		= 38,
+	S_AES2_to_DOUT		= 39,
+	S_RC4_to_DOUT		= 41,
+	S_DES_to_DOUT		= 42,
+	S_HASH_to_DOUT		= 43,
+	SET_FLOW_ID		= 44,
+	FLOW_MODE_END = S32_MAX,
+};
+
+enum cc_setup_op {
+	SETUP_LOAD_NOP		= 0,
+	SETUP_LOAD_STATE0	= 1,
+	SETUP_LOAD_STATE1	= 2,
+	SETUP_LOAD_STATE2	= 3,
+	SETUP_LOAD_KEY0		= 4,
+	SETUP_LOAD_XEX_KEY	= 5,
+	SETUP_WRITE_STATE0	= 8,
+	SETUP_WRITE_STATE1	= 9,
+	SETUP_WRITE_STATE2	= 10,
+	SETUP_WRITE_STATE3	= 11,
+	SETUP_OP_END = S32_MAX,
+};
+
+enum cc_hash_conf_pad {
+	HASH_PADDING_DISABLED = 0,
+	HASH_PADDING_ENABLED = 1,
+	HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
+	HASH_CONFIG1_PADDING_RESERVE32 = S32_MAX,
+};
+
+enum cc_aes_mac_selector {
+	AES_SK = 1,
+	AES_CMAC_INIT = 2,
+	AES_CMAC_SIZE0 = 3,
+	AES_MAC_END = S32_MAX,
+};
+
+#define HW_KEY_MASK_CIPHER_DO	  0x3
+#define HW_KEY_SHIFT_CIPHER_CFG2  2
+
+/* HwCryptoKey[1:0] is mapped to cipher_do[1:0] */
+/* HwCryptoKey[2:3] is mapped to cipher_config2[1:0] */
+enum cc_hw_crypto_key {
+	USER_KEY = 0,			/* 0x0000 */
+	ROOT_KEY = 1,			/* 0x0001 */
+	PROVISIONING_KEY = 2,		/* 0x0010 */ /* ==KCP */
+	SESSION_KEY = 3,		/* 0x0011 */
+	RESERVED_KEY = 4,		/* NA */
+	PLATFORM_KEY = 5,		/* 0x0101 */
+	CUSTOMER_KEY = 6,		/* 0x0110 */
+	KFDE0_KEY = 7,			/* 0x0111 */
+	KFDE1_KEY = 9,			/* 0x1001 */
+	KFDE2_KEY = 10,			/* 0x1010 */
+	KFDE3_KEY = 11,			/* 0x1011 */
+	END_OF_KEYS = S32_MAX,
+};
+
+enum cc_hw_aes_key_size {
+	AES_128_KEY = 0,
+	AES_192_KEY = 1,
+	AES_256_KEY = 2,
+	END_OF_AES_KEYS = S32_MAX,
+};
+
+enum cc_hash_cipher_pad {
+	DO_NOT_PAD = 0,
+	DO_PAD = 1,
+	HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
+};
+
+/*****************************/
+/* Descriptor packing macros */
+/*****************************/
+
+/*
+ * Init a HW descriptor struct
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void hw_desc_init(struct cc_hw_desc *pdesc)
+{
+	memset(pdesc, 0, sizeof(struct cc_hw_desc));
+}
+
+/*
+ * Indicates the end of the current HW descriptor flow and releases the HW engines.
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_queue_last_ind_bit(struct cc_hw_desc *pdesc)
+{
+	pdesc->word[3] |= FIELD_PREP(WORD3_QUEUE_LAST_IND, 1);
+}
+
+/*
+ * Set the DIN field of a HW descriptor
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * @addr: DIN address
+ * @size: Data size in bytes
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_din_type(struct cc_hw_desc *pdesc,
+				enum cc_dma_mode dma_mode, dma_addr_t addr,
+				u32 size, enum cc_axi_sec axi_sec)
+{
+	pdesc->word[0] = (u32)addr;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, ((u16)(addr >> 32)));
+#endif
+	pdesc->word[1] |= FIELD_PREP(WORD1_DIN_DMA_MODE, dma_mode) |
+				FIELD_PREP(WORD1_DIN_SIZE, size) |
+				FIELD_PREP(WORD1_NS_BIT, axi_sec);
+}
+
+/*
+ * Set the DIN field of a HW descriptor to NO DMA mode.
+ * Used for NOP descriptor, register patches and other special modes.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DIN address
+ * @size: Data size in bytes
+ */
+static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
+{
+	pdesc->word[0] = addr;
+	pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size);
+}
+
+/*
+ * Set the DIN field of a HW descriptor to SRAM mode.
+ * Note: No need to check SRAM alignment since host requests do not use SRAM
+ * and the adaptor will enforce the alignment check.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DIN address
+ * @size: Data size in bytes
+ */
+static inline void set_din_sram(struct cc_hw_desc *pdesc, dma_addr_t addr,
+				u32 size)
+{
+	pdesc->word[0] = (u32)addr;
+	pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size) |
+				FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM);
+}
+
+/*
+ * Set the DIN field of a HW descriptor to CONST mode
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @val: DIN const value
+ * @size: Data size in bytes
+ */
+static inline void set_din_const(struct cc_hw_desc *pdesc, u32 val, u32 size)
+{
+	pdesc->word[0] = val;
+	pdesc->word[1] |= FIELD_PREP(WORD1_DIN_CONST_VALUE, 1) |
+			FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM) |
+			FIELD_PREP(WORD1_DIN_SIZE, size);
+}
+
+/*
+ * Set the DIN not last input data indicator
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_din_not_last_indication(struct cc_hw_desc *pdesc)
+{
+	pdesc->word[1] |= FIELD_PREP(WORD1_NOT_LAST, 1);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_dout_type(struct cc_hw_desc *pdesc,
+				 enum cc_dma_mode dma_mode, dma_addr_t addr,
+				 u32 size, enum cc_axi_sec axi_sec)
+{
+	pdesc->word[2] = (u32)addr;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, ((u16)(addr >> 32)));
+#endif
+	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, dma_mode) |
+				FIELD_PREP(WORD3_DOUT_SIZE, size) |
+				FIELD_PREP(WORD3_NS_BIT, axi_sec);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor to DLLI type
+ * The LAST INDICATION is provided by the user
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @last_ind: The last indication bit
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
+				 u32 size, enum cc_axi_sec axi_sec,
+				 u32 last_ind)
+{
+	set_dout_type(pdesc, DMA_DLLI, addr, size, axi_sec);
+	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor to MLLI type
+ * The LAST INDICATION is provided by the user
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @last_ind: The last indication bit
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_dout_mlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
+				 u32 size, enum cc_axi_sec axi_sec,
+				 bool last_ind)
+{
+	set_dout_type(pdesc, DMA_MLLI, addr, size, axi_sec);
+	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor to NO DMA mode.
+ * Used for NOP descriptor, register patches and other special modes.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @write_enable: Enables a write operation to a register
+ */
+static inline void set_dout_no_dma(struct cc_hw_desc *pdesc, u32 addr,
+				   u32 size, bool write_enable)
+{
+	pdesc->word[2] = addr;
+	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_SIZE, size) |
+			FIELD_PREP(WORD3_DOUT_LAST_IND, write_enable);
+}
+
+/*
+ * Set the word for the XOR operation.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @val: xor data value
+ */
+static inline void set_xor_val(struct cc_hw_desc *pdesc, u32 val)
+{
+	pdesc->word[2] = val;
+}
+
+/*
+ * Sets the XOR indicator bit in the descriptor
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_xor_active(struct cc_hw_desc *pdesc)
+{
+	pdesc->word[3] |= FIELD_PREP(WORD3_HASH_XOR_BIT, 1);
+}
+
+/*
+ * Select the AES engine instead of HASH engine when setting up combined mode
+ * with AES XCBC MAC
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_AES_SEL_N_HASH, 1);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor to SRAM mode
+ * Note: No need to check SRAM alignment since host requests do not use SRAM
+ * and the adaptor will enforce the alignment check.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ */
+static inline void set_dout_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
+{
+	pdesc->word[2] = addr;
+	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, DMA_SRAM) |
+			FIELD_PREP(WORD3_DOUT_SIZE, size);
+}
+
+/*
+ * Sets the data unit size for XEX mode in data_out_addr[15:0]
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @size: data unit size for XEX mode
+ */
+static inline void set_xex_data_unit_size(struct cc_hw_desc *pdesc, u32 size)
+{
+	pdesc->word[2] = size;
+}
+
+/*
+ * Set the number of rounds for Multi2 in data_out_addr[15:0]
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @num: number of rounds for Multi2
+ */
+static inline void set_multi2_num_rounds(struct cc_hw_desc *pdesc, u32 num)
+{
+	pdesc->word[2] = num;
+}
+
+/*
+ * Set the flow mode.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_flow_mode(struct cc_hw_desc *pdesc,
+				 enum cc_flow_mode mode)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_DATA_FLOW_MODE, mode);
+}
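+
+/*
+ * Usage sketch (illustrative only): building a single BYPASS descriptor that
+ * copies "len" bytes from a DMA address into SRAM using the helpers above;
+ * "src_dma_addr", "dst_sram_addr" and "len" are placeholders.
+ *
+ *	struct cc_hw_desc desc;
+ *
+ *	hw_desc_init(&desc);
+ *	set_din_type(&desc, DMA_DLLI, src_dma_addr, len, AXI_NOT_SECURE);
+ *	set_dout_sram(&desc, dst_sram_addr, len);
+ *	set_flow_mode(&desc, BYPASS);
+ */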
+
+/*
+ * Set the cipher mode.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @mode:  Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
+				   enum drv_cipher_mode mode)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
+}
+
+/*
+ * Set the cipher configuration fields.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
+				      enum drv_crypto_direction mode)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
+}
+
+/*
+ * Set the cipher configuration fields.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @config: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_config1(struct cc_hw_desc *pdesc,
+				      enum cc_hash_conf_pad config)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF1, config);
+}
+
+/*
+ * Set HW key configuration fields.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @hw_key: The HW key slot as defined in enum cc_hw_crypto_key
+ */
+static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc,
+				     enum cc_hw_crypto_key hw_key)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
+				     (hw_key & HW_KEY_MASK_CIPHER_DO)) |
+			FIELD_PREP(WORD4_CIPHER_CONF2,
+				   (hw_key >> HW_KEY_SHIFT_CIPHER_CFG2));
+}
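+
+/*
+ * For example (illustrative): KFDE0_KEY is 7 (0b0111), so the two low bits
+ * (0b11) land in CIPHER_DO and the two high bits (0b01) land in CIPHER_CONF2,
+ * per the HwCryptoKey mapping documented above.
+ */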
+
+/*
+ * Set byte order of all setup-finalize descriptors.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @config: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_bytes_swap(struct cc_hw_desc *pdesc, bool config)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_BYTES_SWAP, config);
+}
+
+/*
+ * Set CMAC_SIZE0 mode.
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_cmac_size0_mode(struct cc_hw_desc *pdesc)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_CMAC_SIZE0, 1);
+}
+
+/*
+ * Set key size descriptor field.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @size: key size in bytes (NOT size code)
+ */
+static inline void set_key_size(struct cc_hw_desc *pdesc, u32 size)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_KEY_SIZE, size);
+}
+
+/*
+ * Set AES key size.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @size: key size in bytes (NOT size code)
+ */
+static inline void set_key_size_aes(struct cc_hw_desc *pdesc, u32 size)
+{
+	set_key_size(pdesc, ((size >> 3) - 2));
+}
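+
+/*
+ * For example: a 16-byte (AES-128) key gives (16 >> 3) - 2 = 0 and a 32-byte
+ * (AES-256) key gives (32 >> 3) - 2 = 2, matching the AES_128_KEY and
+ * AES_256_KEY size codes in enum cc_hw_aes_key_size.
+ */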
+
+/*
+ * Set DES key size.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @size: key size in bytes (NOT size code)
+ */
+static inline void set_key_size_des(struct cc_hw_desc *pdesc, u32 size)
+{
+	set_key_size(pdesc, ((size >> 3) - 1));
+}
+
+/*
+ * Set the descriptor setup mode
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @mode: Any one of the setup modes defined in [CC7x-DESC]
+ */
+static inline void set_setup_mode(struct cc_hw_desc *pdesc,
+				  enum cc_setup_op mode)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, mode);
+}
+
+/*
+ * Set the descriptor cipher DO
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @config: Any one of the cipher do defined in [CC7x-DESC]
+ */
+static inline void set_cipher_do(struct cc_hw_desc *pdesc,
+				 enum cc_hash_cipher_pad config)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
+				(config & HW_KEY_MASK_CIPHER_DO));
+}
+
+#endif /*__CC_HW_QUEUE_DEFS_H__*/
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
new file mode 100644
index 0000000..7694583
--- /dev/null
+++ b/drivers/crypto/ccree/cc_ivgen.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <crypto/ctr.h>
+#include "cc_driver.h"
+#include "cc_ivgen.h"
+#include "cc_request_mgr.h"
+#include "cc_sram_mgr.h"
+#include "cc_buffer_mgr.h"
+
+/* The max. size of pool *MUST* be <= SRAM total size */
+#define CC_IVPOOL_SIZE 1024
+/* The first 32B of the pool are dedicated to the next
+ * encryption "key" & "IV" used for pool regeneration
+ */
+#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
+#define CC_IVPOOL_GEN_SEQ_LEN	4
+
+/**
+ * struct cc_ivgen_ctx - IV pool generation context
+ * @pool:          the start address of the IV pool in internal RAM
+ * @ctr_key:       SRAM address of the pool's encryption key material
+ * @ctr_iv:        SRAM address of the pool's counter IV
+ * @next_iv_ofs:   the offset to the next available IV in the pool
+ * @pool_meta:     virt. address of the initial enc. key/IV
+ * @pool_meta_dma: phys. address of the initial enc. key/IV
+ */
+struct cc_ivgen_ctx {
+	cc_sram_addr_t pool;
+	cc_sram_addr_t ctr_key;
+	cc_sram_addr_t ctr_iv;
+	u32 next_iv_ofs;
+	u8 *pool_meta;
+	dma_addr_t pool_meta_dma;
+};
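+
+/*
+ * SRAM pool layout (illustrative; as set up by cc_init_iv_sram() below):
+ *
+ *	pool + 0  ... pool + 15			AES-128 CTR key
+ *	pool + 16 ... pool + 31			CTR IV
+ *	pool + 32 ... pool + CC_IVPOOL_SIZE - 1	generated IVs handed out
+ *						by cc_get_iv()
+ */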
+
+/*!
+ * Generates CC_IVPOOL_SIZE random bytes by
+ * encrypting zeroes using AES-128-CTR.
+ *
+ * \param ivgen iv-pool context
+ * \param iv_seq IN/OUT array to the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ */
+static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
+			  struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
+{
+	unsigned int idx = *iv_seq_len;
+
+	if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
+		/* The sequence will be longer than allowed */
+		return -EINVAL;
+	}
+	/* Setup key */
+	hw_desc_init(&iv_seq[idx]);
+	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
+	set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
+	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
+	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
+	idx++;
+
+	/* Setup cipher state */
+	hw_desc_init(&iv_seq[idx]);
+	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
+	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
+	set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
+	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
+	idx++;
+
+	/* Perform dummy encrypt to skip first block */
+	hw_desc_init(&iv_seq[idx]);
+	set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
+	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
+	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
+	idx++;
+
+	/* Generate IV pool */
+	hw_desc_init(&iv_seq[idx]);
+	set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
+	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
+	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
+	idx++;
+
+	*iv_seq_len = idx; /* Update sequence length */
+
+	/* queue ordering assures pool readiness */
+	ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;
+
+	return 0;
+}
+
+/*!
+ * Generates the initial pool in SRAM.
+ * This function should be invoked when resuming the driver.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_init_iv_sram(struct cc_drvdata *drvdata)
+{
+	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
+	unsigned int iv_seq_len = 0;
+	int rc;
+
+	/* Generate initial enc. key/iv */
+	get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);
+
+	/* The first 32B are reserved for the enc. key/IV */
+	ivgen_ctx->ctr_key = ivgen_ctx->pool;
+	ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;
+
+	/* Copy initial enc. key and IV to SRAM at a single descriptor */
+	hw_desc_init(&iv_seq[iv_seq_len]);
+	set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
+		     CC_IVPOOL_META_SIZE, NS_BIT);
+	set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
+		      CC_IVPOOL_META_SIZE);
+	set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
+	iv_seq_len++;
+
+	/* Generate initial pool */
+	rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
+	if (rc)
+		return rc;
+
+	/* Fire-and-forget */
+	return send_request_init(drvdata, iv_seq, iv_seq_len);
+}
+
+/*!
+ * Free iv-pool and ivgen context.
+ *
+ * \param drvdata
+ */
+void cc_ivgen_fini(struct cc_drvdata *drvdata)
+{
+	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+	struct device *device = &drvdata->plat_dev->dev;
+
+	if (!ivgen_ctx)
+		return;
+
+	if (ivgen_ctx->pool_meta) {
+		memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
+		dma_free_coherent(device, CC_IVPOOL_META_SIZE,
+				  ivgen_ctx->pool_meta,
+				  ivgen_ctx->pool_meta_dma);
+	}
+
+	ivgen_ctx->pool = NULL_SRAM_ADDR;
+
+	/* release "this" context */
+	kfree(ivgen_ctx);
+}
+
+/*!
+ * Allocates iv-pool and maps resources.
+ * This function generates the first IV pool.
+ *
+ * \param drvdata Driver's private context
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_ivgen_init(struct cc_drvdata *drvdata)
+{
+	struct cc_ivgen_ctx *ivgen_ctx;
+	struct device *device = &drvdata->plat_dev->dev;
+	int rc;
+
+	/* Allocate "this" context */
+	ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
+	if (!ivgen_ctx)
+		return -ENOMEM;
+
+	/* Allocate pool's header for initial enc. key/IV */
+	ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
+						  &ivgen_ctx->pool_meta_dma,
+						  GFP_KERNEL);
+	if (!ivgen_ctx->pool_meta) {
+		dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
+			CC_IVPOOL_META_SIZE);
+		rc = -ENOMEM;
+		goto out;
+	}
+	/* Allocate IV pool in SRAM */
+	ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
+	if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
+		dev_err(device, "SRAM pool exhausted\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	drvdata->ivgen_handle = ivgen_ctx;
+
+	return cc_init_iv_sram(drvdata);
+
+out:
+	cc_ivgen_fini(drvdata);
+	return rc;
+}
+
+/*!
+ * Acquires 16 Bytes IV from the iv-pool
+ *
+ * \param drvdata Driver private context
+ * \param iv_out_dma Array of physical IV out addresses
+ * \param iv_out_dma_len Length of iv_out_dma array (additional elements
+ *                       of iv_out_dma array are ignored)
+ * \param iv_out_size May be 8 or 16 bytes long
+ * \param iv_seq IN/OUT array to the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
+	      unsigned int iv_out_dma_len, unsigned int iv_out_size,
+	      struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
+{
+	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+	unsigned int idx = *iv_seq_len;
+	struct device *dev = drvdata_to_dev(drvdata);
+	unsigned int t;
+
+	if (iv_out_size != CC_AES_IV_SIZE &&
+	    iv_out_size != CTR_RFC3686_IV_SIZE) {
+		return -EINVAL;
+	}
+	if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
+		/* The sequence will be longer than allowed */
+		return -EINVAL;
+	}
+
+	/* check that the number of generated IVs does not exceed the
+	 * size of the DMA IV output address array
+	 */
+	if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
+		/* The sequence will be longer than allowed */
+		return -EINVAL;
+	}
+
+	for (t = 0; t < iv_out_dma_len; t++) {
+		/* Acquire IV from pool */
+		hw_desc_init(&iv_seq[idx]);
+		set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
+					    ivgen_ctx->next_iv_ofs),
+			     iv_out_size);
+		set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
+			      NS_BIT, 0);
+		set_flow_mode(&iv_seq[idx], BYPASS);
+		idx++;
+	}
+
+	/* The bypass operation is followed by the crypto sequence, hence we
+	 * must ensure completion of the bypass write with a barrier descriptor
+	 */
+	hw_desc_init(&iv_seq[idx]);
+	set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
+	set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
+	idx++;
+
+	*iv_seq_len = idx; /* update seq length */
+
+	/* Update iv index */
+	ivgen_ctx->next_iv_ofs += iv_out_size;
+
+	if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
+		dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
+		/* pool is drained -regenerate it! */
+		return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
+	}
+
+	return 0;
+}
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h
new file mode 100644
index 0000000..b6ac169
--- /dev/null
+++ b/drivers/crypto/ccree/cc_ivgen.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_IVGEN_H__
+#define __CC_IVGEN_H__
+
+#include "cc_hw_queue_defs.h"
+
+#define CC_IVPOOL_SEQ_LEN 8
+
+/*!
+ * Allocates iv-pool and maps resources.
+ * This function generates the first IV pool.
+ *
+ * \param drvdata Driver's private context
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_ivgen_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Free iv-pool and ivgen context.
+ *
+ * \param drvdata
+ */
+void cc_ivgen_fini(struct cc_drvdata *drvdata);
+
+/*!
+ * Generates the initial pool in SRAM.
+ * This function should be invoked when resuming the driver.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_init_iv_sram(struct cc_drvdata *drvdata);
+
+/*!
+ * Acquires 16 Bytes IV from the iv-pool
+ *
+ * \param drvdata Driver private context
+ * \param iv_out_dma Array of physical IV out addresses
+ * \param iv_out_dma_len Length of iv_out_dma array (additional elements of
+ *                       iv_out_dma array are ignored)
+ * \param iv_out_size May be 8 or 16 bytes long
+ * \param iv_seq IN/OUT array to the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
+	      unsigned int iv_out_dma_len, unsigned int iv_out_size,
+	      struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len);
+
+#endif /*__CC_IVGEN_H__*/
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
new file mode 100644
index 0000000..8d7262a
--- /dev/null
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_CRYS_KERNEL_H__
+#define __CC_CRYS_KERNEL_H__
+
+// --------------------------------------
+// BLOCK: DSCRPTR
+// --------------------------------------
+#define CC_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET	0xE00UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT	0x0UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE	0x6UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT	0x6UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_SW_RESET_REG_OFFSET	0xE40UL
+#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT	0x0UL
+#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET	0xE60UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT	0x0UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE	0xAUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT	0xAUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE	0xCUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT	0x16UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE	0x3UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET	0xE64UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT	0x0UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_MEASURE_CNTR_REG_OFFSET	0xE68UL
+#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT	0x0UL
+#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE	0x20UL
+#define CC_DSCRPTR_QUEUE_WORD0_REG_OFFSET	0xE80UL
+#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT	0x0UL
+#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE	0x20UL
+#define CC_DSCRPTR_QUEUE_WORD1_REG_OFFSET	0xE84UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT	0x0UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE	0x2UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT	0x2UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE	0x18UL
+#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT	0x1AUL
+#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT	0x1BUL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT	0x1CUL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT	0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT	0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE	0x2UL
+#define CC_DSCRPTR_QUEUE_WORD2_REG_OFFSET	0xE88UL
+#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT	0x0UL
+#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE	0x20UL
+#define CC_DSCRPTR_QUEUE_WORD3_REG_OFFSET	0xE8CUL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT	0x0UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE	0x2UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT	0x2UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE	0x18UL
+#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT	0x1AUL
+#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT	0x1BUL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT	0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT	0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT	0x1FUL
+#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_REG_OFFSET	0xE90UL
+#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT	0x0UL
+#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE	0x6UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT	0x6UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT	0x7UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT	0x8UL
+#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE	0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT	0xAUL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE	0x4UL
+#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT	0xEUL
+#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT	0xFUL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE	0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT	0x11UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE	0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT	0x13UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT	0x14UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE	0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT	0x16UL
+#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE	0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT	0x18UL
+#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE	0x4UL
+#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT	0x1CUL
+#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT	0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT	0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT	0x1FUL
+#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE	0x1UL
+#define CC_DSCRPTR_QUEUE_WORD5_REG_OFFSET	0xE94UL
+#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT	0x0UL
+#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE	0x10UL
+#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT	0x10UL
+#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE	0x10UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET	0xE98UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT	0x0UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE	0xAUL
+#define CC_DSCRPTR_QUEUE_CONTENT_REG_OFFSET	0xE9CUL
+#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT	0x0UL
+#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE	0xAUL
+// --------------------------------------
+// BLOCK: AXI_P
+// --------------------------------------
+#define CC_AXIM_MON_INFLIGHT_REG_OFFSET	0xB00UL
+#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT	0x0UL
+#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE	0x8UL
+#define CC_AXIM_MON_INFLIGHTLAST_REG_OFFSET	0xB40UL
+#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT	0x0UL
+#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE	0x8UL
+#define CC_AXIM_MON_COMP_REG_OFFSET	0xB80UL
+#define CC_AXIM_MON_COMP8_REG_OFFSET	0xBA0UL
+#define CC_AXIM_MON_COMP_VALUE_BIT_SHIFT	0x0UL
+#define CC_AXIM_MON_COMP_VALUE_BIT_SIZE	0x10UL
+#define CC_AXIM_MON_ERR_REG_OFFSET	0xBC4UL
+#define CC_AXIM_MON_ERR_BRESP_BIT_SHIFT	0x0UL
+#define CC_AXIM_MON_ERR_BRESP_BIT_SIZE	0x2UL
+#define CC_AXIM_MON_ERR_BID_BIT_SHIFT	0x2UL
+#define CC_AXIM_MON_ERR_BID_BIT_SIZE	0x4UL
+#define CC_AXIM_MON_ERR_RRESP_BIT_SHIFT	0x10UL
+#define CC_AXIM_MON_ERR_RRESP_BIT_SIZE	0x2UL
+#define CC_AXIM_MON_ERR_RID_BIT_SHIFT	0x12UL
+#define CC_AXIM_MON_ERR_RID_BIT_SIZE	0x4UL
+#define CC_AXIM_CFG_REG_OFFSET	0xBE8UL
+#define CC_AXIM_CFG_BRESPMASK_BIT_SHIFT	0x4UL
+#define CC_AXIM_CFG_BRESPMASK_BIT_SIZE	0x1UL
+#define CC_AXIM_CFG_RRESPMASK_BIT_SHIFT	0x5UL
+#define CC_AXIM_CFG_RRESPMASK_BIT_SIZE	0x1UL
+#define CC_AXIM_CFG_INFLTMASK_BIT_SHIFT	0x6UL
+#define CC_AXIM_CFG_INFLTMASK_BIT_SIZE	0x1UL
+#define CC_AXIM_CFG_COMPMASK_BIT_SHIFT	0x7UL
+#define CC_AXIM_CFG_COMPMASK_BIT_SIZE	0x1UL
+#define CC_AXIM_ACE_CONST_REG_OFFSET	0xBECUL
+#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT	0x0UL
+#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE	0x2UL
+#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT	0x2UL
+#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE	0x2UL
+#define CC_AXIM_ACE_CONST_ARBAR_BIT_SHIFT	0x4UL
+#define CC_AXIM_ACE_CONST_ARBAR_BIT_SIZE	0x2UL
+#define CC_AXIM_ACE_CONST_AWBAR_BIT_SHIFT	0x6UL
+#define CC_AXIM_ACE_CONST_AWBAR_BIT_SIZE	0x2UL
+#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT	0x8UL
+#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE	0x4UL
+#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT	0xCUL
+#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE	0x3UL
+#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT	0xFUL
+#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE	0x3UL
+#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT	0x12UL
+#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE	0x7UL
+#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT	0x19UL
+#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE	0x4UL
+#define CC_AXIM_CACHE_PARAMS_REG_OFFSET	0xBF0UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT	0x0UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE	0x4UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT	0x4UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE	0x4UL
+#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT	0x8UL
+#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE	0x4UL
+#endif	// __CC_CRYS_KERNEL_H__
diff --git a/drivers/crypto/ccree/cc_lli_defs.h b/drivers/crypto/ccree/cc_lli_defs.h
new file mode 100644
index 0000000..64b15ac
--- /dev/null
+++ b/drivers/crypto/ccree/cc_lli_defs.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef _CC_LLI_DEFS_H_
+#define _CC_LLI_DEFS_H_
+
+#include <linux/types.h>
+
+/* Max DLLI size
+ *  AKA CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE
+ */
+#define DLLI_SIZE_BIT_SIZE	0x18
+
+#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF
+
+#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
+#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
+#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
+#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
+#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
+		(2 * LLI_MAX_NUM_OF_DATA_ENTRIES + \
+		 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)
+
+/* Size of entry */
+#define LLI_ENTRY_WORD_SIZE 2
+#define LLI_ENTRY_BYTE_SIZE (LLI_ENTRY_WORD_SIZE * sizeof(u32))
+
+/* Word0[31:0] = ADDR[31:0] */
+#define LLI_WORD0_OFFSET 0
+#define LLI_LADDR_BIT_OFFSET 0
+#define LLI_LADDR_BIT_SIZE 32
+/* Word1[31:16] = ADDR[47:32]; Word1[15:0] = SIZE */
+#define LLI_WORD1_OFFSET 1
+#define LLI_SIZE_BIT_OFFSET 0
+#define LLI_SIZE_BIT_SIZE 16
+#define LLI_HADDR_BIT_OFFSET 16
+#define LLI_HADDR_BIT_SIZE 16
+
+#define LLI_SIZE_MASK GENMASK((LLI_SIZE_BIT_SIZE - 1), LLI_SIZE_BIT_OFFSET)
+#define LLI_HADDR_MASK GENMASK( \
+			       (LLI_HADDR_BIT_OFFSET + LLI_HADDR_BIT_SIZE - 1),\
+				LLI_HADDR_BIT_OFFSET)
+
+static inline void cc_lli_set_addr(u32 *lli_p, dma_addr_t addr)
+{
+	lli_p[LLI_WORD0_OFFSET] = (addr & U32_MAX);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	lli_p[LLI_WORD1_OFFSET] &= ~LLI_HADDR_MASK;
+	lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 32));
+#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+}
+
+static inline void cc_lli_set_size(u32 *lli_p, u16 size)
+{
+	lli_p[LLI_WORD1_OFFSET] &= ~LLI_SIZE_MASK;
+	lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_SIZE_MASK, size);
+}
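+
+/*
+ * Usage sketch (illustrative only): filling one LLI entry, i.e. two
+ * consecutive u32 words, with a buffer address and length; "mlli_table",
+ * "buf_dma_addr" and "buf_len" are placeholders.
+ *
+ *	u32 *entry = mlli_table;	// LLI_ENTRY_WORD_SIZE words per entry
+ *
+ *	cc_lli_set_addr(entry, buf_dma_addr);
+ *	cc_lli_set_size(entry, buf_len);
+ *	entry += LLI_ENTRY_WORD_SIZE;	// advance to the next entry
+ */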
+
+#endif /*_CC_LLI_DEFS_H_*/
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
new file mode 100644
index 0000000..d990f47
--- /dev/null
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_request_mgr.h"
+#include "cc_sram_mgr.h"
+#include "cc_ivgen.h"
+#include "cc_hash.h"
+#include "cc_pm.h"
+
+#define POWER_DOWN_ENABLE 0x01
+#define POWER_DOWN_DISABLE 0x00
+
+const struct dev_pm_ops ccree_pm = {
+	SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
+};
+
+int cc_pm_suspend(struct device *dev)
+{
+	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+	int rc;
+
+	dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
+	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
+	rc = cc_suspend_req_queue(drvdata);
+	if (rc) {
+		dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
+		return rc;
+	}
+	fini_cc_regs(drvdata);
+	cc_clk_off(drvdata);
+	return 0;
+}
+
+int cc_pm_resume(struct device *dev)
+{
+	int rc;
+	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
+	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
+
+	rc = cc_clk_on(drvdata);
+	if (rc) {
+		dev_err(dev, "failed getting clock back on. We're toast.\n");
+		return rc;
+	}
+
+	rc = init_cc_regs(drvdata, false);
+	if (rc) {
+		dev_err(dev, "init_cc_regs (%x)\n", rc);
+		return rc;
+	}
+
+	rc = cc_resume_req_queue(drvdata);
+	if (rc) {
+		dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
+		return rc;
+	}
+
+	/* must come after resuming the queue as it uses the HW queue */
+	cc_init_hash_sram(drvdata);
+
+	cc_init_iv_sram(drvdata);
+	return 0;
+}
+
+int cc_pm_get(struct device *dev)
+{
+	int rc = 0;
+	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (cc_req_queue_suspended(drvdata))
+		rc = pm_runtime_get_sync(dev);
+	else
+		pm_runtime_get_noresume(dev);
+
+	return rc;
+}
+
+int cc_pm_put_suspend(struct device *dev)
+{
+	int rc = 0;
+	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (!cc_req_queue_suspended(drvdata)) {
+		pm_runtime_mark_last_busy(dev);
+		rc = pm_runtime_put_autosuspend(dev);
+	} else {
+		/* Something went wrong */
+		dev_err(dev, "request to suspend already suspended queue");
+		rc = -EBUSY;
+	}
+	return rc;
+}
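+
+/*
+ * Illustrative note: cc_pm_get() and cc_pm_put_suspend() are intended to be
+ * used as a pair around request processing - e.g. the request manager (see
+ * cc_request_mgr.c) takes a runtime PM reference before pushing a descriptor
+ * sequence to the HW queue and drops it once the request completes or fails.
+ */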
+
+int cc_pm_init(struct cc_drvdata *drvdata)
+{
+	int rc = 0;
+	struct device *dev = drvdata_to_dev(drvdata);
+
+	/* must be set before enabling PM to avoid redundant suspending */
+	pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
+	pm_runtime_use_autosuspend(dev);
+	/* activate the PM module */
+	rc = pm_runtime_set_active(dev);
+	if (rc)
+		return rc;
+	/* enable the PM module*/
+	pm_runtime_enable(dev);
+
+	return rc;
+}
+
+void cc_pm_fini(struct cc_drvdata *drvdata)
+{
+	pm_runtime_disable(drvdata_to_dev(drvdata));
+}
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
new file mode 100644
index 0000000..020a540
--- /dev/null
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_pm.h
+ */
+
+#ifndef __CC_POWER_MGR_H__
+#define __CC_POWER_MGR_H__
+
+#include "cc_driver.h"
+
+#define CC_SUSPEND_TIMEOUT 3000
+
+#if defined(CONFIG_PM)
+
+extern const struct dev_pm_ops ccree_pm;
+
+int cc_pm_init(struct cc_drvdata *drvdata);
+void cc_pm_fini(struct cc_drvdata *drvdata);
+int cc_pm_suspend(struct device *dev);
+int cc_pm_resume(struct device *dev);
+int cc_pm_get(struct device *dev);
+int cc_pm_put_suspend(struct device *dev);
+
+#else
+
+static inline int cc_pm_init(struct cc_drvdata *drvdata)
+{
+	return 0;
+}
+
+static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
+
+static inline int cc_pm_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static inline int cc_pm_resume(struct device *dev)
+{
+	return 0;
+}
+
+static inline int cc_pm_get(struct device *dev)
+{
+	return 0;
+}
+
+static inline int cc_pm_put_suspend(struct device *dev)
+{
+	return 0;
+}
+
+#endif
+
+#endif /*__CC_POWER_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
new file mode 100644
index 0000000..83a8aaa
--- /dev/null
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_request_mgr.h"
+#include "cc_ivgen.h"
+#include "cc_pm.h"
+
+#define CC_MAX_POLL_ITER	10
+/* The highest descriptor count in use */
+#define CC_MAX_DESC_SEQ_LEN	23
+
+struct cc_req_mgr_handle {
+	/* Request manager resources */
+	unsigned int hw_queue_size; /* HW capability */
+	unsigned int min_free_hw_slots;
+	unsigned int max_used_sw_slots;
+	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
+	u32 req_queue_head;
+	u32 req_queue_tail;
+	u32 axi_completed;
+	u32 q_free_slots;
+	/* This lock protects access to the HW registers, which
+	 * must be accessed by a single request at a time
+	 */
+	spinlock_t hw_lock;
+	struct cc_hw_desc compl_desc;
+	u8 *dummy_comp_buff;
+	dma_addr_t dummy_comp_buff_dma;
+
+	/* backlog queue */
+	struct list_head backlog;
+	unsigned int bl_len;
+	spinlock_t bl_lock; /* protect backlog queue */
+
+#ifdef COMP_IN_WQ
+	struct workqueue_struct *workq;
+	struct delayed_work compwork;
+#else
+	struct tasklet_struct comptask;
+#endif
+	bool is_runtime_suspended;
+};
+
+struct cc_bl_item {
+	struct cc_crypto_req creq;
+	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
+	unsigned int len;
+	struct list_head list;
+	bool notif;
+};
+
+static void comp_handler(unsigned long devarg);
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work);
+#endif
+
+void cc_req_mgr_fini(struct cc_drvdata *drvdata)
+{
+	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+	struct device *dev = drvdata_to_dev(drvdata);
+
+	if (!req_mgr_h)
+		return; /* Not allocated */
+
+	if (req_mgr_h->dummy_comp_buff_dma) {
+		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
+				  req_mgr_h->dummy_comp_buff_dma);
+	}
+
+	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
+						req_mgr_h->min_free_hw_slots));
+	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
+
+#ifdef COMP_IN_WQ
+	flush_workqueue(req_mgr_h->workq);
+	destroy_workqueue(req_mgr_h->workq);
+#else
+	/* Kill tasklet */
+	tasklet_kill(&req_mgr_h->comptask);
+#endif
+	kzfree(req_mgr_h);
+	drvdata->request_mgr_handle = NULL;
+}
+
+int cc_req_mgr_init(struct cc_drvdata *drvdata)
+{
+	struct cc_req_mgr_handle *req_mgr_h;
+	struct device *dev = drvdata_to_dev(drvdata);
+	int rc = 0;
+
+	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
+	if (!req_mgr_h) {
+		rc = -ENOMEM;
+		goto req_mgr_init_err;
+	}
+
+	drvdata->request_mgr_handle = req_mgr_h;
+
+	spin_lock_init(&req_mgr_h->hw_lock);
+	spin_lock_init(&req_mgr_h->bl_lock);
+	INIT_LIST_HEAD(&req_mgr_h->backlog);
+
+#ifdef COMP_IN_WQ
+	dev_dbg(dev, "Initializing completion workqueue\n");
+	req_mgr_h->workq = create_singlethread_workqueue("ccree");
+	if (!req_mgr_h->workq) {
+		dev_err(dev, "Failed creating work queue\n");
+		rc = -ENOMEM;
+		goto req_mgr_init_err;
+	}
+	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
+#else
+	dev_dbg(dev, "Initializing completion tasklet\n");
+	tasklet_init(&req_mgr_h->comptask, comp_handler,
+		     (unsigned long)drvdata);
+#endif
+	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
+					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
+	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
+	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
+		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
+			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
+		rc = -ENOMEM;
+		goto req_mgr_init_err;
+	}
+	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
+	req_mgr_h->max_used_sw_slots = 0;
+
+	/* Allocate DMA word for "dummy" completion descriptor use */
+	req_mgr_h->dummy_comp_buff =
+		dma_alloc_coherent(dev, sizeof(u32),
+				   &req_mgr_h->dummy_comp_buff_dma,
+				   GFP_KERNEL);
+	if (!req_mgr_h->dummy_comp_buff) {
+		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
+			sizeof(u32));
+		rc = -ENOMEM;
+		goto req_mgr_init_err;
+	}
+
+	/* Init. "dummy" completion descriptor */
+	hw_desc_init(&req_mgr_h->compl_desc);
+	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
+	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
+		      sizeof(u32), NS_BIT, 1);
+	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
+	set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);
+
+	return 0;
+
+req_mgr_init_err:
+	cc_req_mgr_fini(drvdata);
+	return rc;
+}
+
+static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
+			unsigned int seq_len)
+{
+	int i, w;
+	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
+	struct device *dev = drvdata_to_dev(drvdata);
+
+	/*
+	 * We do indeed write all 6 command words to the same
+	 * register. The HW supports this.
+	 */
+
+	for (i = 0; i < seq_len; i++) {
+		for (w = 0; w <= 5; w++)
+			writel_relaxed(seq[i].word[w], reg);
+
+		if (cc_dump_desc)
+			dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+				i, seq[i].word[0], seq[i].word[1],
+				seq[i].word[2], seq[i].word[3],
+				seq[i].word[4], seq[i].word[5]);
+	}
+}
+
+/*!
+ * Completion will take place if and only if user requested completion
+ * by cc_send_sync_request().
+ *
+ * \param dev
+ * \param dx_compl_h The completion event to signal
+ */
+static void request_mgr_complete(struct device *dev, void *dx_compl_h,
+				 int dummy)
+{
+	struct completion *this_compl = dx_compl_h;
+
+	complete(this_compl);
+}
+
+static int cc_queues_status(struct cc_drvdata *drvdata,
+			    struct cc_req_mgr_handle *req_mgr_h,
+			    unsigned int total_seq_len)
+{
+	unsigned long poll_queue;
+	struct device *dev = drvdata_to_dev(drvdata);
+
+	/* SW queue is checked only once as it will not
+	 * be changed during the poll because the spinlock_bh
+	 * is held by the thread
+	 */
+	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
+	    req_mgr_h->req_queue_tail) {
+		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
+			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
+		return -ENOSPC;
+	}
+
+	if (req_mgr_h->q_free_slots >= total_seq_len)
+		return 0;
+
+	/* Wait for space in the HW queue. Poll a constant number of iterations. */
+	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) {
+		req_mgr_h->q_free_slots =
+			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
+		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
+			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
+
+		if (req_mgr_h->q_free_slots >= total_seq_len) {
+			/* If there is enough room, return */
+			return 0;
+		}
+
+		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
+			req_mgr_h->q_free_slots, total_seq_len);
+	}
+	/* No room in the HW queue; try again later */
+	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
+		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
+		req_mgr_h->q_free_slots, total_seq_len);
+	return -ENOSPC;
+}
+
+/*!
+ * Enqueue caller request to crypto hardware.
+ * Needs to be called with the HW lock held and PM running
+ *
+ * \param drvdata
+ * \param cc_req The request to enqueue
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ * \param add_comp If "true": add an artificial dout DMA to mark completion
+ *
+ * \return int Returns -EINPROGRESS or error code
+ */
+static int cc_do_send_request(struct cc_drvdata *drvdata,
+			      struct cc_crypto_req *cc_req,
+			      struct cc_hw_desc *desc, unsigned int len,
+				bool add_comp, bool ivgen)
+{
+	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+	unsigned int used_sw_slots;
+	unsigned int iv_seq_len = 0;
+	unsigned int total_seq_len = len; /* initial sequence length */
+	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
+	struct device *dev = drvdata_to_dev(drvdata);
+	int rc;
+
+	if (ivgen) {
+		dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
+			cc_req->ivgen_dma_addr_len,
+			&cc_req->ivgen_dma_addr[0],
+			&cc_req->ivgen_dma_addr[1],
+			&cc_req->ivgen_dma_addr[2],
+			cc_req->ivgen_size);
+
+		/* Acquire IV from pool */
+		rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
+			       cc_req->ivgen_dma_addr_len,
+			       cc_req->ivgen_size, iv_seq, &iv_seq_len);
+
+		if (rc) {
+			dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
+			return rc;
+		}
+
+		total_seq_len += iv_seq_len;
+	}
+
+	used_sw_slots = ((req_mgr_h->req_queue_head -
+			  req_mgr_h->req_queue_tail) &
+			 (MAX_REQUEST_QUEUE_SIZE - 1));
+	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
+		req_mgr_h->max_used_sw_slots = used_sw_slots;
+
+	/* Enqueue request - must be done while holding the HW lock */
+	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
+	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
+				    (MAX_REQUEST_QUEUE_SIZE - 1);
+	/* TODO: Use circ_buf.h ? */
+
+	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
+
+	/*
+	 * We are about to push a command to the HW via the command registers
+	 * that may reference host memory. We need to issue a memory barrier
+	 * to make sure there are no outstanding memory writes.
+	 */
+	wmb();
+
+	/* STAT_PHASE_4: Push sequence */
+	if (ivgen)
+		enqueue_seq(drvdata, iv_seq, iv_seq_len);
+
+	enqueue_seq(drvdata, desc, len);
+
+	if (add_comp) {
+		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
+		total_seq_len++;
+	}
+
+	if (req_mgr_h->q_free_slots < total_seq_len) {
+		/* This situation should never occur. It may indicate a problem
+		 * with power resume. Set the free slot count to 0 and hope
+		 * for the best.
+		 */
+		dev_err(dev, "HW free slot count mismatch.\n");
+		req_mgr_h->q_free_slots = 0;
+	} else {
+		/* Update the free slots in HW queue */
+		req_mgr_h->q_free_slots -= total_seq_len;
+	}
+
+	/* Operation still in progress */
+	return -EINPROGRESS;
+}
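+
+/*
+ * Illustrative calling pattern (a sketch only, not an additional code path):
+ * callers of cc_do_send_request() are expected to hold the HW lock, keep the
+ * device PM-active and check for queue space first, e.g.:
+ *
+ *	spin_lock_bh(&mgr->hw_lock);
+ *	rc = cc_queues_status(drvdata, mgr, len);
+ *	if (!rc)
+ *		rc = cc_do_send_request(drvdata, cc_req, desc, len,
+ *					false, false);
+ *	spin_unlock_bh(&mgr->hw_lock);
+ *
+ * (rc is -EINPROGRESS on a successful enqueue)
+ */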
+
+static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
+			       struct cc_bl_item *bli)
+{
+	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+
+	spin_lock_bh(&mgr->bl_lock);
+	list_add_tail(&bli->list, &mgr->backlog);
+	++mgr->bl_len;
+	spin_unlock_bh(&mgr->bl_lock);
+	tasklet_schedule(&mgr->comptask);
+}
+
+static void cc_proc_backlog(struct cc_drvdata *drvdata)
+{
+	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+	struct cc_bl_item *bli;
+	struct cc_crypto_req *creq;
+	struct crypto_async_request *req;
+	bool ivgen;
+	unsigned int total_len;
+	struct device *dev = drvdata_to_dev(drvdata);
+	int rc;
+
+	spin_lock(&mgr->bl_lock);
+
+	while (mgr->bl_len) {
+		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+		spin_unlock(&mgr->bl_lock);
+
+		creq = &bli->creq;
+		req = (struct crypto_async_request *)creq->user_arg;
+
+		/*
+		 * Notify the request that we're moving it out of the backlog,
+		 * but only if we haven't done so already.
+		 */
+		if (!bli->notif) {
+			req->complete(req, -EINPROGRESS);
+			bli->notif = true;
+		}
+
+		ivgen = !!creq->ivgen_dma_addr_len;
+		total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
+
+		spin_lock(&mgr->hw_lock);
+
+		rc = cc_queues_status(drvdata, mgr, total_len);
+		if (rc) {
+			/*
+			 * There is still no room in the FIFO for
+			 * this request. Bail out. We'll return here
+			 * on the next completion irq.
+			 */
+			spin_unlock(&mgr->hw_lock);
+			return;
+		}
+
+		rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
+					bli->len, false, ivgen);
+
+		spin_unlock(&mgr->hw_lock);
+
+		if (rc != -EINPROGRESS) {
+			cc_pm_put_suspend(dev);
+			creq->user_cb(dev, req, rc);
+		}
+
+		/* Remove ourselves from the backlog list */
+		spin_lock(&mgr->bl_lock);
+		list_del(&bli->list);
+		--mgr->bl_len;
+	}
+
+	spin_unlock(&mgr->bl_lock);
+}
+
+int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
+		    struct cc_hw_desc *desc, unsigned int len,
+		    struct crypto_async_request *req)
+{
+	int rc;
+	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+	bool ivgen = !!cc_req->ivgen_dma_addr_len;
+	unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
+	struct device *dev = drvdata_to_dev(drvdata);
+	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
+	gfp_t flags = cc_gfp_flags(req);
+	struct cc_bl_item *bli;
+
+	rc = cc_pm_get(dev);
+	if (rc) {
+		dev_err(dev, "cc_pm_get returned %x\n", rc);
+		return rc;
+	}
+
+	spin_lock_bh(&mgr->hw_lock);
+	rc = cc_queues_status(drvdata, mgr, total_len);
+
+#ifdef CC_DEBUG_FORCE_BACKLOG
+	if (backlog_ok)
+		rc = -ENOSPC;
+#endif /* CC_DEBUG_FORCE_BACKLOG */
+
+	if (rc == -ENOSPC && backlog_ok) {
+		spin_unlock_bh(&mgr->hw_lock);
+
+		bli = kmalloc(sizeof(*bli), flags);
+		if (!bli) {
+			cc_pm_put_suspend(dev);
+			return -ENOMEM;
+		}
+
+		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
+		memcpy(&bli->desc, desc, len * sizeof(*desc));
+		bli->len = len;
+		bli->notif = false;
+		cc_enqueue_backlog(drvdata, bli);
+		return -EBUSY;
+	}
+
+	if (!rc)
+		rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
+					ivgen);
+
+	spin_unlock_bh(&mgr->hw_lock);
+	return rc;
+}
+
+int cc_send_sync_request(struct cc_drvdata *drvdata,
+			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
+			 unsigned int len)
+{
+	int rc;
+	struct device *dev = drvdata_to_dev(drvdata);
+	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+
+	init_completion(&cc_req->seq_compl);
+	cc_req->user_cb = request_mgr_complete;
+	cc_req->user_arg = &cc_req->seq_compl;
+
+	rc = cc_pm_get(dev);
+	if (rc) {
+		dev_err(dev, "cc_pm_get returned %x\n", rc);
+		return rc;
+	}
+
+	while (true) {
+		spin_lock_bh(&mgr->hw_lock);
+		rc = cc_queues_status(drvdata, mgr, len + 1);
+
+		if (!rc)
+			break;
+
+		spin_unlock_bh(&mgr->hw_lock);
+		if (rc != -EAGAIN) {
+			cc_pm_put_suspend(dev);
+			return rc;
+		}
+		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
+		reinit_completion(&drvdata->hw_queue_avail);
+	}
+
+	rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
+	spin_unlock_bh(&mgr->hw_lock);
+
+	if (rc != -EINPROGRESS) {
+		cc_pm_put_suspend(dev);
+		return rc;
+	}
+
+	wait_for_completion(&cc_req->seq_compl);
+	return 0;
+}
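+
+/*
+ * Example use of the synchronous path (an illustrative sketch only; the
+ * descriptor contents are placeholders, not a real HW sequence):
+ *
+ *	struct cc_crypto_req creq = {};
+ *	struct cc_hw_desc seq[1];
+ *	int rc;
+ *
+ *	hw_desc_init(&seq[0]);
+ *	... fill in the descriptor fields ...
+ *	rc = cc_send_sync_request(drvdata, &creq, seq, ARRAY_SIZE(seq));
+ *
+ * (returns 0 once the HW signals completion, or a negative error code)
+ */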
+
+/*!
+ * Enqueue caller request to crypto hardware during the init process.
+ * Assumes this function is not called in the middle of a flow,
+ * since we set the QUEUE_LAST_IND flag in the last descriptor.
+ *
+ * \param drvdata
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ *
+ * \return int Returns "0" upon success
+ */
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
+		      unsigned int len)
+{
+	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+	unsigned int total_seq_len = len; /*initial sequence length*/
+	int rc = 0;
+
+	/* Wait for space in the HW and SW FIFOs. Poll for up to FIFO_TIMEOUT.
+	 */
+	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
+	if (rc)
+		return rc;
+
+	set_queue_last_ind(drvdata, &desc[(len - 1)]);
+
+	/*
+	 * We are about to push a command to the HW via the command registers
+	 * that may reference host memory. We need to issue a memory barrier
+	 * to make sure there are no outstanding memory writes.
+	 */
+	wmb();
+	enqueue_seq(drvdata, desc, len);
+
+	/* Update the free slots in HW queue */
+	req_mgr_h->q_free_slots =
+		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
+
+	return 0;
+}
+
+void complete_request(struct cc_drvdata *drvdata)
+{
+	struct cc_req_mgr_handle *request_mgr_handle =
+						drvdata->request_mgr_handle;
+
+	complete(&drvdata->hw_queue_avail);
+#ifdef COMP_IN_WQ
+	queue_delayed_work(request_mgr_handle->workq,
+			   &request_mgr_handle->compwork, 0);
+#else
+	tasklet_schedule(&request_mgr_handle->comptask);
+#endif
+}
+
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work)
+{
+	struct cc_drvdata *drvdata =
+		container_of(work, struct cc_drvdata, compwork.work);
+
+	comp_handler((unsigned long)drvdata);
+}
+#endif
+
+static void proc_completions(struct cc_drvdata *drvdata)
+{
+	struct cc_crypto_req *cc_req;
+	struct device *dev = drvdata_to_dev(drvdata);
+	struct cc_req_mgr_handle *request_mgr_handle =
+						drvdata->request_mgr_handle;
+	unsigned int *tail = &request_mgr_handle->req_queue_tail;
+	unsigned int *head = &request_mgr_handle->req_queue_head;
+
+	while (request_mgr_handle->axi_completed) {
+		request_mgr_handle->axi_completed--;
+
+		/* Dequeue request */
+		if (*head == *tail) {
+			/* We are supposed to handle a completion but our
+			 * queue is empty. This is not normal. Return and
+			 * hope for the best.
+			 */
+			dev_err(dev, "Request queue is empty head == tail %u\n",
+				*head);
+			break;
+		}
+
+		cc_req = &request_mgr_handle->req_queue[*tail];
+
+		if (cc_req->user_cb)
+			cc_req->user_cb(dev, cc_req->user_arg, 0);
+		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
+		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
+		dev_dbg(dev, "Request completed. axi_completed=%d\n",
+			request_mgr_handle->axi_completed);
+		cc_pm_put_suspend(dev);
+	}
+}
+
+static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
+{
+	return FIELD_GET(AXIM_MON_COMP_VALUE,
+			 cc_ioread(drvdata, drvdata->axim_mon_offset));
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void comp_handler(unsigned long devarg)
+{
+	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
+	struct cc_req_mgr_handle *request_mgr_handle =
+						drvdata->request_mgr_handle;
+
+	u32 irq;
+
+	irq = (drvdata->irq & CC_COMP_IRQ_MASK);
+
+	if (irq & CC_COMP_IRQ_MASK) {
+		/* To avoid the interrupt from firing as we unmask it,
+		 * we clear it now
+		 */
+		cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
+
+		/* Avoid race with above clear: Test completion counter
+		 * once more
+		 */
+		request_mgr_handle->axi_completed +=
+				cc_axi_comp_count(drvdata);
+
+		while (request_mgr_handle->axi_completed) {
+			do {
+				proc_completions(drvdata);
+				/* At this point (after proc_completions()),
+				 * request_mgr_handle->axi_completed is 0.
+				 */
+				request_mgr_handle->axi_completed =
+						cc_axi_comp_count(drvdata);
+			} while (request_mgr_handle->axi_completed > 0);
+
+			cc_iowrite(drvdata, CC_REG(HOST_ICR),
+				   CC_COMP_IRQ_MASK);
+
+			request_mgr_handle->axi_completed +=
+					cc_axi_comp_count(drvdata);
+		}
+	}
+	/* After verifying that there is nothing to do,
+	 * unmask the AXI completion interrupt.
+	 */
+	cc_iowrite(drvdata, CC_REG(HOST_IMR),
+		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+
+	cc_proc_backlog(drvdata);
+}
+
+/*
+ * resume the queue configuration - no need to take the lock as this happens
+ * inside the spin lock protection
+ */
+#if defined(CONFIG_PM)
+int cc_resume_req_queue(struct cc_drvdata *drvdata)
+{
+	struct cc_req_mgr_handle *request_mgr_handle =
+		drvdata->request_mgr_handle;
+
+	spin_lock_bh(&request_mgr_handle->hw_lock);
+	request_mgr_handle->is_runtime_suspended = false;
+	spin_unlock_bh(&request_mgr_handle->hw_lock);
+
+	return 0;
+}
+
+/*
+ * suspend the queue configuration. Since it is used for runtime suspend
+ * only, just verify that the queue can be suspended.
+ */
+int cc_suspend_req_queue(struct cc_drvdata *drvdata)
+{
+	struct cc_req_mgr_handle *request_mgr_handle =
+						drvdata->request_mgr_handle;
+
+	/* lock the send_request */
+	spin_lock_bh(&request_mgr_handle->hw_lock);
+	if (request_mgr_handle->req_queue_head !=
+	    request_mgr_handle->req_queue_tail) {
+		spin_unlock_bh(&request_mgr_handle->hw_lock);
+		return -EBUSY;
+	}
+	request_mgr_handle->is_runtime_suspended = true;
+	spin_unlock_bh(&request_mgr_handle->hw_lock);
+
+	return 0;
+}
+
+bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
+{
+	struct cc_req_mgr_handle *request_mgr_handle =
+						drvdata->request_mgr_handle;
+
+	return	request_mgr_handle->is_runtime_suspended;
+}
+
+#endif
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
new file mode 100644
index 0000000..573cb97
--- /dev/null
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_request_mgr.h
+ * Request Manager
+ */
+
+#ifndef __REQUEST_MGR_H__
+#define __REQUEST_MGR_H__
+
+#include "cc_hw_queue_defs.h"
+
+int cc_req_mgr_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Enqueue caller request to crypto hardware.
+ *
+ * \param drvdata
+ * \param cc_req The request to enqueue
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ * \param req The crypto_async_request on whose behalf the sequence is sent;
+ *	  its CRYPTO_TFM_REQ_MAY_BACKLOG flag determines whether the request
+ *	  may be placed on the backlog when the queues are full.
+ *
+ * \return int Returns -EINPROGRESS, -EBUSY (request backlogged) or error
+ */
+int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
+		    struct cc_hw_desc *desc, unsigned int len,
+		    struct crypto_async_request *req);
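+
+/*
+ * Typical asynchronous usage (an illustrative sketch only; my_complete is a
+ * hypothetical callback matching the cc_crypto_req user_cb signature and
+ * async_req is the caller's struct crypto_async_request):
+ *
+ *	cc_req.user_cb = my_complete;
+ *	cc_req.user_arg = async_req;
+ *	rc = cc_send_request(drvdata, &cc_req, desc, len, async_req);
+ *
+ * (-EINPROGRESS: queued, -EBUSY: backlogged, otherwise an error code)
+ */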
+
+int cc_send_sync_request(struct cc_drvdata *drvdata,
+			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
+			 unsigned int len);
+
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
+		      unsigned int len);
+
+void complete_request(struct cc_drvdata *drvdata);
+
+void cc_req_mgr_fini(struct cc_drvdata *drvdata);
+
+#if defined(CONFIG_PM)
+int cc_resume_req_queue(struct cc_drvdata *drvdata);
+
+int cc_suspend_req_queue(struct cc_drvdata *drvdata);
+
+bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
+#endif
+
+#endif /*__REQUEST_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
new file mode 100644
index 0000000..c8c276f
--- /dev/null
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include "cc_driver.h"
+#include "cc_sram_mgr.h"
+
+/**
+ * struct cc_sram_ctx - Internal RAM context manager
+ * @sram_free_offset:   the offset to the non-allocated area
+ */
+struct cc_sram_ctx {
+	cc_sram_addr_t sram_free_offset;
+};
+
+/**
+ * cc_sram_mgr_fini() - Cleanup SRAM pool.
+ *
+ * @drvdata: Associated device driver context
+ */
+void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
+{
+	/* Free "this" context */
+	kfree(drvdata->sram_mgr_handle);
+}
+
+/**
+ * cc_sram_mgr_init() - Initializes SRAM pool.
+ *      The pool starts right at the beginning of SRAM, or right after the
+ *      ROM area on HW revisions older than 712.
+ *      Returns zero for success, negative value otherwise.
+ *
+ * @drvdata: Associated device driver context
+ */
+int cc_sram_mgr_init(struct cc_drvdata *drvdata)
+{
+	struct cc_sram_ctx *ctx;
+	dma_addr_t start = 0;
+	struct device *dev = drvdata_to_dev(drvdata);
+
+	if (drvdata->hw_rev < CC_HW_REV_712) {
+		/* Pool starts after ROM bytes */
+		start = (dma_addr_t)cc_ioread(drvdata,
+					      CC_REG(HOST_SEP_SRAM_THRESHOLD));
+
+		if ((start & 0x3) != 0) {
+			dev_err(dev, "Invalid SRAM offset %pad\n", &start);
+			return -EINVAL;
+		}
+	}
+
+	/* Allocate "this" context */
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->sram_free_offset = start;
+	drvdata->sram_mgr_handle = ctx;
+
+	return 0;
+}
+
+/*!
+ * Allocate a buffer from the SRAM pool.
+ * Note: the caller is responsible for freeing the LAST allocated buffer.
+ * This function does not handle any fragmentation that may occur
+ * due to the order of alloc/free calls.
+ *
+ * \param drvdata
+ * \param size The requested number of bytes to allocate (multiple of 4)
+ */
+cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
+{
+	struct cc_sram_ctx *smgr_ctx = drvdata->sram_mgr_handle;
+	struct device *dev = drvdata_to_dev(drvdata);
+	cc_sram_addr_t p;
+
+	if ((size & 0x3)) {
+		dev_err(dev, "Requested buffer size (%u) is not a multiple of 4\n",
+			size);
+		return NULL_SRAM_ADDR;
+	}
+	if (size > (CC_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) {
+		dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
+			size, smgr_ctx->sram_free_offset);
+		return NULL_SRAM_ADDR;
+	}
+
+	p = smgr_ctx->sram_free_offset;
+	smgr_ctx->sram_free_offset += size;
+	dev_dbg(dev, "Allocated %u B @ %u\n", size, (unsigned int)p);
+	return p;
+}
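+
+/*
+ * Example (an illustrative sketch only): reserving a word-aligned region
+ * from the SRAM pool and checking for failure:
+ *
+ *	cc_sram_addr_t addr = cc_sram_alloc(drvdata, 4 * sizeof(u32));
+ *
+ *	if (addr == NULL_SRAM_ADDR)
+ *		return -ENOMEM;
+ *
+ * NULL_SRAM_ADDR is returned when the pool is exhausted or the requested
+ * size is not a multiple of 4.
+ */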
+
+/**
+ * cc_set_sram_desc() - Create a const descriptor sequence that writes the
+ *	values in the given array into SRAM.
+ * Note: each const value can't exceed word size.
+ *
+ * @src:	  A pointer to the array of words to set as consts.
+ * @dst:	  The target SRAM buffer to set into
+ * @nelement:	  The number of words in the "src" array
+ * @seq:	  A pointer to the given IN/OUT descriptor sequence
+ * @seq_len:	  A pointer to the given IN/OUT sequence length
+ */
+void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
+		      unsigned int nelement, struct cc_hw_desc *seq,
+		      unsigned int *seq_len)
+{
+	u32 i;
+	unsigned int idx = *seq_len;
+
+	for (i = 0; i < nelement; i++, idx++) {
+		hw_desc_init(&seq[idx]);
+		set_din_const(&seq[idx], src[i], sizeof(u32));
+		set_dout_sram(&seq[idx], dst + (i * sizeof(u32)), sizeof(u32));
+		set_flow_mode(&seq[idx], BYPASS);
+	}
+
+	*seq_len = idx;
+}
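+
+/*
+ * Example flow (an illustrative sketch only, typically done at init time):
+ * write a small table of constants into freshly allocated SRAM by building
+ * a BYPASS descriptor sequence and pushing it with send_request_init():
+ *
+ *	struct cc_hw_desc seq[4];
+ *	unsigned int seq_len = 0;
+ *	u32 vals[4] = { 0x1, 0x2, 0x3, 0x4 };
+ *	cc_sram_addr_t dst = cc_sram_alloc(drvdata, sizeof(vals));
+ *
+ *	if (dst == NULL_SRAM_ADDR)
+ *		return -ENOMEM;
+ *	cc_set_sram_desc(vals, dst, ARRAY_SIZE(vals), seq, &seq_len);
+ *	rc = send_request_init(drvdata, seq, seq_len);
+ */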
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
new file mode 100644
index 0000000..d48649f
--- /dev/null
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_SRAM_MGR_H__
+#define __CC_SRAM_MGR_H__
+
+#ifndef CC_CC_SRAM_SIZE
+#define CC_CC_SRAM_SIZE 4096
+#endif
+
+struct cc_drvdata;
+
+/**
+ * Address (offset) within CC internal SRAM
+ */
+
+typedef u64 cc_sram_addr_t;
+
+#define NULL_SRAM_ADDR ((cc_sram_addr_t)-1)
+
+/*!
+ * Initializes SRAM pool.
+ * The first X bytes of SRAM are reserved for ROM usage, hence the pool
+ * starts right after those X bytes.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_sram_mgr_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Uninits SRAM pool.
+ *
+ * \param drvdata
+ */
+void cc_sram_mgr_fini(struct cc_drvdata *drvdata);
+
+/*!
+ * Allocate a buffer from the SRAM pool.
+ * Note: the caller is responsible for freeing the LAST allocated buffer.
+ * This function does not handle any fragmentation that may occur
+ * due to the order of alloc/free calls.
+ *
+ * \param drvdata
+ * \param size The requested number of bytes to allocate (multiple of 4)
+ */
+cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
+
+/**
+ * cc_set_sram_desc() - Create a const descriptor sequence that writes the
+ *	values in the given array into SRAM.
+ * Note: each const value can't exceed word size.
+ *
+ * @src:	  A pointer to the array of words to set as consts.
+ * @dst:	  The target SRAM buffer to set into
+ * @nelement:	  The number of words in the "src" array
+ * @seq:	  A pointer to the given IN/OUT descriptor sequence
+ * @seq_len:	  A pointer to the given IN/OUT sequence length
+ */
+void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
+		      unsigned int nelement, struct cc_hw_desc *seq,
+		      unsigned int *seq_len);
+
+#endif /*__CC_SRAM_MGR_H__*/
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
new file mode 100644
index 0000000..930d82d
--- /dev/null
+++ b/drivers/crypto/chelsio/Kconfig
@@ -0,0 +1,42 @@
+config CRYPTO_DEV_CHELSIO
+	tristate "Chelsio Crypto Co-processor Driver"
+	depends on CHELSIO_T4
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	select CRYPTO_AUTHENC
+	select CRYPTO_GF128MUL
+	---help---
+	  The Chelsio Crypto Co-processor driver for T6 adapters.
+
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
+
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.html>.
+
+	  Please send feedback to <linux-bugs@chelsio.com>.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called chcr.
+
+config CHELSIO_IPSEC_INLINE
+	bool "Chelsio IPSec XFRM Tx crypto offload"
+	depends on CHELSIO_T4
+	depends on CRYPTO_DEV_CHELSIO
+	depends on XFRM_OFFLOAD
+	depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+	default n
+	---help---
+	  Enable support for inline IPSec Tx crypto offload.
+
+config CRYPTO_DEV_CHELSIO_TLS
+	tristate "Chelsio Crypto Inline TLS Driver"
+	depends on CHELSIO_T4
+	depends on TLS
+	select CRYPTO_DEV_CHELSIO
+	---help---
+	  Support Chelsio Inline TLS with the Chelsio crypto accelerator.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called chtls.
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
new file mode 100644
index 0000000..639e571
--- /dev/null
+++ b/drivers/crypto/chelsio/Makefile
@@ -0,0 +1,6 @@
+ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+
+obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
+chcr-objs :=  chcr_core.o chcr_algo.o
+chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
+obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls/
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
new file mode 100644
index 0000000..010bbf6
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -0,0 +1,4325 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written and Maintained by:
+ *	Manoj Malviya (manojmalviya@chelsio.com)
+ *	Atul Gupta (atul.gupta@chelsio.com)
+ *	Jitendra Lulla (jlulla@chelsio.com)
+ *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
+ *	Harsh Jain (harsh@chelsio.com)
+ */
+
+#define pr_fmt(fmt) "chcr:" fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/highmem.h>
+#include <linux/scatterlist.h>
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/gcm.h>
+#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/ctr.h>
+#include <crypto/gf128mul.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "t4fw_api.h"
+#include "t4_msg.h"
+#include "chcr_core.h"
+#include "chcr_algo.h"
+#include "chcr_crypto.h"
+
+#define IV AES_BLOCK_SIZE
+
+static unsigned int sgl_ent_len[] = {
+	0, 0, 16, 24, 40, 48, 64, 72, 88,
+	96, 112, 120, 136, 144, 160, 168, 184,
+	192, 208, 216, 232, 240, 256, 264, 280,
+	288, 304, 312, 328, 336, 352, 360, 376
+};
+
+static unsigned int dsgl_ent_len[] = {
+	0, 32, 32, 48, 48, 64, 64, 80, 80,
+	112, 112, 128, 128, 144, 144, 160, 160,
+	192, 192, 208, 208, 224, 224, 240, 240,
+	272, 272, 288, 288, 304, 304, 320, 320
+};
+
+static u32 round_constant[11] = {
+	0x01000000, 0x02000000, 0x04000000, 0x08000000,
+	0x10000000, 0x20000000, 0x40000000, 0x80000000,
+	0x1B000000, 0x36000000, 0x6C000000
+};
+
+static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+				   unsigned char *input, int err);
+
+static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
+{
+	return ctx->crypto_ctx->aeadctx;
+}
+
+static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
+{
+	return ctx->crypto_ctx->ablkctx;
+}
+
+static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
+{
+	return ctx->crypto_ctx->hmacctx;
+}
+
+static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
+{
+	return gctx->ctx->gcm;
+}
+
+static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
+{
+	return gctx->ctx->authenc;
+}
+
+static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
+{
+	return ctx->dev->u_ctx;
+}
+
+static inline int is_ofld_imm(const struct sk_buff *skb)
+{
+	return (skb->len <= SGE_MAX_WR_LEN);
+}
+
+static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
+{
+	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
+}
+
+static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
+			 unsigned int entlen,
+			 unsigned int skip)
+{
+	int nents = 0;
+	unsigned int less;
+	unsigned int skip_len = 0;
+
+	while (sg && skip) {
+		if (sg_dma_len(sg) <= skip) {
+			skip -= sg_dma_len(sg);
+			skip_len = 0;
+			sg = sg_next(sg);
+		} else {
+			skip_len = skip;
+			skip = 0;
+		}
+	}
+
+	while (sg && reqlen) {
+		less = min(reqlen, sg_dma_len(sg) - skip_len);
+		nents += DIV_ROUND_UP(less, entlen);
+		reqlen -= less;
+		skip_len = 0;
+		sg = sg_next(sg);
+	}
+	return nents;
+}
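+
+/*
+ * Worked example for sg_nents_xlen() (illustrative only, assuming an entlen
+ * of 2048 bytes): a 5000-byte request spread over a 4096-byte and a
+ * 1000-byte DMA-mapped SG element, with no skip, yields
+ * DIV_ROUND_UP(4096, 2048) + DIV_ROUND_UP(904, 2048) = 2 + 1 = 3 entries.
+ */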
+
+static inline int get_aead_subtype(struct crypto_aead *aead)
+{
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct chcr_alg_template *chcr_crypto_alg =
+		container_of(alg, struct chcr_alg_template, alg.aead);
+	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+}
+
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+{
+	u8 temp[SHA512_DIGEST_SIZE];
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	int authsize = crypto_aead_authsize(tfm);
+	struct cpl_fw6_pld *fw6_pld;
+	int cmp = 0;
+
+	fw6_pld = (struct cpl_fw6_pld *)input;
+	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
+	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
+		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
+	} else {
+
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
+				authsize, req->assoclen +
+				req->cryptlen - authsize);
+		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
+	}
+	if (cmp)
+		*err = -EBADMSG;
+	else
+		*err = 0;
+}
+
+static inline void chcr_handle_aead_resp(struct aead_request *req,
+					 unsigned char *input,
+					 int err)
+{
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
+	chcr_aead_common_exit(req);
+	if (reqctx->verify == VERIFY_SW) {
+		chcr_verify_tag(req, input, &err);
+		reqctx->verify = VERIFY_HW;
+	}
+	req->base.complete(&req->base, err);
+}
+
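+/*
+ * get_aes_decrypt_key() runs the standard AES key expansion on @key while
+ * keeping only the most recent Nk schedule words, then copies those words
+ * out to @dec_key most-recent-first. The driver stores the result as the
+ * "reverse round key" (rrkey) used for hardware decryption; see
+ * generate_copy_rrkey().
+ */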
+static void get_aes_decrypt_key(unsigned char *dec_key,
+				       const unsigned char *key,
+				       unsigned int keylength)
+{
+	u32 temp;
+	u32 w_ring[MAX_NK];
+	int i, j, k;
+	u8  nr, nk;
+
+	switch (keylength) {
+	case AES_KEYLENGTH_128BIT:
+		nk = KEYLENGTH_4BYTES;
+		nr = NUMBER_OF_ROUNDS_10;
+		break;
+	case AES_KEYLENGTH_192BIT:
+		nk = KEYLENGTH_6BYTES;
+		nr = NUMBER_OF_ROUNDS_12;
+		break;
+	case AES_KEYLENGTH_256BIT:
+		nk = KEYLENGTH_8BYTES;
+		nr = NUMBER_OF_ROUNDS_14;
+		break;
+	default:
+		return;
+	}
+	for (i = 0; i < nk; i++)
+		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+
+	i = 0;
+	temp = w_ring[nk - 1];
+	while (i + nk < (nr + 1) * 4) {
+		if (!(i % nk)) {
+			/* RotWord(temp) */
+			temp = (temp << 8) | (temp >> 24);
+			temp = aes_ks_subword(temp);
+			temp ^= round_constant[i / nk];
+		} else if (nk == 8 && (i % 4 == 0)) {
+			temp = aes_ks_subword(temp);
+		}
+		w_ring[i % nk] ^= temp;
+		temp = w_ring[i % nk];
+		i++;
+	}
+	i--;
+	for (k = 0, j = i % nk; k < nk; k++) {
+		*((u32 *)dec_key + k) = htonl(w_ring[j]);
+		j--;
+		if (j < 0)
+			j += nk;
+	}
+}
+
+static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
+{
+	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
+
+	switch (ds) {
+	case SHA1_DIGEST_SIZE:
+		base_hash = crypto_alloc_shash("sha1", 0, 0);
+		break;
+	case SHA224_DIGEST_SIZE:
+		base_hash = crypto_alloc_shash("sha224", 0, 0);
+		break;
+	case SHA256_DIGEST_SIZE:
+		base_hash = crypto_alloc_shash("sha256", 0, 0);
+		break;
+	case SHA384_DIGEST_SIZE:
+		base_hash = crypto_alloc_shash("sha384", 0, 0);
+		break;
+	case SHA512_DIGEST_SIZE:
+		base_hash = crypto_alloc_shash("sha512", 0, 0);
+		break;
+	}
+
+	return base_hash;
+}
+
+static int chcr_compute_partial_hash(struct shash_desc *desc,
+				     char *iopad, char *result_hash,
+				     int digest_size)
+{
+	struct sha1_state sha1_st;
+	struct sha256_state sha256_st;
+	struct sha512_state sha512_st;
+	int error;
+
+	if (digest_size == SHA1_DIGEST_SIZE) {
+		error = crypto_shash_init(desc) ?:
+			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
+			crypto_shash_export(desc, (void *)&sha1_st);
+		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
+	} else if (digest_size == SHA224_DIGEST_SIZE) {
+		error = crypto_shash_init(desc) ?:
+			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
+			crypto_shash_export(desc, (void *)&sha256_st);
+		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
+
+	} else if (digest_size == SHA256_DIGEST_SIZE) {
+		error = crypto_shash_init(desc) ?:
+			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
+			crypto_shash_export(desc, (void *)&sha256_st);
+		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
+
+	} else if (digest_size == SHA384_DIGEST_SIZE) {
+		error = crypto_shash_init(desc) ?:
+			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
+			crypto_shash_export(desc, (void *)&sha512_st);
+		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
+
+	} else if (digest_size == SHA512_DIGEST_SIZE) {
+		error = crypto_shash_init(desc) ?:
+			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
+			crypto_shash_export(desc, (void *)&sha512_st);
+		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
+	} else {
+		error = -EINVAL;
+		pr_err("Unknown digest size %d\n", digest_size);
+	}
+	return error;
+}
+
+static void chcr_change_order(char *buf, int ds)
+{
+	int i;
+
+	if (ds == SHA512_DIGEST_SIZE) {
+		for (i = 0; i < (ds / sizeof(u64)); i++)
+			*((__be64 *)buf + i) =
+				cpu_to_be64(*((u64 *)buf + i));
+	} else {
+		for (i = 0; i < (ds / sizeof(u32)); i++)
+			*((__be32 *)buf + i) =
+				cpu_to_be32(*((u32 *)buf + i));
+	}
+}
+
+static inline int is_hmac(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct chcr_alg_template *chcr_crypto_alg =
+		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
+			     alg.hash);
+	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
+		return 1;
+	return 0;
+}
+
+static inline void dsgl_walk_init(struct dsgl_walk *walk,
+				   struct cpl_rx_phys_dsgl *dsgl)
+{
+	walk->dsgl = dsgl;
+	walk->nents = 0;
+	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
+}
+
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+				 int pci_chan_id)
+{
+	struct cpl_rx_phys_dsgl *phys_cpl;
+
+	phys_cpl = walk->dsgl;
+
+	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
+				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
+	phys_cpl->pcirlxorder_to_noofsgentr =
+		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
+		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
+		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
+		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
+		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
+		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
+	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
+	phys_cpl->rss_hdr_int.qid = htons(qid);
+	phys_cpl->rss_hdr_int.hash_val = 0;
+	phys_cpl->rss_hdr_int.channel = pci_chan_id;
+}
+
+static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
+					size_t size,
+					dma_addr_t *addr)
+{
+	int j;
+
+	if (!size)
+		return;
+	j = walk->nents;
+	walk->to->len[j % 8] = htons(size);
+	walk->to->addr[j % 8] = cpu_to_be64(*addr);
+	j++;
+	if ((j % 8) == 0)
+		walk->to++;
+	walk->nents = j;
+}
+
+static void dsgl_walk_add_sg(struct dsgl_walk *walk,
+			     struct scatterlist *sg,
+			     unsigned int slen,
+			     unsigned int skip)
+{
+	int skip_len = 0;
+	unsigned int left_size = slen, len = 0;
+	unsigned int j = walk->nents;
+	int offset, ent_len;
+
+	if (!slen)
+		return;
+	while (sg && skip) {
+		if (sg_dma_len(sg) <= skip) {
+			skip -= sg_dma_len(sg);
+			skip_len = 0;
+			sg = sg_next(sg);
+		} else {
+			skip_len = skip;
+			skip = 0;
+		}
+	}
+
+	while (left_size && sg) {
+		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
+		offset = 0;
+		while (len) {
+			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
+			walk->to->len[j % 8] = htons(ent_len);
+			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
+						      offset + skip_len);
+			offset += ent_len;
+			len -= ent_len;
+			j++;
+			if ((j % 8) == 0)
+				walk->to++;
+		}
+		walk->last_sg = sg;
+		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
+					  skip_len) + skip_len;
+		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
+		skip_len = 0;
+		sg = sg_next(sg);
+	}
+	walk->nents = j;
+}
+
+static inline void ulptx_walk_init(struct ulptx_walk *walk,
+				   struct ulptx_sgl *ulp)
+{
+	walk->sgl = ulp;
+	walk->nents = 0;
+	walk->pair_idx = 0;
+	walk->pair = ulp->sge;
+	walk->last_sg = NULL;
+	walk->last_sg_len = 0;
+}
+
+static inline void ulptx_walk_end(struct ulptx_walk *walk)
+{
+	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+			      ULPTX_NSGE_V(walk->nents));
+}
+
+static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
+					size_t size,
+					dma_addr_t *addr)
+{
+	if (!size)
+		return;
+
+	if (walk->nents == 0) {
+		walk->sgl->len0 = cpu_to_be32(size);
+		walk->sgl->addr0 = cpu_to_be64(*addr);
+	} else {
+		walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
+		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
+		walk->pair_idx = !walk->pair_idx;
+		if (!walk->pair_idx)
+			walk->pair++;
+	}
+	walk->nents++;
+}
+
+static void ulptx_walk_add_sg(struct ulptx_walk *walk,
+			      struct scatterlist *sg,
+			      unsigned int len,
+			      unsigned int skip)
+{
+	int small;
+	int skip_len = 0;
+	unsigned int sgmin;
+
+	if (!len)
+		return;
+	while (sg && skip) {
+		if (sg_dma_len(sg) <= skip) {
+			skip -= sg_dma_len(sg);
+			skip_len = 0;
+			sg = sg_next(sg);
+		} else {
+			skip_len = skip;
+			skip = 0;
+		}
+	}
+	WARN(!sg, "SG should not be null here\n");
+	if (sg && (walk->nents == 0)) {
+		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
+		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
+		walk->sgl->len0 = cpu_to_be32(sgmin);
+		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
+		walk->nents++;
+		len -= sgmin;
+		walk->last_sg = sg;
+		walk->last_sg_len = sgmin + skip_len;
+		skip_len += sgmin;
+		if (sg_dma_len(sg) == skip_len) {
+			sg = sg_next(sg);
+			skip_len = 0;
+		}
+	}
+
+	while (sg && len) {
+		small = min(sg_dma_len(sg) - skip_len, len);
+		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
+		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
+		walk->pair->addr[walk->pair_idx] =
+			cpu_to_be64(sg_dma_address(sg) + skip_len);
+		walk->pair_idx = !walk->pair_idx;
+		walk->nents++;
+		if (!walk->pair_idx)
+			walk->pair++;
+		len -= sgmin;
+		skip_len += sgmin;
+		walk->last_sg = sg;
+		walk->last_sg_len = skip_len;
+		if (sg_dma_len(sg) == skip_len) {
+			sg = sg_next(sg);
+			skip_len = 0;
+		}
+	}
+}
+
+static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct chcr_alg_template *chcr_crypto_alg =
+		container_of(alg, struct chcr_alg_template, alg.crypto);
+
+	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+}
+
+static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
+{
+	struct adapter *adap = netdev2adap(dev);
+	struct sge_uld_txq_info *txq_info =
+		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
+	struct sge_uld_txq *txq;
+	int ret = 0;
+
+	local_bh_disable();
+	txq = &txq_info->uldtxq[idx];
+	spin_lock(&txq->sendq.lock);
+	if (txq->full)
+		ret = -1;
+	spin_unlock(&txq->sendq.lock);
+	local_bh_enable();
+	return ret;
+}
+
+static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
+			       struct _key_ctx *key_ctx)
+{
+	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
+		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
+	} else {
+		memcpy(key_ctx->key,
+		       ablkctx->key + (ablkctx->enckey_len >> 1),
+		       ablkctx->enckey_len >> 1);
+		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
+		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
+	}
+	return 0;
+}
+
+static int chcr_hash_ent_in_wr(struct scatterlist *src,
+			     unsigned int minsg,
+			     unsigned int space,
+			     unsigned int srcskip)
+{
+	int srclen = 0;
+	int srcsg = minsg;
+	int soffset = 0, sless;
+
+	if (sg_dma_len(src) == srcskip) {
+		src = sg_next(src);
+		srcskip = 0;
+	}
+	while (src && space > (sgl_ent_len[srcsg + 1])) {
+		sless = min_t(unsigned int, sg_dma_len(src) - soffset -	srcskip,
+							CHCR_SRC_SG_SIZE);
+		srclen += sless;
+		soffset += sless;
+		srcsg++;
+		if (sg_dma_len(src) == (soffset + srcskip)) {
+			src = sg_next(src);
+			soffset = 0;
+			srcskip = 0;
+		}
+	}
+	return srclen;
+}
+
+static int chcr_sg_ent_in_wr(struct scatterlist *src,
+			     struct scatterlist *dst,
+			     unsigned int minsg,
+			     unsigned int space,
+			     unsigned int srcskip,
+			     unsigned int dstskip)
+{
+	int srclen = 0, dstlen = 0;
+	int srcsg = minsg, dstsg = minsg;
+	int offset = 0, soffset = 0, less, sless = 0;
+
+	if (sg_dma_len(src) == srcskip) {
+		src = sg_next(src);
+		srcskip = 0;
+	}
+	if (sg_dma_len(dst) == dstskip) {
+		dst = sg_next(dst);
+		dstskip = 0;
+	}
+
+	while (src && dst &&
+	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
+		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
+				CHCR_SRC_SG_SIZE);
+		srclen += sless;
+		srcsg++;
+		offset = 0;
+		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
+		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
+			if (srclen <= dstlen)
+				break;
+			less = min_t(unsigned int, sg_dma_len(dst) - offset -
+				     dstskip, CHCR_DST_SG_SIZE);
+			dstlen += less;
+			offset += less;
+			if ((offset + dstskip) == sg_dma_len(dst)) {
+				dst = sg_next(dst);
+				offset = 0;
+			}
+			dstsg++;
+			dstskip = 0;
+		}
+		soffset += sless;
+		if ((soffset + srcskip) == sg_dma_len(src)) {
+			src = sg_next(src);
+			srcskip = 0;
+			soffset = 0;
+		}
+
+	}
+	return min(srclen, dstlen);
+}
+
+static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
+				u32 flags,
+				struct scatterlist *src,
+				struct scatterlist *dst,
+				unsigned int nbytes,
+				u8 *iv,
+				unsigned short op_type)
+{
+	int err;
+
+	SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
+
+	skcipher_request_set_tfm(subreq, cipher);
+	skcipher_request_set_callback(subreq, flags, NULL, NULL);
+	skcipher_request_set_crypt(subreq, src, dst,
+				   nbytes, iv);
+
+	err = op_type ? crypto_skcipher_decrypt(subreq) :
+		crypto_skcipher_encrypt(subreq);
+	skcipher_request_zero(subreq);
+
+	return err;
+}
+
+static inline void create_wreq(struct chcr_context *ctx,
+			       struct chcr_wr *chcr_req,
+			       struct crypto_async_request *req,
+			       unsigned int imm,
+			       int hash_sz,
+			       unsigned int len16,
+			       unsigned int sc_len,
+			       unsigned int lcb)
+{
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
+
+	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
+	chcr_req->wreq.pld_size_hash_size =
+		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
+	chcr_req->wreq.len16_pkd =
+		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
+	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
+	chcr_req->wreq.rx_chid_to_rx_q_id =
+		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
+				!!lcb, ctx->tx_qidx);
+
+	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
+						       qid);
+	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
+				     ((sizeof(chcr_req->wreq)) >> 4)));
+
+	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
+	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+					   sizeof(chcr_req->key_ctx) + sc_len);
+}
+
+/**
+ *	create_cipher_wr - form the WR for cipher operations
+ *	@wrparam: cipher work-request parameters: the cipher request, the
+ *		  ingress qid where the response of this WR should be
+ *		  received, and the number of bytes to process.
+ */
+static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+	struct sk_buff *skb = NULL;
+	struct chcr_wr *chcr_req;
+	struct cpl_rx_phys_dsgl *phys_cpl;
+	struct ulptx_sgl *ulptx;
+	struct chcr_blkcipher_req_ctx *reqctx =
+		ablkcipher_request_ctx(wrparam->req);
+	unsigned int temp = 0, transhdr_len, dst_size;
+	int error;
+	int nents;
+	unsigned int kctx_len;
+	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+			GFP_KERNEL : GFP_ATOMIC;
+	struct adapter *adap = padap(c_ctx(tfm)->dev);
+
+	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
+			      reqctx->dst_ofst);
+	dst_size = get_space_for_phys_dsgl(nents);
+	kctx_len = roundup(ablkctx->enckey_len, 16);
+	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
+				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
+	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
+				     (sgl_len(nents) * 8);
+	transhdr_len += temp;
+	transhdr_len = roundup(transhdr_len, 16);
+	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+	if (!skb) {
+		error = -ENOMEM;
+		goto err;
+	}
+	chcr_req = __skb_put_zero(skb, transhdr_len);
+	chcr_req->sec_cpl.op_ivinsrtofst =
+		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
+
+	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
+	chcr_req->sec_cpl.aadstart_cipherstop_hi =
+			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
+
+	chcr_req->sec_cpl.cipherstop_lo_authinsert =
+			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
+							 ablkctx->ciph_mode,
+							 0, 0, IV >> 1);
+	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
+							  0, 1, dst_size);
+
+	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
+	if ((reqctx->op == CHCR_DECRYPT_OP) &&
+	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
+	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
+		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
+	} else {
+		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
+		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
+			memcpy(chcr_req->key_ctx.key, ablkctx->key,
+			       ablkctx->enckey_len);
+		} else {
+			memcpy(chcr_req->key_ctx.key, ablkctx->key +
+			       (ablkctx->enckey_len >> 1),
+			       ablkctx->enckey_len >> 1);
+			memcpy(chcr_req->key_ctx.key +
+			       (ablkctx->enckey_len >> 1),
+			       ablkctx->key,
+			       ablkctx->enckey_len >> 1);
+		}
+	}
+	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
+	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
+
+	atomic_inc(&adap->chcr_stats.cipher_rqst);
+	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
+		+ (reqctx->imm ? (wrparam->bytes) : 0);
+	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
+		    transhdr_len, temp,
+			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
+	reqctx->skb = skb;
+
+	if (reqctx->op && (ablkctx->ciph_mode ==
+			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
+		sg_pcopy_to_buffer(wrparam->req->src,
+			sg_nents(wrparam->req->src), wrparam->req->info, 16,
+			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
+
+	return skb;
+err:
+	return ERR_PTR(error);
+}
+
+static inline int chcr_keyctx_ck_size(unsigned int keylen)
+{
+	int ck_size = 0;
+
+	if (keylen == AES_KEYSIZE_128)
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	else if (keylen == AES_KEYSIZE_192)
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+	else if (keylen == AES_KEYSIZE_256)
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+	else
+		ck_size = 0;
+
+	return ck_size;
+}
+
+static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
+				       const u8 *key,
+				       unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
+	int err = 0;
+
+	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
+				  CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
+	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+	tfm->crt_flags |=
+		crypto_skcipher_get_flags(ablkctx->sw_cipher) &
+		CRYPTO_TFM_RES_MASK;
+	return err;
+}
+
+static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
+			       const u8 *key,
+			       unsigned int keylen)
+{
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
+	unsigned int ck_size, context_size;
+	u16 alignment = 0;
+	int err;
+
+	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
+	if (err)
+		goto badkey_err;
+
+	ck_size = chcr_keyctx_ck_size(keylen);
+	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
+	memcpy(ablkctx->key, key, keylen);
+	ablkctx->enckey_len = keylen;
+	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
+	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
+			keylen + alignment) >> 4;
+
+	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
+						0, 0, context_size);
+	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
+	return 0;
+badkey_err:
+	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	ablkctx->enckey_len = 0;
+
+	return err;
+}
+
+static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
+				   const u8 *key,
+				   unsigned int keylen)
+{
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
+	unsigned int ck_size, context_size;
+	u16 alignment = 0;
+	int err;
+
+	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
+	if (err)
+		goto badkey_err;
+	ck_size = chcr_keyctx_ck_size(keylen);
+	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
+	memcpy(ablkctx->key, key, keylen);
+	ablkctx->enckey_len = keylen;
+	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
+			keylen + alignment) >> 4;
+
+	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
+						0, 0, context_size);
+	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+
+	return 0;
+badkey_err:
+	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	ablkctx->enckey_len = 0;
+
+	return err;
+}
+
+static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
+				   const u8 *key,
+				   unsigned int keylen)
+{
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
+	unsigned int ck_size, context_size;
+	u16 alignment = 0;
+	int err;
+
+	if (keylen < CTR_RFC3686_NONCE_SIZE)
+		return -EINVAL;
+	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
+	       CTR_RFC3686_NONCE_SIZE);
+
+	keylen -= CTR_RFC3686_NONCE_SIZE;
+	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
+	if (err)
+		goto badkey_err;
+
+	ck_size = chcr_keyctx_ck_size(keylen);
+	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
+	memcpy(ablkctx->key, key, keylen);
+	ablkctx->enckey_len = keylen;
+	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
+			keylen + alignment) >> 4;
+
+	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
+						0, 0, context_size);
+	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+
+	return 0;
+badkey_err:
+	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	ablkctx->enckey_len = 0;
+
+	return err;
+}
+
+static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
+{
+	unsigned int size = AES_BLOCK_SIZE;
+	__be32 *b = (__be32 *)(dstiv + size);
+	u32 c, prev;
+
+	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
+	for (; size >= 4; size -= 4) {
+		prev = be32_to_cpu(*--b);
+		c = prev + add;
+		*b = cpu_to_be32(c);
+		if (prev < c)
+			break;
+		add = 1;
+	}
+
+}
+
+static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
+{
+	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
+	u64 c;
+	u32 temp = be32_to_cpu(*--b);
+
+	temp = ~temp;
+	/* Number of blocks that can be processed without overflow */
+	c = (u64)temp + 1;
+	if ((bytes / AES_BLOCK_SIZE) > c)
+		bytes = c * AES_BLOCK_SIZE;
+	return bytes;
+}
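+
+/*
+ * Worked example for adjust_ctr_overflow() (illustrative only): if the low
+ * 32-bit word of the counter in @iv is 0xFFFFFFFE, then temp = 1 and c = 2,
+ * so at most 2 AES blocks (32 bytes) are processed in this request before
+ * the 32-bit counter word would wrap; the remainder is handled by a
+ * follow-up work request after the IV has been updated.
+ */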
+
+static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
+			     u32 isfinal)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct crypto_cipher *cipher;
+	int ret, i;
+	u8 *key;
+	unsigned int keylen;
+	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
+	int round8 = round / 8;
+
+	cipher = ablkctx->aes_generic;
+	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
+
+	keylen = ablkctx->enckey_len / 2;
+	key = ablkctx->key + keylen;
+	ret = crypto_cipher_setkey(cipher, key, keylen);
+	if (ret)
+		goto out;
+	crypto_cipher_encrypt_one(cipher, iv, iv);
+	for (i = 0; i < round8; i++)
+		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
+
+	for (i = 0; i < (round % 8); i++)
+		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
+
+	if (!isfinal)
+		crypto_cipher_decrypt_one(cipher, iv, iv);
+out:
+	return ret;
+}
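+
+/*
+ * Worked example for chcr_update_tweak() (illustrative only): if the
+ * previous request processed last_req_len = 80 bytes, then round = 5 and
+ * round8 = 0, so the encrypted tweak is advanced by five gf128mul_x_ble()
+ * steps (a multiplication by x^5 in GF(2^128)); unless this is the final
+ * IV, the result is then decrypted back with the tweak key.
+ */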
+
+static int chcr_update_cipher_iv(struct ablkcipher_request *req,
+				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+	int ret = 0;
+
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
+		ctr_add_iv(iv, req->info, (reqctx->processed /
+			   AES_BLOCK_SIZE));
+	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
+		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
+						AES_BLOCK_SIZE) + 1);
+	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+		ret = chcr_update_tweak(req, iv, 0);
+	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
+		if (reqctx->op)
+			/*Updated before sending last WR*/
+			memcpy(iv, req->info, AES_BLOCK_SIZE);
+		else
+			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
+	}
+
+	return ret;
+
+}
+
+/* We need a separate function for the final IV because in RFC 3686 the
+ * initial counter starts from 1 and the IV buffer is only 8 bytes, which
+ * remains constant for subsequent update requests.
+ */
+
+static int chcr_final_cipher_iv(struct ablkcipher_request *req,
+				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+	int ret = 0;
+
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
+		ctr_add_iv(iv, req->info, (reqctx->processed /
+			   AES_BLOCK_SIZE));
+	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+		ret = chcr_update_tweak(req, iv, 1);
+	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
+		/*Already updated for Decrypt*/
+		if (!reqctx->op)
+			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
+
+	}
+	return ret;
+
+}
+
+static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+				   unsigned char *input, int err)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+	struct sk_buff *skb;
+	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct  cipher_wr_param wrparam;
+	int bytes;
+
+	if (err)
+		goto unmap;
+	if (req->nbytes == reqctx->processed) {
+		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+				      req);
+		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
+		goto complete;
+	}
+
+	if (!reqctx->imm) {
+		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
+					  CIP_SPACE_LEFT(ablkctx->enckey_len),
+					  reqctx->src_ofst, reqctx->dst_ofst);
+		if ((bytes + reqctx->processed) >= req->nbytes)
+			bytes  = req->nbytes - reqctx->processed;
+		else
+			bytes = rounddown(bytes, 16);
+	} else {
+		/* CTR mode counter overflow */
+		bytes  = req->nbytes - reqctx->processed;
+	}
+	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
+	if (err)
+		goto unmap;
+
+	if (unlikely(bytes == 0)) {
+		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+				      req);
+		err = chcr_cipher_fallback(ablkctx->sw_cipher,
+				     req->base.flags,
+				     req->src,
+				     req->dst,
+				     req->nbytes,
+				     req->info,
+				     reqctx->op);
+		goto complete;
+	}
+
+	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	    CRYPTO_ALG_SUB_TYPE_CTR)
+		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
+	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
+	wrparam.req = req;
+	wrparam.bytes = bytes;
+	skb = create_cipher_wr(&wrparam);
+	if (IS_ERR(skb)) {
+		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
+		err = PTR_ERR(skb);
+		goto unmap;
+	}
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
+	chcr_send_wr(skb);
+	reqctx->last_req_len = bytes;
+	reqctx->processed += bytes;
+	return 0;
+unmap:
+	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+complete:
+	req->base.complete(&req->base, err);
+	return err;
+}
+
+static int process_cipher(struct ablkcipher_request *req,
+				  unsigned short qid,
+				  struct sk_buff **skb,
+				  unsigned short op_type)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+	struct	cipher_wr_param wrparam;
+	int bytes, err = -EINVAL;
+
+	reqctx->processed = 0;
+	if (!req->info)
+		goto error;
+	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
+	    (req->nbytes == 0) ||
+	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
+		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
+		       ablkctx->enckey_len, req->nbytes, ivsize);
+		goto error;
+	}
+	chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+					    AES_MIN_KEY_SIZE +
+					    sizeof(struct cpl_rx_phys_dsgl) +
+					/*Min dsgl size*/
+					    32))) {
+		/* Can be sent as Imm*/
+		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
+
+		dnents = sg_nents_xlen(req->dst, req->nbytes,
+				       CHCR_DST_SG_SIZE, 0);
+		phys_dsgl = get_space_for_phys_dsgl(dnents);
+		kctx_len = roundup(ablkctx->enckey_len, 16);
+		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
+		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
+			SGE_MAX_WR_LEN;
+		bytes = IV + req->nbytes;
+
+	} else {
+		reqctx->imm = 0;
+	}
+
+	if (!reqctx->imm) {
+		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
+					  CIP_SPACE_LEFT(ablkctx->enckey_len),
+					  0, 0);
+		if ((bytes + reqctx->processed) >= req->nbytes)
+			bytes  = req->nbytes - reqctx->processed;
+		else
+			bytes = rounddown(bytes, 16);
+	} else {
+		bytes = req->nbytes;
+	}
+	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	    CRYPTO_ALG_SUB_TYPE_CTR) {
+		bytes = adjust_ctr_overflow(req->info, bytes);
+	}
+	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
+		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
+		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
+				CTR_RFC3686_IV_SIZE);
+
+		/* initialize counter portion of counter block */
+		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+
+	} else {
+
+		memcpy(reqctx->iv, req->info, IV);
+	}
+	if (unlikely(bytes == 0)) {
+		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+				      req);
+		err = chcr_cipher_fallback(ablkctx->sw_cipher,
+					   req->base.flags,
+					   req->src,
+					   req->dst,
+					   req->nbytes,
+					   reqctx->iv,
+					   op_type);
+		goto error;
+	}
+	reqctx->op = op_type;
+	reqctx->srcsg = req->src;
+	reqctx->dstsg = req->dst;
+	reqctx->src_ofst = 0;
+	reqctx->dst_ofst = 0;
+	wrparam.qid = qid;
+	wrparam.req = req;
+	wrparam.bytes = bytes;
+	*skb = create_cipher_wr(&wrparam);
+	if (IS_ERR(*skb)) {
+		err = PTR_ERR(*skb);
+		goto unmap;
+	}
+	reqctx->processed = bytes;
+	reqctx->last_req_len = bytes;
+
+	return 0;
+unmap:
+	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+error:
+	return err;
+}
+
+static int chcr_aes_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct sk_buff *skb = NULL;
+	int err, isfull = 0;
+	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+
+	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+					    c_ctx(tfm)->tx_qidx))) {
+		isfull = 1;
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return -ENOSPC;
+	}
+
+	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+			     &skb, CHCR_ENCRYPT_OP);
+	if (err || !skb)
+		return  err;
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
+	chcr_send_wr(skb);
+	return isfull ? -EBUSY : -EINPROGRESS;
+}
+
+static int chcr_aes_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+	struct sk_buff *skb = NULL;
+	int err, isfull = 0;
+
+	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+					    c_ctx(tfm)->tx_qidx))) {
+		isfull = 1;
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return -ENOSPC;
+	}
+
+	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+			     &skb, CHCR_DECRYPT_OP);
+	if (err || !skb)
+		return err;
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
+	chcr_send_wr(skb);
+	return isfull ? -EBUSY : -EINPROGRESS;
+}
+
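+/* Bind the context to a chcr device on first use and derive its TX/RX queue
+ * indices from the current CPU and the adapter's per-channel queue counts.
+ */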
+static int chcr_device_init(struct chcr_context *ctx)
+{
+	struct uld_ctx *u_ctx = NULL;
+	struct adapter *adap;
+	unsigned int id;
+	int txq_perchan, txq_idx, ntxq;
+	int err = 0, rxq_perchan, rxq_idx;
+
+	id = smp_processor_id();
+	if (!ctx->dev) {
+		u_ctx = assign_chcr_device();
+		if (!u_ctx) {
+			pr_err("chcr device assignment fails\n");
+			goto out;
+		}
+		ctx->dev = u_ctx->dev;
+		adap = padap(ctx->dev);
+		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
+				    adap->vres.ncrypto_fc);
+		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
+		txq_perchan = ntxq / u_ctx->lldi.nchan;
+		spin_lock(&ctx->dev->lock_chcr_dev);
+		ctx->tx_chan_id = ctx->dev->tx_channel_id;
+		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
+		ctx->dev->rx_channel_id = 0;
+		spin_unlock(&ctx->dev->lock_chcr_dev);
+		rxq_idx = ctx->tx_chan_id * rxq_perchan;
+		rxq_idx += id % rxq_perchan;
+		txq_idx = ctx->tx_chan_id * txq_perchan;
+		txq_idx += id % txq_perchan;
+		ctx->rx_qidx = rxq_idx;
+		ctx->tx_qidx = txq_idx;
+		/* Channel Id used by SGE to forward packet to Host.
+		 * Same value should be used in cpl_fw6_pld RSS_CH field
+		 * by FW. Driver programs PCI channel ID to be used in fw
+		 * at the time of queue allocation with value "pi->tx_chan"
+		 */
+		ctx->pci_chan_id = txq_idx / txq_perchan;
+	}
+out:
+	return err;
+}
+
+static int chcr_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+
+	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
+				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ablkctx->sw_cipher)) {
+		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+		return PTR_ERR(ablkctx->sw_cipher);
+	}
+
+	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
+		/* To update tweak */
+		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
+		if (IS_ERR(ablkctx->aes_generic)) {
+			pr_err("failed to allocate aes cipher for tweak\n");
+			return PTR_ERR(ablkctx->aes_generic);
+		}
+	} else
+		ablkctx->aes_generic = NULL;
+
+	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
+	return chcr_device_init(crypto_tfm_ctx(tfm));
+}
+
+static int chcr_rfc3686_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+
+	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
+	 * cannot be used as the fallback in chcr_handle_cipher_resp.
+	 */
+	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
+				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ablkctx->sw_cipher)) {
+		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+		return PTR_ERR(ablkctx->sw_cipher);
+	}
+	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
+	return chcr_device_init(crypto_tfm_ctx(tfm));
+}
+
+
+static void chcr_cra_exit(struct crypto_tfm *tfm)
+{
+	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+
+	crypto_free_skcipher(ablkctx->sw_cipher);
+	if (ablkctx->aes_generic)
+		crypto_free_cipher(ablkctx->aes_generic);
+}
+
+static int get_alg_config(struct algo_param *params,
+			  unsigned int auth_size)
+{
+	switch (auth_size) {
+	case SHA1_DIGEST_SIZE:
+		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
+		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
+		params->result_size = SHA1_DIGEST_SIZE;
+		break;
+	case SHA224_DIGEST_SIZE:
+		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
+		params->result_size = SHA256_DIGEST_SIZE;
+		break;
+	case SHA256_DIGEST_SIZE:
+		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
+		params->result_size = SHA256_DIGEST_SIZE;
+		break;
+	case SHA384_DIGEST_SIZE:
+		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
+		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
+		params->result_size = SHA512_DIGEST_SIZE;
+		break;
+	case SHA512_DIGEST_SIZE:
+		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
+		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
+		params->result_size = SHA512_DIGEST_SIZE;
+		break;
+	default:
+		pr_err("chcr : ERROR, unsupported digest size\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static inline void chcr_free_shash(struct crypto_shash *base_hash)
+{
+		crypto_free_shash(base_hash);
+}
+
+/**
+ *	create_hash_wr - Create hash work request
+ *	@req: hash request
+ *	@param: hash work request parameters
+ */
+static struct sk_buff *create_hash_wr(struct ahash_request *req,
+				      struct hash_wr_param *param)
+{
+	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
+	struct sk_buff *skb = NULL;
+	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+	struct chcr_wr *chcr_req;
+	struct ulptx_sgl *ulptx;
+	unsigned int nents = 0, transhdr_len;
+	unsigned int temp = 0;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		GFP_ATOMIC;
+	struct adapter *adap = padap(h_ctx(tfm)->dev);
+	int error = 0;
+
+	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
+	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
+				param->sg_len) <= SGE_MAX_WR_LEN;
+	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
+		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
+	nents += param->bfr_len ? 1 : 0;
+	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
+				param->sg_len, 16) : (sgl_len(nents) * 8);
+	transhdr_len = roundup(transhdr_len, 16);
+
+	skb = alloc_skb(transhdr_len, flags);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+	chcr_req = __skb_put_zero(skb, transhdr_len);
+
+	chcr_req->sec_cpl.op_ivinsrtofst =
+		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
+	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
+
+	chcr_req->sec_cpl.aadstart_cipherstop_hi =
+		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
+	chcr_req->sec_cpl.cipherstop_lo_authinsert =
+		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
+	chcr_req->sec_cpl.seqno_numivs =
+		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
+					 param->opad_needed, 0);
+
+	chcr_req->sec_cpl.ivgen_hdrlen =
+		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
+
+	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
+	       param->alg_prm.result_size);
+
+	if (param->opad_needed)
+		memcpy(chcr_req->key_ctx.key +
+		       ((param->alg_prm.result_size <= 32) ? 32 :
+			CHCR_HASH_MAX_DIGEST_SIZE),
+		       hmacctx->opad, param->alg_prm.result_size);
+
+	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
+					    param->alg_prm.mk_size, 0,
+					    param->opad_needed,
+					    ((param->kctx_len +
+					     sizeof(chcr_req->key_ctx)) >> 4));
+	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
+	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
+				     DUMMY_BYTES);
+	if (param->bfr_len != 0) {
+		req_ctx->hctx_wr.dma_addr =
+			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
+				       param->bfr_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
+				      req_ctx->hctx_wr.dma_addr)) {
+			error = -ENOMEM;
+			goto err;
+		}
+		req_ctx->hctx_wr.dma_len = param->bfr_len;
+	} else {
+		req_ctx->hctx_wr.dma_addr = 0;
+	}
+	chcr_add_hash_src_ent(req, ulptx, param);
+	/* Request up to max wr size */
+	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
+				(param->sg_len + param->bfr_len) : 0);
+	atomic_inc(&adap->chcr_stats.digest_rqst);
+	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
+		    param->hash_size, transhdr_len,
+		    temp,  0);
+	req_ctx->hctx_wr.skb = skb;
+	return skb;
+err:
+	kfree_skb(skb);
+	return  ERR_PTR(error);
+}
+
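+/* Queue a partial hash update. Less than one block of data is simply buffered
+ * in the request context; otherwise a block-aligned chunk is sent to hardware
+ * and the remainder is copied into the spare buffer for the next call.
+ */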
+static int chcr_ahash_update(struct ahash_request *req)
+{
+	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+	struct uld_ctx *u_ctx = NULL;
+	struct sk_buff *skb;
+	u8 remainder = 0, bs;
+	unsigned int nbytes = req->nbytes;
+	struct hash_wr_param params;
+	int error, isfull = 0;
+
+	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+	u_ctx = ULD_CTX(h_ctx(rtfm));
+	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+					    h_ctx(rtfm)->tx_qidx))) {
+		isfull = 1;
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return -ENOSPC;
+	}
+
+	if (nbytes + req_ctx->reqlen >= bs) {
+		remainder = (nbytes + req_ctx->reqlen) % bs;
+		nbytes = nbytes + req_ctx->reqlen - remainder;
+	} else {
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
+				   + req_ctx->reqlen, nbytes, 0);
+		req_ctx->reqlen += nbytes;
+		return 0;
+	}
+	chcr_init_hctx_per_wr(req_ctx);
+	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+	if (error)
+		return -ENOMEM;
+	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+				     HASH_SPACE_LEFT(params.kctx_len), 0);
+	if (params.sg_len > req->nbytes)
+		params.sg_len = req->nbytes;
+	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
+			req_ctx->reqlen;
+	params.opad_needed = 0;
+	params.more = 1;
+	params.last = 0;
+	params.bfr_len = req_ctx->reqlen;
+	params.scmd1 = 0;
+	req_ctx->hctx_wr.srcsg = req->src;
+
+	params.hash_size = params.alg_prm.result_size;
+	req_ctx->data_len += params.sg_len + params.bfr_len;
+	skb = create_hash_wr(req, &params);
+	if (IS_ERR(skb)) {
+		error = PTR_ERR(skb);
+		goto unmap;
+	}
+
+	req_ctx->hctx_wr.processed += params.sg_len;
+	if (remainder) {
+		/* Swap buffers */
+		swap(req_ctx->reqbfr, req_ctx->skbfr);
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+				   req_ctx->reqbfr, remainder, req->nbytes -
+				   remainder);
+	}
+	req_ctx->reqlen = remainder;
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+	chcr_send_wr(skb);
+
+	return isfull ? -EBUSY : -EINPROGRESS;
+unmap:
+	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+	return error;
+}
+
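+/* Build a final padded hash block: all zeroes with a leading 0x80 byte and
+ * the message length in bits in the last eight bytes.
+ */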
+static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
+{
+	memset(bfr_ptr, 0, bs);
+	*bfr_ptr = 0x80;
+	if (bs == 64)
+		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
+	else
+		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
+}
+
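+/* Send the buffered tail (or a pre-padded block when nothing is buffered) as
+ * the final hash work request and ask hardware for the digest.
+ */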
+static int chcr_ahash_final(struct ahash_request *req)
+{
+	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+	struct hash_wr_param params;
+	struct sk_buff *skb;
+	struct uld_ctx *u_ctx = NULL;
+	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+
+	chcr_init_hctx_per_wr(req_ctx);
+	u_ctx = ULD_CTX(h_ctx(rtfm));
+	if (is_hmac(crypto_ahash_tfm(rtfm)))
+		params.opad_needed = 1;
+	else
+		params.opad_needed = 0;
+	params.sg_len = 0;
+	req_ctx->hctx_wr.isfinal = 1;
+	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		params.opad_needed = 1;
+		params.kctx_len *= 2;
+	} else {
+		params.opad_needed = 0;
+	}
+
+	req_ctx->hctx_wr.result = 1;
+	params.bfr_len = req_ctx->reqlen;
+	req_ctx->data_len += params.bfr_len + params.sg_len;
+	req_ctx->hctx_wr.srcsg = req->src;
+	if (req_ctx->reqlen == 0) {
+		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
+		params.last = 0;
+		params.more = 1;
+		params.scmd1 = 0;
+		params.bfr_len = bs;
+
+	} else {
+		params.scmd1 = req_ctx->data_len;
+		params.last = 1;
+		params.more = 0;
+	}
+	params.hash_size = crypto_ahash_digestsize(rtfm);
+	skb = create_hash_wr(req, &params);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+	req_ctx->reqlen = 0;
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+	chcr_send_wr(skb);
+	return -EINPROGRESS;
+}
+
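+/* finup: hash the remaining source data and finalise. When the data does not
+ * fit in a single work request, an intermediate (more) WR is sent and the
+ * rest is handled from the completion path.
+ */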
+static int chcr_ahash_finup(struct ahash_request *req)
+{
+	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+	struct uld_ctx *u_ctx = NULL;
+	struct sk_buff *skb;
+	struct hash_wr_param params;
+	u8  bs;
+	int error, isfull = 0;
+
+	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+	u_ctx = ULD_CTX(h_ctx(rtfm));
+
+	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+					    h_ctx(rtfm)->tx_qidx))) {
+		isfull = 1;
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return -ENOSPC;
+	}
+	chcr_init_hctx_per_wr(req_ctx);
+	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+	if (error)
+		return -ENOMEM;
+
+	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		params.kctx_len *= 2;
+		params.opad_needed = 1;
+	} else {
+		params.opad_needed = 0;
+	}
+
+	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+				    HASH_SPACE_LEFT(params.kctx_len), 0);
+	if (params.sg_len < req->nbytes) {
+		if (is_hmac(crypto_ahash_tfm(rtfm))) {
+			params.kctx_len /= 2;
+			params.opad_needed = 0;
+		}
+		params.last = 0;
+		params.more = 1;
+		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
+					- req_ctx->reqlen;
+		params.hash_size = params.alg_prm.result_size;
+		params.scmd1 = 0;
+	} else {
+		params.last = 1;
+		params.more = 0;
+		params.sg_len = req->nbytes;
+		params.hash_size = crypto_ahash_digestsize(rtfm);
+		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
+				params.sg_len;
+	}
+	params.bfr_len = req_ctx->reqlen;
+	req_ctx->data_len += params.bfr_len + params.sg_len;
+	req_ctx->hctx_wr.result = 1;
+	req_ctx->hctx_wr.srcsg = req->src;
+	if ((req_ctx->reqlen + req->nbytes) == 0) {
+		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
+		params.last = 0;
+		params.more = 1;
+		params.scmd1 = 0;
+		params.bfr_len = bs;
+	}
+	skb = create_hash_wr(req, &params);
+	if (IS_ERR(skb)) {
+		error = PTR_ERR(skb);
+		goto unmap;
+	}
+	req_ctx->reqlen = 0;
+	req_ctx->hctx_wr.processed += params.sg_len;
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+	chcr_send_wr(skb);
+
+	return isfull ? -EBUSY : -EINPROGRESS;
+unmap:
+	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+	return error;
+}
+
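+/* One-shot digest: initialise the request state and hash req->src, splitting
+ * the data across several work requests via chcr_ahash_continue() when it
+ * does not fit in one.
+ */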
+static int chcr_ahash_digest(struct ahash_request *req)
+{
+	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+	struct uld_ctx *u_ctx = NULL;
+	struct sk_buff *skb;
+	struct hash_wr_param params;
+	u8  bs;
+	int error, isfull = 0;
+
+	rtfm->init(req);
+	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+
+	u_ctx = ULD_CTX(h_ctx(rtfm));
+	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+					    h_ctx(rtfm)->tx_qidx))) {
+		isfull = 1;
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return -ENOSPC;
+	}
+
+	chcr_init_hctx_per_wr(req_ctx);
+	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+	if (error)
+		return -ENOMEM;
+
+	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		params.kctx_len *= 2;
+		params.opad_needed = 1;
+	} else {
+		params.opad_needed = 0;
+	}
+	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+				HASH_SPACE_LEFT(params.kctx_len), 0);
+	if (params.sg_len < req->nbytes) {
+		if (is_hmac(crypto_ahash_tfm(rtfm))) {
+			params.kctx_len /= 2;
+			params.opad_needed = 0;
+		}
+		params.last = 0;
+		params.more = 1;
+		params.scmd1 = 0;
+		params.sg_len = rounddown(params.sg_len, bs);
+		params.hash_size = params.alg_prm.result_size;
+	} else {
+		params.sg_len = req->nbytes;
+		params.hash_size = crypto_ahash_digestsize(rtfm);
+		params.last = 1;
+		params.more = 0;
+		params.scmd1 = req->nbytes + req_ctx->data_len;
+
+	}
+	params.bfr_len = 0;
+	req_ctx->hctx_wr.result = 1;
+	req_ctx->hctx_wr.srcsg = req->src;
+	req_ctx->data_len += params.bfr_len + params.sg_len;
+
+	if (req->nbytes == 0) {
+		create_last_hash_block(req_ctx->reqbfr, bs, 0);
+		params.more = 1;
+		params.bfr_len = bs;
+	}
+
+	skb = create_hash_wr(req, &params);
+	if (IS_ERR(skb)) {
+		error = PTR_ERR(skb);
+		goto unmap;
+	}
+	req_ctx->hctx_wr.processed += params.sg_len;
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+	chcr_send_wr(skb);
+	return isfull ? -EBUSY : -EINPROGRESS;
+unmap:
+	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+	return error;
+}
+
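+/* Issue the next hash work request for a request whose data did not fit in a
+ * single WR; called from the completion path until all data is processed.
+ */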
+static int chcr_ahash_continue(struct ahash_request *req)
+{
+	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
+	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+	struct uld_ctx *u_ctx = NULL;
+	struct sk_buff *skb;
+	struct hash_wr_param params;
+	u8  bs;
+	int error;
+
+	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+	u_ctx = ULD_CTX(h_ctx(rtfm));
+	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		params.kctx_len *= 2;
+		params.opad_needed = 1;
+	} else {
+		params.opad_needed = 0;
+	}
+	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
+					    HASH_SPACE_LEFT(params.kctx_len),
+					    hctx_wr->src_ofst);
+	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
+		params.sg_len = req->nbytes - hctx_wr->processed;
+	if (!hctx_wr->result ||
+	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
+		if (is_hmac(crypto_ahash_tfm(rtfm))) {
+			params.kctx_len /= 2;
+			params.opad_needed = 0;
+		}
+		params.last = 0;
+		params.more = 1;
+		params.sg_len = rounddown(params.sg_len, bs);
+		params.hash_size = params.alg_prm.result_size;
+		params.scmd1 = 0;
+	} else {
+		params.last = 1;
+		params.more = 0;
+		params.hash_size = crypto_ahash_digestsize(rtfm);
+		params.scmd1 = reqctx->data_len + params.sg_len;
+	}
+	params.bfr_len = 0;
+	reqctx->data_len += params.sg_len;
+	skb = create_hash_wr(req, &params);
+	if (IS_ERR(skb)) {
+		error = PTR_ERR(skb);
+		goto err;
+	}
+	hctx_wr->processed += params.sg_len;
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+	chcr_send_wr(skb);
+	return 0;
+err:
+	return error;
+}
+
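+/* Hash completion handler: copy out the digest or the partial hash, unmap
+ * the DMA buffers and, if source data remains, kick off the next WR.
+ */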
+static inline void chcr_handle_ahash_resp(struct ahash_request *req,
+					  unsigned char *input,
+					  int err)
+{
+	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
+	int digestsize, updated_digestsize;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+
+	if (input == NULL)
+		goto out;
+	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+	updated_digestsize = digestsize;
+	if (digestsize == SHA224_DIGEST_SIZE)
+		updated_digestsize = SHA256_DIGEST_SIZE;
+	else if (digestsize == SHA384_DIGEST_SIZE)
+		updated_digestsize = SHA512_DIGEST_SIZE;
+
+	if (hctx_wr->dma_addr) {
+		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
+				 hctx_wr->dma_len, DMA_TO_DEVICE);
+		hctx_wr->dma_addr = 0;
+	}
+	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
+				 req->nbytes)) {
+		if (hctx_wr->result == 1) {
+			hctx_wr->result = 0;
+			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
+			       digestsize);
+		} else {
+			memcpy(reqctx->partial_hash,
+			       input + sizeof(struct cpl_fw6_pld),
+			       updated_digestsize);
+
+		}
+		goto unmap;
+	}
+	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
+	       updated_digestsize);
+
+	err = chcr_ahash_continue(req);
+	if (err)
+		goto unmap;
+	return;
+unmap:
+	if (hctx_wr->is_sg_map)
+		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+
+out:
+	req->base.complete(&req->base, err);
+}
+
+/*
+ *	chcr_handle_resp - Handle the completion of a crypto request: dispatch
+ *	to the per-type handler and unmap the DMA buffers associated with it
+ *	@req: crypto request
+ */
+int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
+			 int err)
+{
+	struct crypto_tfm *tfm = req->tfm;
+	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct adapter *adap = padap(ctx->dev);
+
+	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+	case CRYPTO_ALG_TYPE_AEAD:
+		chcr_handle_aead_resp(aead_request_cast(req), input, err);
+		break;
+
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+					      input, err);
+		break;
+
+	case CRYPTO_ALG_TYPE_AHASH:
+		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
+	}
+	atomic_inc(&adap->chcr_stats.complete);
+	return err;
+}
+
+static int chcr_ahash_export(struct ahash_request *areq, void *out)
+{
+	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct chcr_ahash_req_ctx *state = out;
+
+	state->reqlen = req_ctx->reqlen;
+	state->data_len = req_ctx->data_len;
+	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
+	memcpy(state->partial_hash, req_ctx->partial_hash,
+	       CHCR_HASH_MAX_DIGEST_SIZE);
+	chcr_init_hctx_per_wr(state);
+	return 0;
+}
+
+static int chcr_ahash_import(struct ahash_request *areq, const void *in)
+{
+	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
+
+	req_ctx->reqlen = state->reqlen;
+	req_ctx->data_len = state->data_len;
+	req_ctx->reqbfr = req_ctx->bfr1;
+	req_ctx->skbfr = req_ctx->bfr2;
+	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
+	memcpy(req_ctx->partial_hash, state->partial_hash,
+	       CHCR_HASH_MAX_DIGEST_SIZE);
+	chcr_init_hctx_per_wr(req_ctx);
+	return 0;
+}
+
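+/* HMAC setkey: derive ipad and opad from the key and store their precomputed
+ * partial hashes in the transform context.
+ */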
+static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+			     unsigned int keylen)
+{
+	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
+	unsigned int digestsize = crypto_ahash_digestsize(tfm);
+	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+	unsigned int i, err = 0, updated_digestsize;
+
+	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
+
+	/* Use the key to calculate the ipad and opad. The ipad will be sent
+	 * with the first request's data and the opad with the final hash
+	 * result. They are stored in hmacctx->ipad and hmacctx->opad.
+	 */
+	shash->tfm = hmacctx->base_hash;
+	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
+	if (keylen > bs) {
+		err = crypto_shash_digest(shash, key, keylen,
+					  hmacctx->ipad);
+		if (err)
+			goto out;
+		keylen = digestsize;
+	} else {
+		memcpy(hmacctx->ipad, key, keylen);
+	}
+	memset(hmacctx->ipad + keylen, 0, bs - keylen);
+	memcpy(hmacctx->opad, hmacctx->ipad, bs);
+
+	for (i = 0; i < bs / sizeof(int); i++) {
+		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
+		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
+	}
+
+	updated_digestsize = digestsize;
+	if (digestsize == SHA224_DIGEST_SIZE)
+		updated_digestsize = SHA256_DIGEST_SIZE;
+	else if (digestsize == SHA384_DIGEST_SIZE)
+		updated_digestsize = SHA512_DIGEST_SIZE;
+	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
+					hmacctx->ipad, digestsize);
+	if (err)
+		goto out;
+	chcr_change_order(hmacctx->ipad, updated_digestsize);
+
+	err = chcr_compute_partial_hash(shash, hmacctx->opad,
+					hmacctx->opad, digestsize);
+	if (err)
+		goto out;
+	chcr_change_order(hmacctx->opad, updated_digestsize);
+out:
+	return err;
+}
+
+static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			       unsigned int key_len)
+{
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
+	unsigned short context_size = 0;
+	int err;
+
+	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
+	if (err)
+		goto badkey_err;
+
+	memcpy(ablkctx->key, key, key_len);
+	ablkctx->enckey_len = key_len;
+	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
+	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
+	ablkctx->key_ctx_hdr =
+		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
+				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
+				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
+				 CHCR_KEYCTX_NO_KEY, 1,
+				 0, context_size);
+	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
+	return 0;
+badkey_err:
+	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	ablkctx->enckey_len = 0;
+
+	return err;
+}
+
+static int chcr_sha_init(struct ahash_request *areq)
+{
+	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	int digestsize =  crypto_ahash_digestsize(tfm);
+
+	req_ctx->data_len = 0;
+	req_ctx->reqlen = 0;
+	req_ctx->reqbfr = req_ctx->bfr1;
+	req_ctx->skbfr = req_ctx->bfr2;
+	copy_hash_init_values(req_ctx->partial_hash, digestsize);
+
+	return 0;
+}
+
+static int chcr_sha_cra_init(struct crypto_tfm *tfm)
+{
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct chcr_ahash_req_ctx));
+	return chcr_device_init(crypto_tfm_ctx(tfm));
+}
+
+static int chcr_hmac_init(struct ahash_request *areq)
+{
+	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
+	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
+	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
+	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+
+	chcr_sha_init(areq);
+	req_ctx->data_len = bs;
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		if (digestsize == SHA224_DIGEST_SIZE)
+			memcpy(req_ctx->partial_hash, hmacctx->ipad,
+			       SHA256_DIGEST_SIZE);
+		else if (digestsize == SHA384_DIGEST_SIZE)
+			memcpy(req_ctx->partial_hash, hmacctx->ipad,
+			       SHA512_DIGEST_SIZE);
+		else
+			memcpy(req_ctx->partial_hash, hmacctx->ipad,
+			       digestsize);
+	}
+	return 0;
+}
+
+static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
+{
+	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+	unsigned int digestsize =
+		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct chcr_ahash_req_ctx));
+	hmacctx->base_hash = chcr_alloc_shash(digestsize);
+	if (IS_ERR(hmacctx->base_hash))
+		return PTR_ERR(hmacctx->base_hash);
+	return chcr_device_init(crypto_tfm_ctx(tfm));
+}
+
+static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+
+	if (hmacctx->base_hash) {
+		chcr_free_shash(hmacctx->base_hash);
+		hmacctx->base_hash = NULL;
+	}
+}
+
+inline void chcr_aead_common_exit(struct aead_request *req)
+{
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+}
+
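+/* Common AEAD request setup: validate the key and length, map the request
+ * for DMA and count the AAD and source scatter-gather entries.
+ */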
+static int chcr_aead_common_init(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	int error = -EINVAL;
+
+	/* validate key size */
+	if (aeadctx->enckey_len == 0)
+		goto err;
+	if (reqctx->op && req->cryptlen < authsize)
+		goto err;
+	if (reqctx->b0_len)
+		reqctx->scratch_pad = reqctx->iv + IV;
+	else
+		reqctx->scratch_pad = NULL;
+
+	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+				  reqctx->op);
+	if (error) {
+		error = -ENOMEM;
+		goto err;
+	}
+	reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
+					  CHCR_SRC_SG_SIZE, 0);
+	reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
+					  CHCR_SRC_SG_SIZE, req->assoclen);
+	return 0;
+err:
+	return error;
+}
+
+static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
+				   int aadmax, int wrlen,
+				   unsigned short op_type)
+{
+	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+
+	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
+	    dst_nents > MAX_DSGL_ENT ||
+	    (req->assoclen > aadmax) ||
+	    (wrlen > SGE_MAX_WR_LEN))
+		return 1;
+	return 0;
+}
+
+static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+	struct aead_request *subreq = aead_request_ctx(req);
+
+	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
+	aead_request_set_callback(subreq, req->base.flags,
+				  req->base.complete, req->base.data);
+	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+				 req->iv);
+	aead_request_set_ad(subreq, req->assoclen);
+	return op_type ? crypto_aead_decrypt(subreq) :
+		crypto_aead_encrypt(subreq);
+}
+
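+/* Build the work request for the authenc (cipher plus HMAC) and NULL-cipher
+ * modes. Requests exceeding the hardware limits are handed to the software
+ * fallback.
+ */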
+static struct sk_buff *create_authenc_wr(struct aead_request *req,
+					 unsigned short qid,
+					 int size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct sk_buff *skb = NULL;
+	struct chcr_wr *chcr_req;
+	struct cpl_rx_phys_dsgl *phys_cpl;
+	struct ulptx_sgl *ulptx;
+	unsigned int transhdr_len;
+	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
+	unsigned int   kctx_len = 0, dnents;
+	unsigned int  assoclen = req->assoclen;
+	unsigned int  authsize = crypto_aead_authsize(tfm);
+	int error = -EINVAL;
+	int null = 0;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		GFP_ATOMIC;
+	struct adapter *adap = padap(a_ctx(tfm)->dev);
+
+	if (req->cryptlen == 0)
+		return NULL;
+
+	reqctx->b0_len = 0;
+	error = chcr_aead_common_init(req);
+	if (error)
+		return ERR_PTR(error);
+
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
+		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+		null = 1;
+		assoclen = 0;
+		reqctx->aad_nents = 0;
+	}
+	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+	dnents += sg_nents_xlen(req->dst, req->cryptlen +
+		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
+		req->assoclen);
+	dnents += MIN_AUTH_SG; // For IV
+
+	dst_size = get_space_for_phys_dsgl(dnents);
+	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
+		- sizeof(chcr_req->key_ctx);
+	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
+			SGE_MAX_WR_LEN;
+	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
+			: (sgl_len(reqctx->src_nents + reqctx->aad_nents
+			+ MIN_GCM_SG) * 8);
+	transhdr_len += temp;
+	transhdr_len = roundup(transhdr_len, 16);
+
+	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
+				    transhdr_len, reqctx->op)) {
+		atomic_inc(&adap->chcr_stats.fallback);
+		chcr_aead_common_exit(req);
+		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
+	}
+	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+	if (!skb) {
+		error = -ENOMEM;
+		goto err;
+	}
+
+	chcr_req = __skb_put_zero(skb, transhdr_len);
+
+	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
+
+	/*
+	 * Input order is AAD, IV and payload, where the IV should be
+	 * included as part of the authdata. All other fields should be
+	 * filled according to the hardware spec.
+	 */
+	chcr_req->sec_cpl.op_ivinsrtofst =
+		FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
+				       assoclen + 1);
+	chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
+	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+					assoclen ? 1 : 0, assoclen,
+					assoclen + IV + 1,
+					(temp & 0x1F0) >> 4);
+	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
+					temp & 0xF,
+					null ? 0 : assoclen + IV + 1,
+					temp, temp);
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
+	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
+		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+	else
+		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
+	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
+					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
+					temp,
+					actx->auth_mode, aeadctx->hmac_ctrl,
+					IV >> 1);
+	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+					 0, 0, dst_size);
+
+	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+	if (reqctx->op == CHCR_ENCRYPT_OP ||
+		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
+		memcpy(chcr_req->key_ctx.key, aeadctx->key,
+		       aeadctx->enckey_len);
+	else
+		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
+		       aeadctx->enckey_len);
+
+	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+		memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+				CTR_RFC3686_IV_SIZE);
+		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+	} else {
+		memcpy(reqctx->iv, req->iv, IV);
+	}
+	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+	chcr_add_aead_src_ent(req, ulptx, assoclen);
+	atomic_inc(&adap->chcr_stats.cipher_rqst);
+	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+		   transhdr_len, temp, 0);
+	reqctx->skb = skb;
+
+	return skb;
+err:
+	chcr_aead_common_exit(req);
+
+	return ERR_PTR(error);
+}
+
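+/* Map an AEAD request for DMA: the IV (plus the optional B0 block) and the
+ * source and destination scatterlists.
+ */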
+int chcr_aead_dma_map(struct device *dev,
+		      struct aead_request *req,
+		      unsigned short op_type)
+{
+	int error;
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	int dst_size;
+
+	dst_size = req->assoclen + req->cryptlen + (op_type ?
+				-authsize : authsize);
+	if (!req->cryptlen || !dst_size)
+		return 0;
+	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
+					DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, reqctx->iv_dma))
+		return -ENOMEM;
+	if (reqctx->b0_len)
+		reqctx->b0_dma = reqctx->iv_dma + IV;
+	else
+		reqctx->b0_dma = 0;
+	if (req->src == req->dst) {
+		error = dma_map_sg(dev, req->src, sg_nents(req->src),
+				   DMA_BIDIRECTIONAL);
+		if (!error)
+			goto err;
+	} else {
+		error = dma_map_sg(dev, req->src, sg_nents(req->src),
+				   DMA_TO_DEVICE);
+		if (!error)
+			goto err;
+		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+				   DMA_FROM_DEVICE);
+		if (!error) {
+			dma_unmap_sg(dev, req->src, sg_nents(req->src),
+				   DMA_TO_DEVICE);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
+	return -ENOMEM;
+}
+
+void chcr_aead_dma_unmap(struct device *dev,
+			 struct aead_request *req,
+			 unsigned short op_type)
+{
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	int dst_size;
+
+	dst_size = req->assoclen + req->cryptlen + (op_type ?
+					-authsize : authsize);
+	if (!req->cryptlen || !dst_size)
+		return;
+
+	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
+					DMA_BIDIRECTIONAL);
+	if (req->src == req->dst) {
+		dma_unmap_sg(dev, req->src, sg_nents(req->src),
+				   DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(dev, req->src, sg_nents(req->src),
+				   DMA_TO_DEVICE);
+		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+				   DMA_FROM_DEVICE);
+	}
+}
+
+void chcr_add_aead_src_ent(struct aead_request *req,
+			   struct ulptx_sgl *ulptx,
+			   unsigned int assoclen)
+{
+	struct ulptx_walk ulp_walk;
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+
+	if (reqctx->imm) {
+		u8 *buf = (u8 *)ulptx;
+
+		if (reqctx->b0_len) {
+			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
+			buf += reqctx->b0_len;
+		}
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+				   buf, assoclen, 0);
+		buf += assoclen;
+		memcpy(buf, reqctx->iv, IV);
+		buf += IV;
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+				   buf, req->cryptlen, req->assoclen);
+	} else {
+		ulptx_walk_init(&ulp_walk, ulptx);
+		if (reqctx->b0_len)
+			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
+					    &reqctx->b0_dma);
+		ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
+		ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
+		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
+				  req->assoclen);
+		ulptx_walk_end(&ulp_walk);
+	}
+}
+
+void chcr_add_aead_dst_ent(struct aead_request *req,
+			   struct cpl_rx_phys_dsgl *phys_cpl,
+			   unsigned int assoclen,
+			   unsigned short qid)
+{
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct dsgl_walk dsgl_walk;
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	struct chcr_context *ctx = a_ctx(tfm);
+	u32 temp;
+
+	dsgl_walk_init(&dsgl_walk, phys_cpl);
+	if (reqctx->b0_len)
+		dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
+	dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
+	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
+	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
+	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
+}
+
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+			     void *ulptx,
+			     struct  cipher_wr_param *wrparam)
+{
+	struct ulptx_walk ulp_walk;
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	u8 *buf = ulptx;
+
+	memcpy(buf, reqctx->iv, IV);
+	buf += IV;
+	if (reqctx->imm) {
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+				   buf, wrparam->bytes, reqctx->processed);
+	} else {
+		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
+		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
+				  reqctx->src_ofst);
+		reqctx->srcsg = ulp_walk.last_sg;
+		reqctx->src_ofst = ulp_walk.last_sg_len;
+		ulptx_walk_end(&ulp_walk);
+	}
+}
+
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+			     struct cpl_rx_phys_dsgl *phys_cpl,
+			     struct  cipher_wr_param *wrparam,
+			     unsigned short qid)
+{
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+	struct chcr_context *ctx = c_ctx(tfm);
+	struct dsgl_walk dsgl_walk;
+
+	dsgl_walk_init(&dsgl_walk, phys_cpl);
+	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
+			 reqctx->dst_ofst);
+	reqctx->dstsg = dsgl_walk.last_sg;
+	reqctx->dst_ofst = dsgl_walk.last_sg_len;
+
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
+}
+
+void chcr_add_hash_src_ent(struct ahash_request *req,
+			   struct ulptx_sgl *ulptx,
+			   struct hash_wr_param *param)
+{
+	struct ulptx_walk ulp_walk;
+	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+
+	if (reqctx->hctx_wr.imm) {
+		u8 *buf = (u8 *)ulptx;
+
+		if (param->bfr_len) {
+			memcpy(buf, reqctx->reqbfr, param->bfr_len);
+			buf += param->bfr_len;
+		}
+
+		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
+				   sg_nents(reqctx->hctx_wr.srcsg), buf,
+				   param->sg_len, 0);
+	} else {
+		ulptx_walk_init(&ulp_walk, ulptx);
+		if (param->bfr_len)
+			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
+					    &reqctx->hctx_wr.dma_addr);
+		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
+				  param->sg_len, reqctx->hctx_wr.src_ofst);
+		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
+		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
+		ulptx_walk_end(&ulp_walk);
+	}
+}
+
+int chcr_hash_dma_map(struct device *dev,
+		      struct ahash_request *req)
+{
+	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+	int error = 0;
+
+	if (!req->nbytes)
+		return 0;
+	error = dma_map_sg(dev, req->src, sg_nents(req->src),
+			   DMA_TO_DEVICE);
+	if (!error)
+		return -ENOMEM;
+	req_ctx->hctx_wr.is_sg_map = 1;
+	return 0;
+}
+
+void chcr_hash_dma_unmap(struct device *dev,
+			 struct ahash_request *req)
+{
+	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+
+	if (!req->nbytes)
+		return;
+
+	dma_unmap_sg(dev, req->src, sg_nents(req->src),
+			   DMA_TO_DEVICE);
+	req_ctx->hctx_wr.is_sg_map = 0;
+}
+
+int chcr_cipher_dma_map(struct device *dev,
+			struct ablkcipher_request *req)
+{
+	int error;
+
+	if (req->src == req->dst) {
+		error = dma_map_sg(dev, req->src, sg_nents(req->src),
+				   DMA_BIDIRECTIONAL);
+		if (!error)
+			goto err;
+	} else {
+		error = dma_map_sg(dev, req->src, sg_nents(req->src),
+				   DMA_TO_DEVICE);
+		if (!error)
+			goto err;
+		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+				   DMA_FROM_DEVICE);
+		if (!error) {
+			dma_unmap_sg(dev, req->src, sg_nents(req->src),
+				   DMA_TO_DEVICE);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	return -ENOMEM;
+}
+
+void chcr_cipher_dma_unmap(struct device *dev,
+			   struct ablkcipher_request *req)
+{
+	if (req->src == req->dst) {
+		dma_unmap_sg(dev, req->src, sg_nents(req->src),
+				   DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(dev, req->src, sg_nents(req->src),
+				   DMA_TO_DEVICE);
+		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+				   DMA_FROM_DEVICE);
+	}
+}
+
+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+	__be32 data;
+
+	memset(block, 0, csize);
+	block += csize;
+
+	if (csize >= 4)
+		csize = 4;
+	else if (msglen > (unsigned int)(1 << (8 * csize)))
+		return -EOVERFLOW;
+
+	data = cpu_to_be32(msglen);
+	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+	return 0;
+}
+
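+/* Build the CCM B0 block (flags, nonce and encoded message length) in the
+ * request's scratch pad.
+ */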
+static void generate_b0(struct aead_request *req,
+			struct chcr_aead_ctx *aeadctx,
+			unsigned short op_type)
+{
+	unsigned int l, lp, m;
+	int rc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	u8 *b0 = reqctx->scratch_pad;
+
+	m = crypto_aead_authsize(aead);
+
+	memcpy(b0, reqctx->iv, 16);
+
+	lp = b0[0];
+	l = lp + 1;
+
+	/* set m, bits 3-5 */
+	*b0 |= (8 * ((m - 2) / 2));
+
+	/* set adata, bit 6, if associated data is used */
+	if (req->assoclen)
+		*b0 |= 64;
+	rc = set_msg_len(b0 + 16 - l,
+			 (op_type == CHCR_DECRYPT_OP) ?
+			 req->cryptlen - m : req->cryptlen, l);
+}
+
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+	/* 2 <= L <= 8, so 1 <= L' <= 7. */
+	if (iv[0] < 1 || iv[0] > 7)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int ccm_format_packet(struct aead_request *req,
+			     struct chcr_aead_ctx *aeadctx,
+			     unsigned int sub_type,
+			     unsigned short op_type,
+			     unsigned int assoclen)
+{
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	int rc = 0;
+
+	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+		reqctx->iv[0] = 3;
+		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
+		memcpy(reqctx->iv + 4, req->iv, 8);
+		memset(reqctx->iv + 12, 0, 4);
+	} else {
+		memcpy(reqctx->iv, req->iv, 16);
+	}
+	if (assoclen)
+		*((unsigned short *)(reqctx->scratch_pad + 16)) =
+				htons(assoclen);
+
+	generate_b0(req, aeadctx, op_type);
+	/* zero the ctr value */
+	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+	return rc;
+}
+
+static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+				  unsigned int dst_size,
+				  struct aead_request *req,
+				  unsigned short op_type)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
+	unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
+	unsigned int ccm_xtra;
+	unsigned char tag_offset = 0, auth_offset = 0;
+	unsigned int assoclen;
+
+	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+		assoclen = req->assoclen - 8;
+	else
+		assoclen = req->assoclen;
+	ccm_xtra = CCM_B0_SIZE +
+		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
+
+	auth_offset = req->cryptlen ?
+		(assoclen + IV + 1 + ccm_xtra) : 0;
+	if (op_type == CHCR_DECRYPT_OP) {
+		if (crypto_aead_authsize(tfm) != req->cryptlen)
+			tag_offset = crypto_aead_authsize(tfm);
+		else
+			auth_offset = 0;
+	}
+
+	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
+					 2, assoclen + 1 + ccm_xtra);
+	sec_cpl->pldlen =
+		htonl(assoclen + IV + req->cryptlen + ccm_xtra);
+	/* For CCM there is always a B0 block, so AAD start will always be 1 */
+	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+					1, assoclen + ccm_xtra, assoclen
+					+ IV + 1 + ccm_xtra, 0);
+
+	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
+					auth_offset, tag_offset,
+					(op_type == CHCR_ENCRYPT_OP) ? 0 :
+					crypto_aead_authsize(tfm));
+	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
+					cipher_mode, mac_mode,
+					aeadctx->hmac_ctrl, IV >> 1);
+
+	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
+					0, dst_size);
+}
+
+static int aead_ccm_validate_input(unsigned short op_type,
+				   struct aead_request *req,
+				   struct chcr_aead_ctx *aeadctx,
+				   unsigned int sub_type)
+{
+	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+		if (crypto_ccm_check_iv(req->iv)) {
+			pr_err("CCM: IV check fails\n");
+			return -EINVAL;
+		}
+	} else {
+		if (req->assoclen != 16 && req->assoclen != 20) {
+			pr_err("RFC4309: Invalid AAD length %d\n",
+			       req->assoclen);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
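+/* Build the work request for CCM and RFC4309 (CCM within ESP) requests,
+ * including the B0 block and the AAD length field that CCM requires.
+ */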
+static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
+					  unsigned short qid,
+					  int size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct sk_buff *skb = NULL;
+	struct chcr_wr *chcr_req;
+	struct cpl_rx_phys_dsgl *phys_cpl;
+	struct ulptx_sgl *ulptx;
+	unsigned int transhdr_len;
+	unsigned int dst_size = 0, kctx_len, dnents, temp;
+	unsigned int sub_type, assoclen = req->assoclen;
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	int error = -EINVAL;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		GFP_ATOMIC;
+	struct adapter *adap = padap(a_ctx(tfm)->dev);
+
+	sub_type = get_aead_subtype(tfm);
+	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+		assoclen -= 8;
+	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
+	error = chcr_aead_common_init(req);
+	if (error)
+		return ERR_PTR(error);
+
+	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
+	if (error)
+		goto err;
+	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+	dnents += sg_nents_xlen(req->dst, req->cryptlen
+			+ (reqctx->op ? -authsize : authsize),
+			CHCR_DST_SG_SIZE, req->assoclen);
+	dnents += MIN_CCM_SG; // For IV and B0
+	dst_size = get_space_for_phys_dsgl(dnents);
+	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
+	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
+		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
+	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
+				     reqctx->b0_len, 16) :
+		(sgl_len(reqctx->src_nents + reqctx->aad_nents +
+				    MIN_CCM_SG) *  8);
+	transhdr_len += temp;
+	transhdr_len = roundup(transhdr_len, 16);
+
+	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
+				    reqctx->b0_len, transhdr_len, reqctx->op)) {
+		atomic_inc(&adap->chcr_stats.fallback);
+		chcr_aead_common_exit(req);
+		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
+	}
+	skb = alloc_skb(SGE_MAX_WR_LEN,  flags);
+
+	if (!skb) {
+		error = -ENOMEM;
+		goto err;
+	}
+
+	chcr_req = __skb_put_zero(skb, transhdr_len);
+
+	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
+
+	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+			aeadctx->key, aeadctx->enckey_len);
+
+	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+	error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
+	if (error)
+		goto dstmap_fail;
+	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+	chcr_add_aead_src_ent(req, ulptx, assoclen);
+
+	atomic_inc(&adap->chcr_stats.aead_rqst);
+	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
+		reqctx->b0_len) : 0);
+	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
+		    transhdr_len, temp, 0);
+	reqctx->skb = skb;
+
+	return skb;
+dstmap_fail:
+	kfree_skb(skb);
+err:
+	chcr_aead_common_exit(req);
+	return ERR_PTR(error);
+}
+
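+/* Build the work request for GCM and RFC4106 (GCM within ESP) requests. The
+ * 16-byte IV is assembled from the salt (RFC4106 only), the request IV and a
+ * counter of 1; the precomputed GHASH subkey H follows the AES key in the
+ * key context.
+ */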
+static struct sk_buff *create_gcm_wr(struct aead_request *req,
+				     unsigned short qid,
+				     int size)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+	struct sk_buff *skb = NULL;
+	struct chcr_wr *chcr_req;
+	struct cpl_rx_phys_dsgl *phys_cpl;
+	struct ulptx_sgl *ulptx;
+	unsigned int transhdr_len, dnents = 0;
+	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	int error = -EINVAL;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		GFP_ATOMIC;
+	struct adapter *adap = padap(a_ctx(tfm)->dev);
+
+	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+		assoclen = req->assoclen - 8;
+
+	reqctx->b0_len = 0;
+	error = chcr_aead_common_init(req);
+	if (error)
+		return ERR_PTR(error);
+	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+	dnents += sg_nents_xlen(req->dst, req->cryptlen +
+				(reqctx->op ? -authsize : authsize),
+				CHCR_DST_SG_SIZE, req->assoclen);
+	dnents += MIN_GCM_SG; // For IV
+	dst_size = get_space_for_phys_dsgl(dnents);
+	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
+	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
+			SGE_MAX_WR_LEN;
+	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
+		(sgl_len(reqctx->src_nents +
+		reqctx->aad_nents + MIN_GCM_SG) * 8);
+	transhdr_len += temp;
+	transhdr_len = roundup(transhdr_len, 16);
+	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
+			    transhdr_len, reqctx->op)) {
+
+		atomic_inc(&adap->chcr_stats.fallback);
+		chcr_aead_common_exit(req);
+		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
+	}
+	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+	if (!skb) {
+		error = -ENOMEM;
+		goto err;
+	}
+
+	chcr_req = __skb_put_zero(skb, transhdr_len);
+
+	//Offset of tag from end
+	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
+	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
+					a_ctx(tfm)->dev->rx_channel_id, 2,
+					(assoclen + 1));
+	chcr_req->sec_cpl.pldlen =
+		htonl(assoclen + IV + req->cryptlen);
+	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+					assoclen ? 1 : 0, assoclen,
+					assoclen + IV + 1, 0);
+	chcr_req->sec_cpl.cipherstop_lo_authinsert =
+			FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
+						temp, temp);
+	chcr_req->sec_cpl.seqno_numivs =
+			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
+					CHCR_ENCRYPT_OP) ? 1 : 0,
+					CHCR_SCMD_CIPHER_MODE_AES_GCM,
+					CHCR_SCMD_AUTH_MODE_GHASH,
+					aeadctx->hmac_ctrl, IV >> 1);
+	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+					0, 0, dst_size);
+	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+
+	/* prepare a 16 byte iv */
+	/* S   A   L  T |  IV | 0x00000001 */
+	if (get_aead_subtype(tfm) ==
+	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+		memcpy(reqctx->iv, aeadctx->salt, 4);
+		memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
+	} else {
+		memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
+	}
+	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+
+	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+
+	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+	chcr_add_aead_src_ent(req, ulptx, assoclen);
+	atomic_inc(&adap->chcr_stats.aead_rqst);
+	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+		    transhdr_len, temp, reqctx->verify);
+	reqctx->skb = skb;
+	return skb;
+
+err:
+	chcr_aead_common_exit(req);
+	return ERR_PTR(error);
+}
+
+static int chcr_aead_cra_init(struct crypto_aead *tfm)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+
+	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
+					       CRYPTO_ALG_NEED_FALLBACK |
+					       CRYPTO_ALG_ASYNC);
+	if  (IS_ERR(aeadctx->sw_cipher))
+		return PTR_ERR(aeadctx->sw_cipher);
+	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
+				 sizeof(struct aead_request) +
+				 crypto_aead_reqsize(aeadctx->sw_cipher)));
+	return chcr_device_init(a_ctx(tfm));
+}
+
+static void chcr_aead_cra_exit(struct crypto_aead *tfm)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+
+	crypto_free_aead(aeadctx->sw_cipher);
+}
+
+static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
+					unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+
+	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
+	aeadctx->mayverify = VERIFY_HW;
+	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
+}
+
+static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
+				    unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+	u32 maxauth = crypto_aead_maxauthsize(tfm);
+
+	/* The SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
+	 * does not hold for SHA1, so the authsize == 12 check must come before
+	 * the authsize == (maxauth >> 1) check.
+	 */
+	if (authsize == ICV_4) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == ICV_6) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == ICV_10) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == ICV_12) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == ICV_14) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == (maxauth >> 1)) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == maxauth) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_HW;
+	} else {
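+		/* Truncation lengths the hardware cannot produce directly:
+		 * request the untruncated digest and verify/truncate it in
+		 * software.
+		 */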
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_SW;
+	}
+	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
+}
+
+static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+
+	switch (authsize) {
+	case ICV_4:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_8:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_12:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_14:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_16:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_13:
+	case ICV_15:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_SW;
+		break;
+	default:
+		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
+}
+
+static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
+					  unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+
+	switch (authsize) {
+	case ICV_8:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_12:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_16:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	default:
+		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
+}
+
+static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
+				unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+
+	switch (authsize) {
+	case ICV_4:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_6:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_8:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_10:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_12:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_14:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_16:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	default:
+		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
+}
+
+static int chcr_ccm_common_setkey(struct crypto_aead *aead,
+				const u8 *key,
+				unsigned int keylen)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
+	unsigned char ck_size, mk_size;
+	int key_ctx_size = 0;
+
+	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
+	if (keylen == AES_KEYSIZE_128) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+	} else if (keylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
+	} else if (keylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+	} else {
+		crypto_tfm_set_flags((struct crypto_tfm *)aead,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		aeadctx->enckey_len = 0;
+		return	-EINVAL;
+	}
+	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
+						key_ctx_size >> 4);
+	memcpy(aeadctx->key, key, keylen);
+	aeadctx->enckey_len = keylen;
+
+	return 0;
+}
+
+static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
+				const u8 *key,
+				unsigned int keylen)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
+	int error;
+
+	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
+			      CRYPTO_TFM_REQ_MASK);
+	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
+	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
+			      CRYPTO_TFM_RES_MASK);
+	if (error)
+		return error;
+	return chcr_ccm_common_setkey(aead, key, keylen);
+}
+
+static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
+				    unsigned int keylen)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
+	int error;
+
+	if (keylen < 3) {
+		crypto_tfm_set_flags((struct crypto_tfm *)aead,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		aeadctx->enckey_len = 0;
+		return	-EINVAL;
+	}
+	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
+			      CRYPTO_TFM_REQ_MASK);
+	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
+	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
+			      CRYPTO_TFM_RES_MASK);
+	if (error)
+		return error;
+	keylen -= 3;
+	memcpy(aeadctx->salt, key + keylen, 3);
+	return chcr_ccm_common_setkey(aead, key, keylen);
+}
+
+static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+			   unsigned int keylen)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
+	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
+	struct crypto_cipher *cipher;
+	unsigned int ck_size;
+	int ret = 0, key_ctx_size = 0;
+
+	aeadctx->enckey_len = 0;
+	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
+			      & CRYPTO_TFM_REQ_MASK);
+	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
+	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
+			      CRYPTO_TFM_RES_MASK);
+	if (ret)
+		goto out;
+
+	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+	    keylen > 3) {
+		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
+		memcpy(aeadctx->salt, key + keylen, 4);
+	}
+	if (keylen == AES_KEYSIZE_128) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	} else if (keylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+	} else if (keylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+	} else {
+		crypto_tfm_set_flags((struct crypto_tfm *)aead,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		pr_err("GCM: Invalid key length %d\n", keylen);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	memcpy(aeadctx->key, key, keylen);
+	aeadctx->enckey_len = keylen;
+	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
+		AEAD_H_SIZE;
+	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+						CHCR_KEYCTX_MAC_KEY_SIZE_128,
+						0, 0,
+						key_ctx_size >> 4);
+	/* Compute the GHASH subkey H = CIPH_K(0^128); it is placed in the
+	 * key context.
+	 */
+	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
+	if (IS_ERR(cipher)) {
+		aeadctx->enckey_len = 0;
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = crypto_cipher_setkey(cipher, key, keylen);
+	if (ret) {
+		aeadctx->enckey_len = 0;
+		goto out1;
+	}
+	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
+	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
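+	/* gctx->ghash_h now holds the GHASH subkey H = AES_K(0^128); it is
+	 * copied into the hardware key context when the GCM work request is
+	 * built.
+	 */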
+
+out1:
+	crypto_free_cipher(cipher);
+out:
+	return ret;
+}
+
+static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+				   unsigned int keylen)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
+	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	/* holds both the authentication and the cipher key */
+	struct crypto_authenc_keys keys;
+	unsigned int bs, subtype;
+	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
+	int err = 0, i, key_ctx_len = 0;
+	unsigned char ck_size = 0;
+	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
+	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
+	struct algo_param param;
+	int align;
+	u8 *o_ptr = NULL;
+
+	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
+			      & CRYPTO_TFM_REQ_MASK);
+	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
+	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
+			      & CRYPTO_TFM_RES_MASK);
+	if (err)
+		goto out;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		goto out;
+	}
+
+	if (get_alg_config(&param, max_authsize)) {
+		pr_err("chcr : Unsupported digest size\n");
+		goto out;
+	}
+	subtype = get_aead_subtype(authenc);
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+			goto out;
+		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+	}
+	if (keys.enckeylen == AES_KEYSIZE_128) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	} else if (keys.enckeylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+	} else if (keys.enckeylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+	} else {
+		pr_err("chcr : Unsupported cipher key\n");
+		goto out;
+	}
+
+	/* Copy only the encryption key. The authentication key is used here
+	 * to generate h(ipad) and h(opad), so it is not needed again.
+	 * authkeylen is the size of the hash digest.
+	 */
+	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+	aeadctx->enckey_len = keys.enckeylen;
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+		subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+
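+		/* CBC decryption on the hardware needs the reverse-round
+		 * (decryption) key as well; derive it from the AES
+		 * encryption key.
+		 */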
+		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+			    aeadctx->enckey_len << 3);
+	}
+	base_hash = chcr_alloc_shash(max_authsize);
+	if (IS_ERR(base_hash)) {
+		pr_err("chcr : Base driver cannot be loaded\n");
+		aeadctx->enckey_len = 0;
+		memzero_explicit(&keys, sizeof(keys));
+		return -EINVAL;
+	}
+	{
+		SHASH_DESC_ON_STACK(shash, base_hash);
+
+		shash->tfm = base_hash;
+		shash->flags = crypto_shash_get_flags(base_hash);
+		bs = crypto_shash_blocksize(base_hash);
+		align = KEYCTX_ALIGN_PAD(max_authsize);
+		o_ptr = actx->h_iopad + param.result_size + align;
+
+		if (keys.authkeylen > bs) {
+			err = crypto_shash_digest(shash, keys.authkey,
+						  keys.authkeylen,
+						  o_ptr);
+			if (err) {
+				pr_err("chcr : Hashing authentication key failed\n");
+				goto out;
+			}
+			keys.authkeylen = max_authsize;
+		} else
+			memcpy(o_ptr, keys.authkey, keys.authkeylen);
+
+		/* Compute the ipad digest */
+		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+		memcpy(pad, o_ptr, keys.authkeylen);
+		for (i = 0; i < bs >> 2; i++)
+			*((unsigned int *)pad + i) ^= IPAD_DATA;
+
+		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
+					      max_authsize))
+			goto out;
+		/* Compute the opad digest */
+		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+		memcpy(pad, o_ptr, keys.authkeylen);
+		for (i = 0; i < bs >> 2; i++)
+			*((unsigned int *)pad + i) ^= OPAD_DATA;
+
+		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
+			goto out;
+
+		/* convert the ipad and opad digest to network order */
+		chcr_change_order(actx->h_iopad, param.result_size);
+		chcr_change_order(o_ptr, param.result_size);
+		key_ctx_len = sizeof(struct _key_ctx) +
+			roundup(keys.enckeylen, 16) +
+			(param.result_size + align) * 2;
+		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
+						0, 1, key_ctx_len >> 4);
+		actx->auth_mode = param.auth_mode;
+		chcr_free_shash(base_hash);
+
+		memzero_explicit(&keys, sizeof(keys));
+		return 0;
+	}
+out:
+	aeadctx->enckey_len = 0;
+	memzero_explicit(&keys, sizeof(keys));
+	if (!IS_ERR(base_hash))
+		chcr_free_shash(base_hash);
+	return -EINVAL;
+}
+
+static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
+					const u8 *key, unsigned int keylen)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
+	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	/* holds both the authentication and the cipher key */
+	struct crypto_authenc_keys keys;
+	int err;
+	unsigned int subtype;
+	int key_ctx_len = 0;
+	unsigned char ck_size = 0;
+
+	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
+			      & CRYPTO_TFM_REQ_MASK);
+	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
+	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
+			      & CRYPTO_TFM_RES_MASK);
+	if (err)
+		goto out;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		goto out;
+	}
+	subtype = get_aead_subtype(authenc);
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+			goto out;
+		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+	}
+	if (keys.enckeylen == AES_KEYSIZE_128) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	} else if (keys.enckeylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+	} else if (keys.enckeylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+	} else {
+		pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
+		goto out;
+	}
+	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+	aeadctx->enckey_len = keys.enckeylen;
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+				aeadctx->enckey_len << 3);
+	}
+	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
+
+	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
+						0, key_ctx_len >> 4);
+	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
+	memzero_explicit(&keys, sizeof(keys));
+	return 0;
+out:
+	aeadctx->enckey_len = 0;
+	memzero_explicit(&keys, sizeof(keys));
+	return -EINVAL;
+}
+
+static int chcr_aead_op(struct aead_request *req,
+			int size,
+			create_wr_t create_wr_fn)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct uld_ctx *u_ctx;
+	struct sk_buff *skb;
+	int isfull = 0;
+
+	if (!a_ctx(tfm)->dev) {
+		pr_err("chcr : %s : No crypto device.\n", __func__);
+		return -ENXIO;
+	}
+	u_ctx = ULD_CTX(a_ctx(tfm));
+	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+				   a_ctx(tfm)->tx_qidx)) {
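+		/* The crypto queue is full: backloggable requests are still
+		 * submitted and -EBUSY is returned, all others are rejected
+		 * with -ENOSPC.
+		 */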
+		isfull = 1;
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return -ENOSPC;
+	}
+
+	/* Form a WR from req */
+	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
+
+	if (IS_ERR(skb) || !skb)
+		return PTR_ERR(skb);
+
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
+	chcr_send_wr(skb);
+	return isfull ? -EBUSY : -EINPROGRESS;
+}
+
+static int chcr_aead_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
+	reqctx->verify = VERIFY_HW;
+	reqctx->op = CHCR_ENCRYPT_OP;
+
+	switch (get_aead_subtype(tfm)) {
+	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
+		return chcr_aead_op(req, 0, create_authenc_wr);
+	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+		return chcr_aead_op(req, 0, create_aead_ccm_wr);
+	default:
+		return chcr_aead_op(req, 0, create_gcm_wr);
+	}
+}
+
+static int chcr_aead_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	int size;
+
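+	/* If the configured tag length is one the hardware cannot check
+	 * directly, fetch the full digest and mark the request for software
+	 * verification.
+	 */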
+	if (aeadctx->mayverify == VERIFY_SW) {
+		size = crypto_aead_maxauthsize(tfm);
+		reqctx->verify = VERIFY_SW;
+	} else {
+		size = 0;
+		reqctx->verify = VERIFY_HW;
+	}
+	reqctx->op = CHCR_DECRYPT_OP;
+	switch (get_aead_subtype(tfm)) {
+	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
+		return chcr_aead_op(req, size, create_authenc_wr);
+	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+		return chcr_aead_op(req, size, create_aead_ccm_wr);
+	default:
+		return chcr_aead_op(req, size, create_gcm_wr);
+	}
+}
+
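+/* Template table of every algorithm the driver exposes. Fields common to
+ * all entries (module owner, flags, context sizes, request callbacks) are
+ * filled in by chcr_register_alg() before registration.
+ */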
+static struct chcr_alg_template driver_algs[] = {
+	/* AES-CBC */
+	{
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
+		.is_registered = 0,
+		.alg.crypto = {
+			.cra_name		= "cbc(aes)",
+			.cra_driver_name	= "cbc-aes-chcr",
+			.cra_blocksize		= AES_BLOCK_SIZE,
+			.cra_init		= chcr_cra_init,
+			.cra_exit		= chcr_cra_exit,
+			.cra_u.ablkcipher	= {
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.ivsize		= AES_BLOCK_SIZE,
+				.setkey			= chcr_aes_cbc_setkey,
+				.encrypt		= chcr_aes_encrypt,
+				.decrypt		= chcr_aes_decrypt,
+			}
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
+		.is_registered = 0,
+		.alg.crypto =   {
+			.cra_name		= "xts(aes)",
+			.cra_driver_name	= "xts-aes-chcr",
+			.cra_blocksize		= AES_BLOCK_SIZE,
+			.cra_init		= chcr_cra_init,
+			.cra_exit		= NULL,
+			.cra_u.ablkcipher	= {
+				.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+				.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+				.ivsize		= AES_BLOCK_SIZE,
+				.setkey		= chcr_aes_xts_setkey,
+				.encrypt	= chcr_aes_encrypt,
+				.decrypt	= chcr_aes_decrypt,
+			}
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
+		.is_registered = 0,
+		.alg.crypto = {
+			.cra_name		= "ctr(aes)",
+			.cra_driver_name	= "ctr-aes-chcr",
+			.cra_blocksize		= 1,
+			.cra_init		= chcr_cra_init,
+			.cra_exit		= chcr_cra_exit,
+			.cra_u.ablkcipher	= {
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.ivsize		= AES_BLOCK_SIZE,
+				.setkey		= chcr_aes_ctr_setkey,
+				.encrypt	= chcr_aes_encrypt,
+				.decrypt	= chcr_aes_decrypt,
+			}
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
+			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
+		.is_registered = 0,
+		.alg.crypto = {
+			.cra_name		= "rfc3686(ctr(aes))",
+			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
+			.cra_blocksize		= 1,
+			.cra_init		= chcr_rfc3686_init,
+			.cra_exit		= chcr_cra_exit,
+			.cra_u.ablkcipher	= {
+				.min_keysize	= AES_MIN_KEY_SIZE +
+					CTR_RFC3686_NONCE_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE +
+					CTR_RFC3686_NONCE_SIZE,
+				.ivsize		= CTR_RFC3686_IV_SIZE,
+				.setkey		= chcr_aes_rfc3686_setkey,
+				.encrypt	= chcr_aes_encrypt,
+				.decrypt	= chcr_aes_decrypt,
+				.geniv          = "seqiv",
+			}
+		}
+	},
+	/* SHA */
+	{
+		.type = CRYPTO_ALG_TYPE_AHASH,
+		.is_registered = 0,
+		.alg.hash = {
+			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "sha1-chcr",
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+			}
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AHASH,
+		.is_registered = 0,
+		.alg.hash = {
+			.halg.digestsize = SHA256_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha256",
+				.cra_driver_name = "sha256-chcr",
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+			}
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AHASH,
+		.is_registered = 0,
+		.alg.hash = {
+			.halg.digestsize = SHA224_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha224",
+				.cra_driver_name = "sha224-chcr",
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+			}
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AHASH,
+		.is_registered = 0,
+		.alg.hash = {
+			.halg.digestsize = SHA384_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha384",
+				.cra_driver_name = "sha384-chcr",
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+			}
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AHASH,
+		.is_registered = 0,
+		.alg.hash = {
+			.halg.digestsize = SHA512_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha512",
+				.cra_driver_name = "sha512-chcr",
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+			}
+		}
+	},
+	/* HMAC */
+	{
+		.type = CRYPTO_ALG_TYPE_HMAC,
+		.is_registered = 0,
+		.alg.hash = {
+			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "hmac(sha1)",
+				.cra_driver_name = "hmac-sha1-chcr",
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+			}
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_HMAC,
+		.is_registered = 0,
+		.alg.hash = {
+			.halg.digestsize = SHA224_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "hmac(sha224)",
+				.cra_driver_name = "hmac-sha224-chcr",
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+			}
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_HMAC,
+		.is_registered = 0,
+		.alg.hash = {
+			.halg.digestsize = SHA256_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "hmac(sha256)",
+				.cra_driver_name = "hmac-sha256-chcr",
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+			}
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_HMAC,
+		.is_registered = 0,
+		.alg.hash = {
+			.halg.digestsize = SHA384_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "hmac(sha384)",
+				.cra_driver_name = "hmac-sha384-chcr",
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+			}
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_HMAC,
+		.is_registered = 0,
+		.alg.hash = {
+			.halg.digestsize = SHA512_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "hmac(sha512)",
+				.cra_driver_name = "hmac-sha512-chcr",
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+			}
+		}
+	},
+	/* Add AEAD Algorithms */
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "gcm(aes)",
+				.cra_driver_name = "gcm-aes-chcr",
+				.cra_blocksize	= 1,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_gcm_ctx),
+			},
+			.ivsize = GCM_AES_IV_SIZE,
+			.maxauthsize = GHASH_DIGEST_SIZE,
+			.setkey = chcr_gcm_setkey,
+			.setauthsize = chcr_gcm_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "rfc4106(gcm(aes))",
+				.cra_driver_name = "rfc4106-gcm-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_priority = CHCR_AEAD_PRIORITY + 1,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_gcm_ctx),
+
+			},
+			.ivsize = GCM_RFC4106_IV_SIZE,
+			.maxauthsize	= GHASH_DIGEST_SIZE,
+			.setkey = chcr_gcm_setkey,
+			.setauthsize	= chcr_4106_4309_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "ccm(aes)",
+				.cra_driver_name = "ccm-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize	= GHASH_DIGEST_SIZE,
+			.setkey = chcr_aead_ccm_setkey,
+			.setauthsize	= chcr_ccm_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "rfc4309(ccm(aes))",
+				.cra_driver_name = "rfc4309-ccm-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_priority = CHCR_AEAD_PRIORITY + 1,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx),
+
+			},
+			.ivsize = 8,
+			.maxauthsize	= GHASH_DIGEST_SIZE,
+			.setkey = chcr_aead_rfc4309_setkey,
+			.setauthsize = chcr_4106_4309_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha1-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha256-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize	= SHA256_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha224-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha384-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha512-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(digest_null,cbc(aes))",
+				.cra_driver_name =
+					"authenc-digest_null-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize  = AES_BLOCK_SIZE,
+			.maxauthsize = 0,
+			.setkey  = chcr_aead_digest_null_setkey,
+			.setauthsize = chcr_authenc_null_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+				.cra_driver_name =
+				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+				.cra_driver_name =
+				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize	= SHA256_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+				.cra_driver_name =
+				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+			},
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+				.cra_driver_name =
+				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+				.cra_driver_name =
+				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
+				.cra_driver_name =
+				"authenc-digest_null-rfc3686-ctr-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_priority = CHCR_AEAD_PRIORITY,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize  = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = 0,
+			.setkey  = chcr_aead_digest_null_setkey,
+			.setauthsize = chcr_authenc_null_setauthsize,
+		}
+	},
+
+};
+
+/*
+ *	chcr_unregister_alg - Deregister crypto algorithms from the
+ *	kernel crypto framework.
+ */
+static int chcr_unregister_alg(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			if (driver_algs[i].is_registered)
+				crypto_unregister_alg(
+						&driver_algs[i].alg.crypto);
+			break;
+		case CRYPTO_ALG_TYPE_AEAD:
+			if (driver_algs[i].is_registered)
+				crypto_unregister_aead(
+						&driver_algs[i].alg.aead);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			if (driver_algs[i].is_registered)
+				crypto_unregister_ahash(
+						&driver_algs[i].alg.hash);
+			break;
+		}
+		driver_algs[i].is_registered = 0;
+	}
+	return 0;
+}
+
+#define SZ_AHASH_CTX sizeof(struct chcr_context)
+#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
+#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
+
+/*
+ *	chcr_register_alg - Register crypto algorithms with the kernel
+ *	crypto framework.
+ */
+static int chcr_register_alg(void)
+{
+	struct crypto_alg ai;
+	struct ahash_alg *a_hash;
+	int err = 0, i;
+	char *name = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		if (driver_algs[i].is_registered)
+			continue;
+		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			driver_algs[i].alg.crypto.cra_priority =
+				CHCR_CRA_PRIORITY;
+			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
+			driver_algs[i].alg.crypto.cra_flags =
+				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_NEED_FALLBACK;
+			driver_algs[i].alg.crypto.cra_ctxsize =
+				sizeof(struct chcr_context) +
+				sizeof(struct ablk_ctx);
+			driver_algs[i].alg.crypto.cra_alignmask = 0;
+			driver_algs[i].alg.crypto.cra_type =
+				&crypto_ablkcipher_type;
+			err = crypto_register_alg(&driver_algs[i].alg.crypto);
+			name = driver_algs[i].alg.crypto.cra_driver_name;
+			break;
+		case CRYPTO_ALG_TYPE_AEAD:
+			driver_algs[i].alg.aead.base.cra_flags =
+				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
+			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
+			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
+			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
+			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
+			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
+			err = crypto_register_aead(&driver_algs[i].alg.aead);
+			name = driver_algs[i].alg.aead.base.cra_driver_name;
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			a_hash = &driver_algs[i].alg.hash;
+			a_hash->update = chcr_ahash_update;
+			a_hash->final = chcr_ahash_final;
+			a_hash->finup = chcr_ahash_finup;
+			a_hash->digest = chcr_ahash_digest;
+			a_hash->export = chcr_ahash_export;
+			a_hash->import = chcr_ahash_import;
+			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
+			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
+			a_hash->halg.base.cra_module = THIS_MODULE;
+			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+			a_hash->halg.base.cra_alignmask = 0;
+			a_hash->halg.base.cra_exit = NULL;
+
+			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
+				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
+				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
+				a_hash->init = chcr_hmac_init;
+				a_hash->setkey = chcr_ahash_setkey;
+				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
+			} else {
+				a_hash->init = chcr_sha_init;
+				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
+				a_hash->halg.base.cra_init = chcr_sha_cra_init;
+			}
+			err = crypto_register_ahash(&driver_algs[i].alg.hash);
+			ai = driver_algs[i].alg.hash.halg.base;
+			name = ai.cra_driver_name;
+			break;
+		}
+		if (err) {
+			pr_err("chcr : %s : Algorithm registration failed\n",
+			       name);
+			goto register_err;
+		} else {
+			driver_algs[i].is_registered = 1;
+		}
+	}
+	return 0;
+
+register_err:
+	chcr_unregister_alg();
+	return err;
+}
+
+/*
+ *	start_crypto - Register the crypto algorithms.
+ *	This should be called once, when the first device comes up. After this
+ *	the kernel will start calling the driver APIs for crypto operations.
+ */
+int start_crypto(void)
+{
+	return chcr_register_alg();
+}
+
+/*
+ *	stop_crypto - Deregister all the crypto algorithms from the kernel.
+ *	This should be called once, when the last device goes down. After this
+ *	the kernel will no longer call the driver APIs for crypto operations.
+ */
+int stop_crypto(void)
+{
+	chcr_unregister_alg();
+	return 0;
+}
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
new file mode 100644
index 0000000..1871500
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -0,0 +1,442 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __CHCR_ALGO_H__
+#define __CHCR_ALGO_H__
+
+/* Crypto key context */
+#define KEY_CONTEXT_CTX_LEN_S           24
+#define KEY_CONTEXT_CTX_LEN_M           0xff
+#define KEY_CONTEXT_CTX_LEN_V(x)        ((x) << KEY_CONTEXT_CTX_LEN_S)
+#define KEY_CONTEXT_CTX_LEN_G(x) \
+	(((x) >> KEY_CONTEXT_CTX_LEN_S) & KEY_CONTEXT_CTX_LEN_M)
+
+#define KEY_CONTEXT_DUAL_CK_S      12
+#define KEY_CONTEXT_DUAL_CK_M      0x1
+#define KEY_CONTEXT_DUAL_CK_V(x)   ((x) << KEY_CONTEXT_DUAL_CK_S)
+#define KEY_CONTEXT_DUAL_CK_G(x)   \
+(((x) >> KEY_CONTEXT_DUAL_CK_S) & KEY_CONTEXT_DUAL_CK_M)
+#define KEY_CONTEXT_DUAL_CK_F      KEY_CONTEXT_DUAL_CK_V(1U)
+
+#define KEY_CONTEXT_SALT_PRESENT_S      10
+#define KEY_CONTEXT_SALT_PRESENT_M      0x1
+#define KEY_CONTEXT_SALT_PRESENT_V(x)   ((x) << KEY_CONTEXT_SALT_PRESENT_S)
+#define KEY_CONTEXT_SALT_PRESENT_G(x)   \
+	(((x) >> KEY_CONTEXT_SALT_PRESENT_S) & \
+	 KEY_CONTEXT_SALT_PRESENT_M)
+#define KEY_CONTEXT_SALT_PRESENT_F      KEY_CONTEXT_SALT_PRESENT_V(1U)
+
+#define KEY_CONTEXT_VALID_S     0
+#define KEY_CONTEXT_VALID_M     0x1
+#define KEY_CONTEXT_VALID_V(x)  ((x) << KEY_CONTEXT_VALID_S)
+#define KEY_CONTEXT_VALID_G(x)  \
+	(((x) >> KEY_CONTEXT_VALID_S) & \
+	 KEY_CONTEXT_VALID_M)
+#define KEY_CONTEXT_VALID_F     KEY_CONTEXT_VALID_V(1U)
+
+#define KEY_CONTEXT_CK_SIZE_S           6
+#define KEY_CONTEXT_CK_SIZE_M           0xf
+#define KEY_CONTEXT_CK_SIZE_V(x)        ((x) << KEY_CONTEXT_CK_SIZE_S)
+#define KEY_CONTEXT_CK_SIZE_G(x)        \
+	(((x) >> KEY_CONTEXT_CK_SIZE_S) & KEY_CONTEXT_CK_SIZE_M)
+
+#define KEY_CONTEXT_MK_SIZE_S           2
+#define KEY_CONTEXT_MK_SIZE_M           0xf
+#define KEY_CONTEXT_MK_SIZE_V(x)        ((x) << KEY_CONTEXT_MK_SIZE_S)
+#define KEY_CONTEXT_MK_SIZE_G(x)        \
+	(((x) >> KEY_CONTEXT_MK_SIZE_S) & KEY_CONTEXT_MK_SIZE_M)
+
+#define KEY_CONTEXT_OPAD_PRESENT_S      11
+#define KEY_CONTEXT_OPAD_PRESENT_M      0x1
+#define KEY_CONTEXT_OPAD_PRESENT_V(x)   ((x) << KEY_CONTEXT_OPAD_PRESENT_S)
+#define KEY_CONTEXT_OPAD_PRESENT_G(x)   \
+	(((x) >> KEY_CONTEXT_OPAD_PRESENT_S) & \
+	 KEY_CONTEXT_OPAD_PRESENT_M)
+#define KEY_CONTEXT_OPAD_PRESENT_F      KEY_CONTEXT_OPAD_PRESENT_V(1U)
+
+#define TLS_KEYCTX_RXFLIT_CNT_S 24
+#define TLS_KEYCTX_RXFLIT_CNT_V(x) ((x) << TLS_KEYCTX_RXFLIT_CNT_S)
+
+#define TLS_KEYCTX_RXPROT_VER_S 20
+#define TLS_KEYCTX_RXPROT_VER_M 0xf
+#define TLS_KEYCTX_RXPROT_VER_V(x) ((x) << TLS_KEYCTX_RXPROT_VER_S)
+
+#define TLS_KEYCTX_RXCIPH_MODE_S 16
+#define TLS_KEYCTX_RXCIPH_MODE_M 0xf
+#define TLS_KEYCTX_RXCIPH_MODE_V(x) ((x) << TLS_KEYCTX_RXCIPH_MODE_S)
+
+#define TLS_KEYCTX_RXAUTH_MODE_S 12
+#define TLS_KEYCTX_RXAUTH_MODE_M 0xf
+#define TLS_KEYCTX_RXAUTH_MODE_V(x) ((x) << TLS_KEYCTX_RXAUTH_MODE_S)
+
+#define TLS_KEYCTX_RXCIAU_CTRL_S 11
+#define TLS_KEYCTX_RXCIAU_CTRL_V(x) ((x) << TLS_KEYCTX_RXCIAU_CTRL_S)
+
+#define TLS_KEYCTX_RX_SEQCTR_S 9
+#define TLS_KEYCTX_RX_SEQCTR_M 0x3
+#define TLS_KEYCTX_RX_SEQCTR_V(x) ((x) << TLS_KEYCTX_RX_SEQCTR_S)
+
+#define TLS_KEYCTX_RX_VALID_S 8
+#define TLS_KEYCTX_RX_VALID_V(x) ((x) << TLS_KEYCTX_RX_VALID_S)
+
+#define TLS_KEYCTX_RXCK_SIZE_S 3
+#define TLS_KEYCTX_RXCK_SIZE_M 0x7
+#define TLS_KEYCTX_RXCK_SIZE_V(x) ((x) << TLS_KEYCTX_RXCK_SIZE_S)
+
+#define TLS_KEYCTX_RXMK_SIZE_S 0
+#define TLS_KEYCTX_RXMK_SIZE_M 0x7
+#define TLS_KEYCTX_RXMK_SIZE_V(x) ((x) << TLS_KEYCTX_RXMK_SIZE_S)
+
+#define CHCR_HASH_MAX_DIGEST_SIZE 64
+#define CHCR_MAX_SHA_DIGEST_SIZE 64
+
+#define IPSEC_TRUNCATED_ICV_SIZE 12
+#define TLS_TRUNCATED_HMAC_SIZE 10
+#define CBCMAC_DIGEST_SIZE 16
+#define MAX_HASH_NAME 20
+
+#define SHA1_INIT_STATE_5X4B    5
+#define SHA256_INIT_STATE_8X4B  8
+#define SHA512_INIT_STATE_8X8B  8
+#define SHA1_INIT_STATE         SHA1_INIT_STATE_5X4B
+#define SHA224_INIT_STATE       SHA256_INIT_STATE_8X4B
+#define SHA256_INIT_STATE       SHA256_INIT_STATE_8X4B
+#define SHA384_INIT_STATE       SHA512_INIT_STATE_8X8B
+#define SHA512_INIT_STATE       SHA512_INIT_STATE_8X8B
+
+#define DUMMY_BYTES 16
+
+#define IPAD_DATA 0x36363636
+#define OPAD_DATA 0x5c5c5c5c
+
+#define TRANSHDR_SIZE(kctx_len)\
+	(sizeof(struct chcr_wr) +\
+	 kctx_len)
+#define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \
+	(TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\
+	 sizeof(struct cpl_rx_phys_dsgl) + AES_BLOCK_SIZE)
+#define HASH_TRANSHDR_SIZE(kctx_len)\
+	(TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES)
+
+
+#define FILL_SEC_CPL_OP_IVINSR(id, len, ofst)      \
+	htonl( \
+	       CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \
+	       CPL_TX_SEC_PDU_RXCHID_V((id)) | \
+	       CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \
+	       CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \
+	       CPL_TX_SEC_PDU_CPLLEN_V((len)) | \
+	       CPL_TX_SEC_PDU_PLACEHOLDER_V(0) | \
+	       CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst)))
+
+#define  FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \
+	htonl( \
+	       CPL_TX_SEC_PDU_AADSTART_V((a_start)) | \
+	       CPL_TX_SEC_PDU_AADSTOP_V((a_stop)) | \
+	       CPL_TX_SEC_PDU_CIPHERSTART_V((c_start)) | \
+	       CPL_TX_SEC_PDU_CIPHERSTOP_HI_V((c_stop_hi)))
+
+#define  FILL_SEC_CPL_AUTHINSERT(c_stop_lo, a_start, a_stop, a_inst) \
+	htonl( \
+	       CPL_TX_SEC_PDU_CIPHERSTOP_LO_V((c_stop_lo)) | \
+		CPL_TX_SEC_PDU_AUTHSTART_V((a_start)) | \
+		CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \
+		CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst)))
+
+#define  FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size)  \
+		htonl( \
+		SCMD_SEQ_NO_CTRL_V(0) | \
+		SCMD_STATUS_PRESENT_V(0) | \
+		SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) | \
+		SCMD_ENC_DEC_CTRL_V((ctrl)) | \
+		SCMD_CIPH_AUTH_SEQ_CTRL_V((seq)) | \
+		SCMD_CIPH_MODE_V((cmode)) | \
+		SCMD_AUTH_MODE_V((amode)) | \
+		SCMD_HMAC_CTRL_V((opad)) | \
+		SCMD_IV_SIZE_V((size)) | \
+		SCMD_NUM_IVS_V(0))
+
+#define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \
+		SCMD_ENB_DBGID_V(0) | \
+		SCMD_IV_GEN_CTRL_V(0) | \
+		SCMD_LAST_FRAG_V((last)) | \
+		SCMD_MORE_FRAGS_V((more)) | \
+		SCMD_TLS_COMPPDU_V(0) | \
+		SCMD_KEY_CTX_INLINE_V((ctx_in)) | \
+		SCMD_TLS_FRAG_ENABLE_V(0) | \
+		SCMD_MAC_ONLY_V((mac)) |  \
+		SCMD_AADIVDROP_V((ivdrop)) | \
+		SCMD_HDR_LEN_V((len)))
+
+#define  FILL_KEY_CTX_HDR(ck_size, mk_size, d_ck, opad, ctx_len) \
+		htonl(KEY_CONTEXT_VALID_V(1) | \
+		      KEY_CONTEXT_CK_SIZE_V((ck_size)) | \
+		      KEY_CONTEXT_MK_SIZE_V(mk_size) | \
+		      KEY_CONTEXT_DUAL_CK_V((d_ck)) | \
+		      KEY_CONTEXT_OPAD_PRESENT_V((opad)) | \
+		      KEY_CONTEXT_SALT_PRESENT_V(1) | \
+		      KEY_CONTEXT_CTX_LEN_V((ctx_len)))
+
+#define  FILL_KEY_CRX_HDR(ck_size, mk_size, d_ck, opad, ctx_len) \
+		htonl(TLS_KEYCTX_RXMK_SIZE_V(mk_size) | \
+		      TLS_KEYCTX_RXCK_SIZE_V(ck_size) | \
+		      TLS_KEYCTX_RX_VALID_V(1) | \
+		      TLS_KEYCTX_RX_SEQCTR_V(3) | \
+		      TLS_KEYCTX_RXAUTH_MODE_V(4) | \
+		      TLS_KEYCTX_RXCIPH_MODE_V(2) | \
+		      TLS_KEYCTX_RXFLIT_CNT_V((ctx_len)))
+
+#define FILL_WR_OP_CCTX_SIZE \
+		htonl( \
+			FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \
+			FW_CRYPTO_LOOKASIDE_WR) | \
+			FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \
+			FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((0)) | \
+			FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(0) | \
+			FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(0))
+
+#define FILL_WR_RX_Q_ID(cid, qid, lcb, fid) \
+		htonl( \
+			FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \
+			FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \
+			FW_CRYPTO_LOOKASIDE_WR_LCB_V((lcb)) | \
+			FW_CRYPTO_LOOKASIDE_WR_IV_V((IV_NOP)) | \
+			FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid))
+
+#define FILL_ULPTX_CMD_DEST(cid, qid) \
+	htonl(ULPTX_CMD_V(ULP_TX_PKT) | \
+	      ULP_TXPKT_DEST_V(0) | \
+	      ULP_TXPKT_DATAMODIFY_V(0) | \
+	      ULP_TXPKT_CHANNELID_V((cid)) | \
+	      ULP_TXPKT_RO_V(1) | \
+	      ULP_TXPKT_FID_V(qid))
+
+#define KEYCTX_ALIGN_PAD(bs) ({unsigned int _bs = (bs);\
+			      _bs == SHA1_DIGEST_SIZE ? 12 : 0; })
+
+#define FILL_PLD_SIZE_HASH_SIZE(payload_sgl_len, sgl_lengths, total_frags) \
+	htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(payload_sgl_len ? \
+						sgl_lengths[total_frags] : 0) |\
+	      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(0))
+
+#define FILL_LEN_PKD(calc_tx_flits_ofld, skb) \
+	htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP((\
+					   calc_tx_flits_ofld(skb) * 8), 16)))
+
+#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
+					ULP_TX_SC_MORE_V((immdatalen)))
+#define MAX_NK 8
+#define MAX_DSGL_ENT			32
+#define MIN_AUTH_SG			1 /* IV */
+#define MIN_GCM_SG			1 /* IV */
+#define MIN_DIGEST_SG			1 /*Partial Buffer*/
+#define MIN_CCM_SG			2 /*IV+B0*/
+#define CIP_SPACE_LEFT(len) \
+	((SGE_MAX_WR_LEN - CIP_WR_MIN_LEN - (len)))
+#define HASH_SPACE_LEFT(len) \
+	((SGE_MAX_WR_LEN - HASH_WR_MIN_LEN - (len)))
+
+struct algo_param {
+	unsigned int auth_mode;
+	unsigned int mk_size;
+	unsigned int result_size;
+};
+
+struct hash_wr_param {
+	struct algo_param alg_prm;
+	unsigned int opad_needed;
+	unsigned int more;
+	unsigned int last;
+	unsigned int kctx_len;
+	unsigned int sg_len;
+	unsigned int bfr_len;
+	unsigned int hash_size;
+	u64 scmd1;
+};
+
+struct cipher_wr_param {
+	struct ablkcipher_request *req;
+	char *iv;
+	int bytes;
+	unsigned short qid;
+};
+
+enum {
+	AES_KEYLENGTH_128BIT = 128,
+	AES_KEYLENGTH_192BIT = 192,
+	AES_KEYLENGTH_256BIT = 256
+};
+
+enum {
+	KEYLENGTH_3BYTES = 3,
+	KEYLENGTH_4BYTES = 4,
+	KEYLENGTH_6BYTES = 6,
+	KEYLENGTH_8BYTES = 8
+};
+
+enum {
+	NUMBER_OF_ROUNDS_10 = 10,
+	NUMBER_OF_ROUNDS_12 = 12,
+	NUMBER_OF_ROUNDS_14 = 14,
+};
+
+/*
+ * CCM defines values of 4, 6, 8, 10, 12, 14, and 16 octets,
+ * where they indicate the size of the integrity check value (ICV)
+ */
+enum {
+	ICV_4  = 4,
+	ICV_6  = 6,
+	ICV_8  = 8,
+	ICV_10 = 10,
+	ICV_12 = 12,
+	ICV_13 = 13,
+	ICV_14 = 14,
+	ICV_15 = 15,
+	ICV_16 = 16
+};
+
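+/* One PHYS_DSGL group as consumed by the hardware: eight 16-bit big-endian
+ * lengths followed by eight 64-bit addresses.
+ */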
+struct phys_sge_pairs {
+	__be16 len[8];
+	__be64 addr[8];
+};
+
+
+static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
+		SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
+};
+
+static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
+		SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+		SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
+};
+
+static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
+		SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+		SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
+};
+
+static const u64 sha384_init[SHA512_DIGEST_SIZE / 8] = {
+		SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3,
+		SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7,
+};
+
+static const u64 sha512_init[SHA512_DIGEST_SIZE / 8] = {
+		SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3,
+		SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7,
+};
+
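+/* Copy the standard initial hash state (FIPS 180) for the requested digest
+ * size into the key-context buffer.
+ */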
+static inline void copy_hash_init_values(char *key, int digestsize)
+{
+	u8 i;
+	__be32 *dkey = (__be32 *)key;
+	u64 *ldkey = (u64 *)key;
+	__be64 *sha384 = (__be64 *)sha384_init;
+	__be64 *sha512 = (__be64 *)sha512_init;
+
+	switch (digestsize) {
+	case SHA1_DIGEST_SIZE:
+		for (i = 0; i < SHA1_INIT_STATE; i++)
+			dkey[i] = cpu_to_be32(sha1_init[i]);
+		break;
+	case SHA224_DIGEST_SIZE:
+		for (i = 0; i < SHA224_INIT_STATE; i++)
+			dkey[i] = cpu_to_be32(sha224_init[i]);
+		break;
+	case SHA256_DIGEST_SIZE:
+		for (i = 0; i < SHA256_INIT_STATE; i++)
+			dkey[i] = cpu_to_be32(sha256_init[i]);
+		break;
+	case SHA384_DIGEST_SIZE:
+		for (i = 0; i < SHA384_INIT_STATE; i++)
+			ldkey[i] = be64_to_cpu(sha384[i]);
+		break;
+	case SHA512_DIGEST_SIZE:
+		for (i = 0; i < SHA512_INIT_STATE; i++)
+			ldkey[i] = be64_to_cpu(sha512[i]);
+		break;
+	}
+}
+
+static const u8 sgl_lengths[20] = {
+	0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15
+};
+
+/* Eight 16-bit len fields: 8 * sizeof(__be16) = 16 bytes */
+#define PHYSDSGL_MAX_LEN_SIZE 16
+
+static inline u16 get_space_for_phys_dsgl(unsigned int sgl_entr)
+{
+	/* len field size + addr field size */
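+	/* Each group of up to eight entries needs a 16-byte block of length
+	 * words plus an 8-byte address per entry; an odd entry count is
+	 * padded out to the next 16-byte boundary.
+	 */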
+	return ((sgl_entr >> 3) + ((sgl_entr % 8) ?
+				   1 : 0)) * PHYSDSGL_MAX_LEN_SIZE +
+		(sgl_entr << 3) + ((sgl_entr % 2 ? 1 : 0) << 3);
+}
+
+/* The AES s-transform matrix (s-box). */
+static const u8 aes_sbox[256] = {
+	99,  124, 119, 123, 242, 107, 111, 197, 48,  1,   103, 43,  254, 215,
+	171, 118, 202, 130, 201, 125, 250, 89,  71,  240, 173, 212, 162, 175,
+	156, 164, 114, 192, 183, 253, 147, 38,  54,  63,  247, 204, 52,  165,
+	229, 241, 113, 216, 49,  21, 4,   199, 35,  195, 24,  150, 5, 154, 7,
+	18,  128, 226, 235, 39,  178, 117, 9,   131, 44,  26,  27,  110, 90,
+	160, 82,  59,  214, 179, 41,  227, 47,  132, 83,  209, 0,   237, 32,
+	252, 177, 91,  106, 203, 190, 57,  74,  76,  88,  207, 208, 239, 170,
+	251, 67,  77,  51,  133, 69,  249, 2,   127, 80,  60,  159, 168, 81,
+	163, 64,  143, 146, 157, 56,  245, 188, 182, 218, 33,  16,  255, 243,
+	210, 205, 12,  19,  236, 95,  151, 68,  23,  196, 167, 126, 61,  100,
+	93,  25,  115, 96,  129, 79,  220, 34,  42,  144, 136, 70,  238, 184,
+	20,  222, 94,  11,  219, 224, 50,  58,  10,  73,  6,   36,  92,  194,
+	211, 172, 98,  145, 149, 228, 121, 231, 200, 55,  109, 141, 213, 78,
+	169, 108, 86,  244, 234, 101, 122, 174, 8, 186, 120, 37,  46,  28, 166,
+	180, 198, 232, 221, 116, 31,  75,  189, 139, 138, 112, 62,  181, 102,
+	72,  3,   246, 14,  97,  53,  87,  185, 134, 193, 29,  158, 225, 248,
+	152, 17,  105, 217, 142, 148, 155, 30,  135, 233, 206, 85,  40,  223,
+	140, 161, 137, 13,  191, 230, 66,  104, 65,  153, 45,  15,  176, 84,
+	187, 22
+};
+
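+/* SubWord step of the AES key expansion: apply the S-box to each byte of a
+ * 32-bit word.
+ */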
+static inline u32 aes_ks_subword(const u32 w)
+{
+	u8 bytes[4];
+
+	*(u32 *)(&bytes[0]) = w;
+	bytes[0] = aes_sbox[bytes[0]];
+	bytes[1] = aes_sbox[bytes[1]];
+	bytes[2] = aes_sbox[bytes[2]];
+	bytes[3] = aes_sbox[bytes[3]];
+	return *(u32 *)(&bytes[0]);
+}
+
+#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
new file mode 100644
index 0000000..04f277c
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -0,0 +1,270 @@
+/**
+ * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
+ *
+ * Copyright (C) 2011-2016 Chelsio Communications.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written and Maintained by:
+ * Manoj Malviya (manojmalviya@chelsio.com)
+ * Atul Gupta (atul.gupta@chelsio.com)
+ * Jitendra Lulla (jlulla@chelsio.com)
+ * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
+ * Harsh Jain (harsh@chelsio.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+
+#include "t4_msg.h"
+#include "chcr_core.h"
+#include "cxgb4_uld.h"
+
+static LIST_HEAD(uld_ctx_list);
+static DEFINE_MUTEX(dev_mutex);
+static atomic_t dev_count;
+static struct uld_ctx *ctx_rr;
+
+typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
+static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
+static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
+static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
+
+static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
+	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
+};
+
+static struct cxgb4_uld_info chcr_uld_info = {
+	.name = DRV_MODULE_NAME,
+	.nrxq = MAX_ULD_QSETS,
+	.ntxq = MAX_ULD_QSETS,
+	.rxq_size = 1024,
+	.add = chcr_uld_add,
+	.state_change = chcr_uld_state_change,
+	.rx_handler = chcr_uld_rx_handler,
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+	.tx_handler = chcr_uld_tx_handler,
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+};
+
+struct uld_ctx *assign_chcr_device(void)
+{
+	struct uld_ctx *u_ctx = NULL;
+
+	/*
+	 * When multiple devices are present in the system, select a
+	 * device in round-robin fashion for crypto operations. A given
+	 * session, however, must keep using the same device to maintain
+	 * request-response ordering.
+	 */
+	mutex_lock(&dev_mutex);
+	if (!list_empty(&uld_ctx_list)) {
+		u_ctx = ctx_rr;
+		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
+			ctx_rr = list_first_entry(&uld_ctx_list,
+						  struct uld_ctx,
+						  entry);
+		else
+			ctx_rr = list_next_entry(ctx_rr, entry);
+	}
+	mutex_unlock(&dev_mutex);
+	return u_ctx;
+}
+
+static int chcr_dev_add(struct uld_ctx *u_ctx)
+{
+	struct chcr_dev *dev;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENXIO;
+
+	spin_lock_init(&dev->lock_chcr_dev);
+	u_ctx->dev = dev;
+	dev->u_ctx = u_ctx;
+	atomic_inc(&dev_count);
+	mutex_lock(&dev_mutex);
+	list_add_tail(&u_ctx->entry, &uld_ctx_list);
+	if (!ctx_rr)
+		ctx_rr = u_ctx;
+	mutex_unlock(&dev_mutex);
+	return 0;
+}
+
+static int chcr_dev_remove(struct uld_ctx *u_ctx)
+{
+	if (ctx_rr == u_ctx) {
+		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
+			ctx_rr = list_first_entry(&uld_ctx_list,
+						  struct uld_ctx,
+						  entry);
+		else
+			ctx_rr = list_next_entry(ctx_rr, entry);
+	}
+	list_del(&u_ctx->entry);
+	if (list_empty(&uld_ctx_list))
+		ctx_rr = NULL;
+	kfree(u_ctx->dev);
+	u_ctx->dev = NULL;
+	atomic_dec(&dev_count);
+	return 0;
+}
+
+static int cpl_fw6_pld_handler(struct chcr_dev *dev,
+			       unsigned char *input)
+{
+	struct crypto_async_request *req;
+	struct cpl_fw6_pld *fw6_pld;
+	u32 ack_err_status = 0;
+	int error_status = 0;
+	struct adapter *adap = padap(dev);
+
+	fw6_pld = (struct cpl_fw6_pld *)input;
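+	/* data[1] echoes back the address of the originating
+	 * crypto_async_request so the response can be matched to its
+	 * request.
+	 */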
+	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
+						    fw6_pld->data[1]);
+
+	ack_err_status =
+		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
+	if (ack_err_status) {
+		if (CHK_MAC_ERR_BIT(ack_err_status) ||
+		    CHK_PAD_ERR_BIT(ack_err_status))
+			error_status = -EBADMSG;
+		atomic_inc(&adap->chcr_stats.error);
+	}
+	/* hand the response and its error status to the completion handler */
+	if (req) {
+		error_status = chcr_handle_resp(req, input, error_status);
+	} else {
+		pr_err("Incorrect request address from the firmware\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+int chcr_send_wr(struct sk_buff *skb)
+{
+	return cxgb4_crypto_send(skb->dev, skb);
+}
+
+static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
+{
+	struct uld_ctx *u_ctx;
+
+	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	/* Allocate the ULD context for this adapter */
+	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
+	if (!u_ctx) {
+		u_ctx = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+	u_ctx->lldi = *lld;
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+	if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
+		chcr_add_xfrmops(lld);
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+out:
+	return u_ctx;
+}
+
+int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
+			const struct pkt_gl *pgl)
+{
+	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
+	struct chcr_dev *dev = u_ctx->dev;
+	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
+
+	if (rpl->opcode != CPL_FW6_PLD) {
+		pr_err("Unsupported opcode\n");
+		return 0;
+	}
+
+	if (!pgl)
+		work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
+	else
+		work_handlers[rpl->opcode](dev, pgl->va);
+	return 0;
+}
+
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
+{
+	return chcr_ipsec_xmit(skb, dev);
+}
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
+static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
+{
+	struct uld_ctx *u_ctx = handle;
+	int ret = 0;
+
+	switch (state) {
+	case CXGB4_STATE_UP:
+		if (!u_ctx->dev) {
+			ret = chcr_dev_add(u_ctx);
+			if (ret != 0)
+				return ret;
+		}
+		if (atomic_read(&dev_count) == 1)
+			ret = start_crypto();
+		break;
+
+	case CXGB4_STATE_DETACH:
+		if (u_ctx->dev) {
+			mutex_lock(&dev_mutex);
+			chcr_dev_remove(u_ctx);
+			mutex_unlock(&dev_mutex);
+		}
+		if (!atomic_read(&dev_count))
+			stop_crypto();
+		break;
+
+	case CXGB4_STATE_START_RECOVERY:
+	case CXGB4_STATE_DOWN:
+	default:
+		break;
+	}
+	return ret;
+}
+
+static int __init chcr_crypto_init(void)
+{
+	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
+		pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
+
+	return 0;
+}
+
+static void __exit chcr_crypto_exit(void)
+{
+	struct uld_ctx *u_ctx, *tmp;
+
+	if (atomic_read(&dev_count))
+		stop_crypto();
+
+	/* Remove all devices from list */
+	mutex_lock(&dev_mutex);
+	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
+		if (u_ctx->dev)
+			chcr_dev_remove(u_ctx);
+		kfree(u_ctx);
+	}
+	mutex_unlock(&dev_mutex);
+	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
+}
+
+module_init(chcr_crypto_init);
+module_exit(chcr_crypto_exit);
+
+MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
new file mode 100644
index 0000000..de3a9c0
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -0,0 +1,195 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __CHCR_CORE_H__
+#define __CHCR_CORE_H__
+
+#include <crypto/algapi.h>
+#include "t4_hw.h"
+#include "cxgb4.h"
+#include "t4_msg.h"
+#include "cxgb4_uld.h"
+
+#define DRV_MODULE_NAME "chcr"
+#define DRV_VERSION "1.0.0.0"
+
+#define MAX_PENDING_REQ_TO_HW 20
+#define CHCR_TEST_RESPONSE_TIMEOUT 1000
+
+#define PAD_ERROR_BIT		1
+#define CHK_PAD_ERR_BIT(x)	(((x) >> PAD_ERROR_BIT) & 1)
+
+#define MAC_ERROR_BIT		0
+#define CHK_MAC_ERR_BIT(x)	(((x) >> MAC_ERROR_BIT) & 1)
+#define MAX_SALT                4
+#define CIP_WR_MIN_LEN (sizeof(struct chcr_wr) + \
+		    sizeof(struct cpl_rx_phys_dsgl) + \
+		    sizeof(struct ulptx_sgl) + 16) /* IV */
+
+#define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
+			DUMMY_BYTES + \
+		    sizeof(struct ulptx_sgl))
+
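+/* padap(dev): the cxgb4 adapter that owns this chcr device, via PCI drvdata */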
+#define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev)
+
+struct uld_ctx;
+
+struct _key_ctx {
+	__be32 ctx_hdr;
+	u8 salt[MAX_SALT];
+	__be64 iv_to_auth;
+	unsigned char key[0];
+};
+
+#define KEYCTX_TX_WR_IV_S  55
+#define KEYCTX_TX_WR_IV_M  0x1ffULL
+#define KEYCTX_TX_WR_IV_V(x) ((x) << KEYCTX_TX_WR_IV_S)
+#define KEYCTX_TX_WR_IV_G(x) \
+	(((x) >> KEYCTX_TX_WR_IV_S) & KEYCTX_TX_WR_IV_M)
+
+#define KEYCTX_TX_WR_AAD_S 47
+#define KEYCTX_TX_WR_AAD_M 0xffULL
+#define KEYCTX_TX_WR_AAD_V(x) ((x) << KEYCTX_TX_WR_AAD_S)
+#define KEYCTX_TX_WR_AAD_G(x) (((x) >> KEYCTX_TX_WR_AAD_S) & \
+				KEYCTX_TX_WR_AAD_M)
+
+#define KEYCTX_TX_WR_AADST_S 39
+#define KEYCTX_TX_WR_AADST_M 0xffULL
+#define KEYCTX_TX_WR_AADST_V(x) ((x) << KEYCTX_TX_WR_AADST_S)
+#define KEYCTX_TX_WR_AADST_G(x) \
+	(((x) >> KEYCTX_TX_WR_AADST_S) & KEYCTX_TX_WR_AADST_M)
+
+#define KEYCTX_TX_WR_CIPHER_S 30
+#define KEYCTX_TX_WR_CIPHER_M 0x1ffULL
+#define KEYCTX_TX_WR_CIPHER_V(x) ((x) << KEYCTX_TX_WR_CIPHER_S)
+#define KEYCTX_TX_WR_CIPHER_G(x) \
+	(((x) >> KEYCTX_TX_WR_CIPHER_S) & KEYCTX_TX_WR_CIPHER_M)
+
+#define KEYCTX_TX_WR_CIPHERST_S 23
+#define KEYCTX_TX_WR_CIPHERST_M 0x7f
+#define KEYCTX_TX_WR_CIPHERST_V(x) ((x) << KEYCTX_TX_WR_CIPHERST_S)
+#define KEYCTX_TX_WR_CIPHERST_G(x) \
+	(((x) >> KEYCTX_TX_WR_CIPHERST_S) & KEYCTX_TX_WR_CIPHERST_M)
+
+#define KEYCTX_TX_WR_AUTH_S 14
+#define KEYCTX_TX_WR_AUTH_M 0x1ff
+#define KEYCTX_TX_WR_AUTH_V(x) ((x) << KEYCTX_TX_WR_AUTH_S)
+#define KEYCTX_TX_WR_AUTH_G(x) \
+	(((x) >> KEYCTX_TX_WR_AUTH_S) & KEYCTX_TX_WR_AUTH_M)
+
+#define KEYCTX_TX_WR_AUTHST_S 7
+#define KEYCTX_TX_WR_AUTHST_M 0x7f
+#define KEYCTX_TX_WR_AUTHST_V(x) ((x) << KEYCTX_TX_WR_AUTHST_S)
+#define KEYCTX_TX_WR_AUTHST_G(x) \
+	(((x) >> KEYCTX_TX_WR_AUTHST_S) & KEYCTX_TX_WR_AUTHST_M)
+
+#define KEYCTX_TX_WR_AUTHIN_S 0
+#define KEYCTX_TX_WR_AUTHIN_M 0x7f
+#define KEYCTX_TX_WR_AUTHIN_V(x) ((x) << KEYCTX_TX_WR_AUTHIN_S)
+#define KEYCTX_TX_WR_AUTHIN_G(x) \
+	(((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)
+
+struct chcr_wr {
+	struct fw_crypto_lookaside_wr wreq;
+	struct ulp_txpkt ulptx;
+	struct ulptx_idata sc_imm;
+	struct cpl_tx_sec_pdu sec_cpl;
+	struct _key_ctx key_ctx;
+};
+
+struct chcr_dev {
+	spinlock_t lock_chcr_dev;
+	struct uld_ctx *u_ctx;
+	unsigned char tx_channel_id;
+	unsigned char rx_channel_id;
+};
+
+struct uld_ctx {
+	struct list_head entry;
+	struct cxgb4_lld_info lldi;
+	struct chcr_dev *dev;
+};
+
+struct sge_opaque_hdr {
+	void *dev;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+struct chcr_ipsec_req {
+	struct ulp_txpkt ulptx;
+	struct ulptx_idata sc_imm;
+	struct cpl_tx_sec_pdu sec_cpl;
+	struct _key_ctx key_ctx;
+};
+
+struct chcr_ipsec_wr {
+	struct fw_ulptx_wr wreq;
+	struct chcr_ipsec_req req;
+};
+
+struct ipsec_sa_entry {
+	int hmac_ctrl;
+	unsigned int enckey_len;
+	unsigned int kctx_len;
+	unsigned int authsize;
+	__be32 key_ctx_hdr;
+	char salt[MAX_SALT];
+	char key[2 * AES_MAX_KEY_SIZE];
+};
+
+/*
+ *      sgl_len - calculates the size of an SGL of the given capacity
+ *      @n: the number of SGL entries
+ *      Calculates the number of flits needed for a scatter/gather list that
+ *      can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+	n--;
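+	/* 2 flits for the ulptx_sgl header (which carries the first SGE),
+	 * 3 flits per further pair of SGEs and 2 for a trailing odd SGE.
+	 */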
+	return (3 * n) / 2 + (n & 1) + 2;
+}
+
+struct uld_ctx *assign_chcr_device(void);
+int chcr_send_wr(struct sk_buff *skb);
+int start_crypto(void);
+int stop_crypto(void);
+int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
+			const struct pkt_gl *pgl);
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
+int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
+		     int err);
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
+#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
new file mode 100644
index 0000000..0d2c70c
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -0,0 +1,344 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __CHCR_CRYPTO_H__
+#define __CHCR_CRYPTO_H__
+
+#define GHASH_BLOCK_SIZE    16
+#define GHASH_DIGEST_SIZE   16
+
+#define CCM_B0_SIZE             16
+#define CCM_AAD_FIELD_SIZE      2
+#define T6_MAX_AAD_SIZE 511
+
+
+/* Define the following if the hardware does not drop the AAD and IV data
+ * before handing back the processed data.
+ */
+
+#define CHCR_CRA_PRIORITY 500
+#define CHCR_AEAD_PRIORITY 6000
+#define CHCR_AES_MAX_KEY_LEN  (2 * (AES_MAX_KEY_SIZE)) /* consider xts */
+#define CHCR_MAX_CRYPTO_IV_LEN 16 /* AES IV len */
+
+#define CHCR_MAX_AUTHENC_AES_KEY_LEN 32 /* max aes key length*/
+#define CHCR_MAX_AUTHENC_SHA_KEY_LEN 128 /* max sha key length*/
+
+#define CHCR_GIVENCRYPT_OP 2
+/* CPL/SCMD parameters */
+
+#define CHCR_ENCRYPT_OP 0
+#define CHCR_DECRYPT_OP 1
+
+#define CHCR_SCMD_SEQ_NO_CTRL_32BIT     1
+#define CHCR_SCMD_SEQ_NO_CTRL_48BIT     2
+#define CHCR_SCMD_SEQ_NO_CTRL_64BIT     3
+
+#define CHCR_SCMD_PROTO_VERSION_GENERIC 4
+
+#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
+#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
+
+#define CHCR_SCMD_CIPHER_MODE_NOP               0
+#define CHCR_SCMD_CIPHER_MODE_AES_CBC           1
+#define CHCR_SCMD_CIPHER_MODE_AES_GCM           2
+#define CHCR_SCMD_CIPHER_MODE_AES_CTR           3
+#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES       4
+#define CHCR_SCMD_CIPHER_MODE_AES_XTS           6
+#define CHCR_SCMD_CIPHER_MODE_AES_CCM           7
+
+#define CHCR_SCMD_AUTH_MODE_NOP             0
+#define CHCR_SCMD_AUTH_MODE_SHA1            1
+#define CHCR_SCMD_AUTH_MODE_SHA224          2
+#define CHCR_SCMD_AUTH_MODE_SHA256          3
+#define CHCR_SCMD_AUTH_MODE_GHASH           4
+#define CHCR_SCMD_AUTH_MODE_SHA512_224      5
+#define CHCR_SCMD_AUTH_MODE_SHA512_256      6
+#define CHCR_SCMD_AUTH_MODE_SHA512_384      7
+#define CHCR_SCMD_AUTH_MODE_SHA512_512      8
+#define CHCR_SCMD_AUTH_MODE_CBCMAC          9
+#define CHCR_SCMD_AUTH_MODE_CMAC            10
+
+#define CHCR_SCMD_HMAC_CTRL_NOP             0
+#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC        1
+#define CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366   2
+#define CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT     3
+#define CHCR_SCMD_HMAC_CTRL_PL1		    4
+#define CHCR_SCMD_HMAC_CTRL_PL2		    5
+#define CHCR_SCMD_HMAC_CTRL_PL3		    6
+#define CHCR_SCMD_HMAC_CTRL_DIV2	    7
+#define VERIFY_HW 0
+#define VERIFY_SW 1
+
+#define CHCR_SCMD_IVGEN_CTRL_HW             0
+#define CHCR_SCMD_IVGEN_CTRL_SW             1
+/* These are not really MAC key sizes; they are the sizes of the
+ * intermediate state of the SHA engine.
+ */
+#define CHCR_KEYCTX_MAC_KEY_SIZE_128        0
+#define CHCR_KEYCTX_MAC_KEY_SIZE_160        1
+#define CHCR_KEYCTX_MAC_KEY_SIZE_192        2
+#define CHCR_KEYCTX_MAC_KEY_SIZE_256        3
+#define CHCR_KEYCTX_MAC_KEY_SIZE_512        4
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_128     0
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_192     1
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_256     2
+#define CHCR_KEYCTX_NO_KEY                  15
+
+#define CHCR_CPL_FW4_PLD_IV_OFFSET          (5 * 64) /* bytes. flt #5 and #6 */
+#define CHCR_CPL_FW4_PLD_HASH_RESULT_OFFSET (7 * 64) /* bytes. flt #7 */
+#define CHCR_CPL_FW4_PLD_DATA_SIZE          (4 * 64) /* bytes. flt #4 to #7 */
+
+#define KEY_CONTEXT_HDR_SALT_AND_PAD	    16
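+/* A flit is a 64-bit (8-byte) unit used by the SGE */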
+#define flits_to_bytes(x)  ((x) * 8)
+
+#define IV_NOP                  0
+#define IV_IMMEDIATE            1
+#define IV_DSGL			2
+
+#define AEAD_H_SIZE             16
+
+#define CRYPTO_ALG_SUB_TYPE_MASK            0x0f000000
+#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC       0x01000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106    0x02000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM	    0x03000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_SHA	    0x04000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM        0x05000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309    0x06000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_NULL	    0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CTR             0x08000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686     0x09000000
+#define CRYPTO_ALG_SUB_TYPE_XTS		    0x0a000000
+#define CRYPTO_ALG_SUB_TYPE_CBC		    0x0b000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_SHA	    0x0c000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_NULL   0x0d000000
+#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
+			      CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+
+#define MAX_SCRATCH_PAD_SIZE    32
+
+#define CHCR_HASH_MAX_BLOCK_SIZE_64  64
+#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
+#define CHCR_SRC_SG_SIZE (0x10000 - sizeof(int))
+#define CHCR_DST_SG_SIZE 2048
+
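+/* Helpers to fetch the chcr_context stored in a transform's private context */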
+static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
+{
+	return crypto_aead_ctx(tfm);
+}
+
+static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm)
+{
+	return crypto_ablkcipher_ctx(tfm);
+}
+
+static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
+{
+	return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+}
+
+struct ablk_ctx {
+	struct crypto_skcipher *sw_cipher;
+	struct crypto_cipher *aes_generic;
+	__be32 key_ctx_hdr;
+	unsigned int enckey_len;
+	unsigned char ciph_mode;
+	u8 key[CHCR_AES_MAX_KEY_LEN];
+	u8 nonce[4];
+	u8 rrkey[AES_MAX_KEY_SIZE];
+};
+struct chcr_aead_reqctx {
+	struct	sk_buff	*skb;
+	dma_addr_t iv_dma;
+	dma_addr_t b0_dma;
+	unsigned int b0_len;
+	unsigned int op;
+	short int aad_nents;
+	short int src_nents;
+	short int dst_nents;
+	u16 imm;
+	u16 verify;
+	u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE];
+	u8 *scratch_pad;
+};
+
+struct ulptx_walk {
+	struct ulptx_sgl *sgl;
+	unsigned int nents;
+	unsigned int pair_idx;
+	unsigned int last_sg_len;
+	struct scatterlist *last_sg;
+	struct ulptx_sge_pair *pair;
+
+};
+
+struct dsgl_walk {
+	unsigned int nents;
+	unsigned int last_sg_len;
+	struct scatterlist *last_sg;
+	struct cpl_rx_phys_dsgl *dsgl;
+	struct phys_sge_pairs *to;
+};
+
+struct chcr_gcm_ctx {
+	u8 ghash_h[AEAD_H_SIZE];
+};
+
+struct chcr_authenc_ctx {
+	u8 dec_rrkey[AES_MAX_KEY_SIZE];
+	u8 h_iopad[2 * CHCR_HASH_MAX_DIGEST_SIZE];
+	unsigned char auth_mode;
+};
+
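+/* The mode-specific contexts below are zero-length members that overlay the
+ * same storage; only the one matching the transform's type is used.
+ */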
+struct __aead_ctx {
+	struct chcr_gcm_ctx gcm[0];
+	struct chcr_authenc_ctx authenc[0];
+};
+
+struct chcr_aead_ctx {
+	__be32 key_ctx_hdr;
+	unsigned int enckey_len;
+	struct crypto_aead *sw_cipher;
+	u8 salt[MAX_SALT];
+	u8 key[CHCR_AES_MAX_KEY_LEN];
+	u8 nonce[4];
+	u16 hmac_ctrl;
+	u16 mayverify;
+	struct	__aead_ctx ctx[0];
+};
+
+struct hmac_ctx {
+	struct crypto_shash *base_hash;
+	u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
+	u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
+};
+
+struct __crypto_ctx {
+	struct hmac_ctx hmacctx[0];
+	struct ablk_ctx ablkctx[0];
+	struct chcr_aead_ctx aeadctx[0];
+};
+
+struct chcr_context {
+	struct chcr_dev *dev;
+	unsigned char tx_qidx;
+	unsigned char rx_qidx;
+	unsigned char tx_chan_id;
+	unsigned char pci_chan_id;
+	struct __crypto_ctx crypto_ctx[0];
+};
+
+struct chcr_hctx_per_wr {
+	struct scatterlist *srcsg;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+	u32 dma_len;
+	unsigned int src_ofst;
+	unsigned int processed;
+	u32 result;
+	u8 is_sg_map;
+	u8 imm;
+	/* Final callback called. The driver cannot rely on nbytes to
+	 * decide whether this is the final call.
+	 */
+	u8 isfinal;
+};
+
+struct chcr_ahash_req_ctx {
+	struct chcr_hctx_per_wr hctx_wr;
+	u8 *reqbfr;
+	u8 *skbfr;
+	/* SKB which is being sent to the hardware for processing */
+	u64 data_len;  /* Data length processed so far */
+	u8 reqlen;
+	u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
+	u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
+	u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
+};
+
+struct chcr_blkcipher_req_ctx {
+	struct sk_buff *skb;
+	struct scatterlist *dstsg;
+	unsigned int processed;
+	unsigned int last_req_len;
+	struct scatterlist *srcsg;
+	unsigned int src_ofst;
+	unsigned int dst_ofst;
+	unsigned int op;
+	u16 imm;
+	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+};
+
+struct chcr_alg_template {
+	u32 type;
+	u32 is_registered;
+	union {
+		struct crypto_alg crypto;
+		struct ahash_alg hash;
+		struct aead_alg aead;
+	} alg;
+};
+
+typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
+				       unsigned short qid,
+				       int size);
+
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
+int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
+		      unsigned short op_type);
+void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
+			 unsigned short op_type);
+void chcr_add_aead_dst_ent(struct aead_request *req,
+			   struct cpl_rx_phys_dsgl *phys_cpl,
+			   unsigned int assoclen,
+			   unsigned short qid);
+void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
+			   unsigned int assoclen);
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+			     void *ulptx,
+			     struct  cipher_wr_param *wrparam);
+int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
+void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+			     struct cpl_rx_phys_dsgl *phys_cpl,
+			     struct  cipher_wr_param *wrparam,
+			     unsigned short qid);
+int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
+void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
+			   struct hash_wr_param *param);
+int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
+void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
+void chcr_aead_common_exit(struct aead_request *req);
+#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
new file mode 100644
index 0000000..461b97e
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -0,0 +1,652 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written and Maintained by:
+ *	Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#define pr_fmt(fmt) "chcr:" fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/esp.h>
+#include <net/xfrm.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "chcr_core.h"
+#include "chcr_algo.h"
+#include "chcr_crypto.h"
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 256
+#define GCM_ESP_IV_SIZE     8
+
+static int chcr_xfrm_add_state(struct xfrm_state *x);
+static void chcr_xfrm_del_state(struct xfrm_state *x);
+static void chcr_xfrm_free_state(struct xfrm_state *x);
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+
+static const struct xfrmdev_ops chcr_xfrmdev_ops = {
+	.xdo_dev_state_add      = chcr_xfrm_add_state,
+	.xdo_dev_state_delete   = chcr_xfrm_del_state,
+	.xdo_dev_state_free     = chcr_xfrm_free_state,
+	.xdo_dev_offload_ok     = chcr_ipsec_offload_ok,
+};
+
+/* Add offload xfrms to Chelsio Interface */
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
+{
+	struct net_device *netdev = NULL;
+	int i;
+
+	for (i = 0; i < lld->nports; i++) {
+		netdev = lld->ports[i];
+		if (!netdev)
+			continue;
+		netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
+		netdev->hw_enc_features |= NETIF_F_HW_ESP;
+		netdev->features |= NETIF_F_HW_ESP;
+		rtnl_lock();
+		netdev_change_features(netdev);
+		rtnl_unlock();
+	}
+}
+
+static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
+					 struct ipsec_sa_entry *sa_entry)
+{
+	int hmac_ctrl;
+	int authsize = x->aead->alg_icv_len / 8;
+
+	sa_entry->authsize = authsize;
+
+	switch (authsize) {
+	case ICV_8:
+		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+		break;
+	case ICV_12:
+		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+		break;
+	case ICV_16:
+		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return hmac_ctrl;
+}
+
+static inline int chcr_ipsec_setkey(struct xfrm_state *x,
+				    struct ipsec_sa_entry *sa_entry)
+{
+	struct crypto_cipher *cipher;
+	int keylen = (x->aead->alg_key_len + 7) / 8;
+	unsigned char *key = x->aead->alg_key;
+	int ck_size, key_ctx_size = 0;
+	unsigned char ghash_h[AEAD_H_SIZE];
+	int ret = 0;
+
+	if (keylen > 3) {
+		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
+		memcpy(sa_entry->salt, key + keylen, 4);
+	}
+
+	if (keylen == AES_KEYSIZE_128) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	} else if (keylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+	} else if (keylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+	} else {
+		pr_err("GCM: Invalid key length %d\n", keylen);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	memcpy(sa_entry->key, key, keylen);
+	sa_entry->enckey_len = keylen;
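+	/* Key context: header + AES key rounded up to 16 bytes + 16-byte GHASH key H */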
+	key_ctx_size = sizeof(struct _key_ctx) +
+			      ((DIV_ROUND_UP(keylen, 16)) << 4) +
+			      AEAD_H_SIZE;
+
+	sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
+						 0, 0,
+						 key_ctx_size >> 4);
+
+	/* Calculate H = CIPH(K, 0 repeated 16 times), i.e. AES-encrypt a
+	 * 16-byte block of zeroes with the session key. H goes into the
+	 * key context.
+	 */
+	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
+	if (IS_ERR(cipher)) {
+		sa_entry->enckey_len = 0;
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = crypto_cipher_setkey(cipher, key, keylen);
+	if (ret) {
+		sa_entry->enckey_len = 0;
+		goto out1;
+	}
+	memset(ghash_h, 0, AEAD_H_SIZE);
+	crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+	memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
+	       16), ghash_h, AEAD_H_SIZE);
+	sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
+			      AEAD_H_SIZE;
+out1:
+	crypto_free_cipher(cipher);
+out:
+	return ret;
+}
+
+/*
+ * chcr_xfrm_add_state
+ * Returns 0 on success, a negative error if sending the message to the
+ * FPGA failed, or a positive error if the FPGA returned a bad response.
+ */
+static int chcr_xfrm_add_state(struct xfrm_state *x)
+{
+	struct ipsec_sa_entry *sa_entry;
+	int res = 0;
+
+	if (x->props.aalgo != SADB_AALG_NONE) {
+		pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
+		return -EINVAL;
+	}
+	if (x->props.calgo != SADB_X_CALG_NONE) {
+		pr_debug("CHCR: Cannot offload compressed xfrm states\n");
+		return -EINVAL;
+	}
+	if (x->props.flags & XFRM_STATE_ESN) {
+		pr_debug("CHCR: Cannot offload ESN xfrm states\n");
+		return -EINVAL;
+	}
+	if (x->props.family != AF_INET &&
+	    x->props.family != AF_INET6) {
+		pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
+		return -EINVAL;
+	}
+	if (x->props.mode != XFRM_MODE_TRANSPORT &&
+	    x->props.mode != XFRM_MODE_TUNNEL) {
+		pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
+		return -EINVAL;
+	}
+	if (x->id.proto != IPPROTO_ESP) {
+		pr_debug("CHCR: Only ESP xfrm state offloaded\n");
+		return -EINVAL;
+	}
+	if (x->encap) {
+		pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
+		return -EINVAL;
+	}
+	if (!x->aead) {
+		pr_debug("CHCR: Cannot offload xfrm states without aead\n");
+		return -EINVAL;
+	}
+	if (x->aead->alg_icv_len != 128 &&
+	    x->aead->alg_icv_len != 96) {
+		pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
+		return -EINVAL;
+	}
+	if ((x->aead->alg_key_len != 128 + 32) &&
+	    (x->aead->alg_key_len != 256 + 32)) {
+		pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+		return -EINVAL;
+	}
+	if (x->tfcpad) {
+		pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
+		return -EINVAL;
+	}
+	if (!x->geniv) {
+		pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
+		return -EINVAL;
+	}
+	if (strcmp(x->geniv, "seqiv")) {
+		pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
+		return -EINVAL;
+	}
+
+	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+	if (!sa_entry) {
+		res = -ENOMEM;
+		goto out;
+	}
+
+	sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+	chcr_ipsec_setkey(x, sa_entry);
+	x->xso.offload_handle = (unsigned long)sa_entry;
+	try_module_get(THIS_MODULE);
+out:
+	return res;
+}
+
+static void chcr_xfrm_del_state(struct xfrm_state *x)
+{
+	/* Nothing to do here; the SA entry is freed in chcr_xfrm_free_state() */
+	if (!x->xso.offload_handle)
+		return;
+}
+
+static void chcr_xfrm_free_state(struct xfrm_state *x)
+{
+	struct ipsec_sa_entry *sa_entry;
+
+	if (!x->xso.offload_handle)
+		return;
+
+	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+	kfree(sa_entry);
+	module_put(THIS_MODULE);
+}
+
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+	/* Offload with IP options is not supported yet */
+	if (ip_hdr(skb)->ihl > 5)
+		return false;
+
+	return true;
+}
+
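+/* Return the total header length if the packet is small enough to be sent
+ * entirely as immediate data within the work request, otherwise 0.
+ */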
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
+{
+	int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
+
+	hdrlen += sizeof(struct cpl_tx_pkt);
+	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+		return hdrlen;
+	return 0;
+}
+
+static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
+					     unsigned int kctx_len)
+{
+	unsigned int flits;
+	int hdrlen = is_eth_imm(skb, kctx_len);
+
+	/* If the skb is small enough, we can pump it out as a work request
+	 * with only immediate data.  In that case we just have to have the
+	 * TX Packet header plus the skb data in the Work Request.
+	 */
+
+	if (hdrlen)
+		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+
+	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+
+	/* Otherwise, we're going to have to construct a Scatter gather list
+	 * of the skb body and fragments.  We also include the flits necessary
+	 * for the TX Packet Work Request and CPL.  We always have a firmware
+	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+	 * message or, if we're doing a Large Send Offload, an LSO CPL message
+	 * with an embedded TX Packet Write CPL message.
+	 */
+	flits += (sizeof(struct fw_ulptx_wr) +
+		  sizeof(struct chcr_ipsec_req) +
+		  kctx_len +
+		  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+	return flits;
+}
+
+inline void *copy_cpltx_pktxt(struct sk_buff *skb,
+				struct net_device *dev,
+				void *pos)
+{
+	struct cpl_tx_pkt_core *cpl;
+	struct sge_eth_txq *q;
+	struct adapter *adap;
+	struct port_info *pi;
+	u32 ctrl0, qidx;
+	u64 cntrl = 0;
+	int left;
+
+	pi = netdev_priv(dev);
+	adap = pi->adapter;
+	qidx = skb->queue_mapping;
+	q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
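+	/* Wrap to the start of the descriptor ring if we have reached the end */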
+	left = (void *)q->q.stat - pos;
+	if (!left)
+		pos = q->q.desc;
+
+	cpl = (struct cpl_tx_pkt_core *)pos;
+
+	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+			       TXPKT_PF_V(adap->pf);
+	if (skb_vlan_tag_present(skb)) {
+		q->vlan_ins++;
+		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+	}
+
+	cpl->ctrl0 = htonl(ctrl0);
+	cpl->pack = htons(0);
+	cpl->len = htons(skb->len);
+	cpl->ctrl1 = cpu_to_be64(cntrl);
+
+	pos += sizeof(struct cpl_tx_pkt_core);
+	return pos;
+}
+
+inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+				struct net_device *dev,
+				void *pos,
+				struct ipsec_sa_entry *sa_entry)
+{
+	struct _key_ctx *key_ctx;
+	int left, eoq, key_len;
+	struct sge_eth_txq *q;
+	struct adapter *adap;
+	struct port_info *pi;
+	unsigned int qidx;
+
+	pi = netdev_priv(dev);
+	adap = pi->adapter;
+	qidx = skb->queue_mapping;
+	q = &adap->sge.ethtxq[qidx + pi->first_qset];
+	key_len = sa_entry->kctx_len;
+
+	/* end of queue, reset pos to start of queue */
+	eoq = (void *)q->q.stat - pos;
+	left = eoq;
+	if (!eoq) {
+		pos = q->q.desc;
+		left = 64 * q->q.size;
+	}
+
+	/* Copy the Key context header */
+	key_ctx = (struct _key_ctx *)pos;
+	key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
+	memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
+	pos += sizeof(struct _key_ctx);
+	left -= sizeof(struct _key_ctx);
+
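+	/* Copy the key, splitting it across the ring-wrap boundary if it
+	 * does not fit in the space left before the end of the ring.
+	 */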
+	if (likely(key_len <= left)) {
+		memcpy(key_ctx->key, sa_entry->key, key_len);
+		pos += key_len;
+	} else {
+		memcpy(pos, sa_entry->key, left);
+		memcpy(q->q.desc, sa_entry->key + left,
+		       key_len - left);
+		pos = (u8 *)q->q.desc + (key_len - left);
+	}
+	/* Copy CPL TX PKT XT */
+	pos = copy_cpltx_pktxt(skb, dev, pos);
+
+	return pos;
+}
+
+inline void *chcr_crypto_wreq(struct sk_buff *skb,
+			       struct net_device *dev,
+			       void *pos,
+			       int credits,
+			       struct ipsec_sa_entry *sa_entry)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+	unsigned int immdatalen = 0;
+	unsigned int ivsize = GCM_ESP_IV_SIZE;
+	struct chcr_ipsec_wr *wr;
+	unsigned int flits;
+	u32 wr_mid;
+	int qidx = skb_get_queue_mapping(skb);
+	struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
+	unsigned int kctx_len = sa_entry->kctx_len;
+	int qid = q->q.cntxt_id;
+
+	atomic_inc(&adap->chcr_stats.ipsec_cnt);
+
+	flits = calc_tx_sec_flits(skb, kctx_len);
+
+	if (is_eth_imm(skb, kctx_len))
+		immdatalen = skb->len;
+
+	/* WR Header */
+	wr = (struct chcr_ipsec_wr *)pos;
+	wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+	wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+
+	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+		netif_tx_stop_queue(q->txq);
+		q->q.stops++;
+		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+	}
+	wr_mid |= FW_ULPTX_WR_DATA_F;
+	wr->wreq.flowid_len16 = htonl(wr_mid);
+
+	/* ULPTX */
+	wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
+	wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2)  - 1);
+
+	/* Sub-command */
+	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
+	wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+					 sizeof(wr->req.key_ctx) +
+					 kctx_len +
+					 sizeof(struct cpl_tx_pkt_core) +
+					 immdatalen);
+
+	/* CPL_SEC_PDU */
+	wr->req.sec_cpl.op_ivinsrtofst = htonl(
+				CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+				CPL_TX_SEC_PDU_CPLLEN_V(2) |
+				CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
+				CPL_TX_SEC_PDU_IVINSRTOFST_V(
+				(skb_transport_offset(skb) +
+				sizeof(struct ip_esp_hdr) + 1)));
+
+	wr->req.sec_cpl.pldlen = htonl(skb->len);
+
+	wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+				(skb_transport_offset(skb) + 1),
+				(skb_transport_offset(skb) +
+				 sizeof(struct ip_esp_hdr)),
+				(skb_transport_offset(skb) +
+				 sizeof(struct ip_esp_hdr) +
+				 GCM_ESP_IV_SIZE + 1), 0);
+
+	wr->req.sec_cpl.cipherstop_lo_authinsert =
+		FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
+					   sizeof(struct ip_esp_hdr) +
+					   GCM_ESP_IV_SIZE + 1,
+					   sa_entry->authsize,
+					   sa_entry->authsize);
+	wr->req.sec_cpl.seqno_numivs =
+		FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
+					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
+					 CHCR_SCMD_AUTH_MODE_GHASH,
+					 sa_entry->hmac_ctrl,
+					 ivsize >> 1);
+	wr->req.sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+								  0, 0, 0);
+
+	pos += sizeof(struct fw_ulptx_wr) +
+	       sizeof(struct ulp_txpkt) +
+	       sizeof(struct ulptx_idata) +
+	       sizeof(struct cpl_tx_sec_pdu);
+
+	pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);
+
+	return pos;
+}
+
+/**
+ *      flits_to_desc - returns the num of Tx descriptors for the given flits
+ *      @n: the number of flits
+ *
+ *      Returns the number of Tx descriptors needed for the supplied number
+ *      of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+	WARN_ON(n > SGE_MAX_WR_LEN / 8);
+	return DIV_ROUND_UP(n, 8);
+}
+
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+	return q->size - 1 - q->in_use;
+}
+
+static void eth_txq_stop(struct sge_eth_txq *q)
+{
+	netif_tx_stop_queue(q->txq);
+	q->q.stops++;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+	q->in_use += n;
+	q->pidx += n;
+	if (q->pidx >= q->size)
+		q->pidx -= q->size;
+}
+
+/*
+ *      chcr_ipsec_xmit - called from the ULD Tx handler
+ */
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct xfrm_state *x = xfrm_input_state(skb);
+	struct ipsec_sa_entry *sa_entry;
+	u64 *pos, *end, *before, *sgl;
+	int qidx, left, credits;
+	unsigned int flits = 0, ndesc, kctx_len;
+	struct adapter *adap;
+	struct sge_eth_txq *q;
+	struct port_info *pi;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+	bool immediate = false;
+
+	if (!x->xso.offload_handle)
+		return NETDEV_TX_BUSY;
+
+	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+	kctx_len = sa_entry->kctx_len;
+
+	if (skb->sp->len != 1) {
+out_free:       dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	pi = netdev_priv(dev);
+	adap = pi->adapter;
+	qidx = skb->queue_mapping;
+	q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+	cxgb4_reclaim_completed_tx(adap, &q->q, true);
+
+	flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
+	ndesc = flits_to_desc(flits);
+	credits = txq_avail(&q->q) - ndesc;
+
+	if (unlikely(credits < 0)) {
+		eth_txq_stop(q);
+		dev_err(adap->pdev_dev,
+			"%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
+			dev->name, qidx, credits, ndesc, txq_avail(&q->q),
+			flits);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (is_eth_imm(skb, kctx_len))
+		immediate = true;
+
+	if (!immediate &&
+	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+		q->mapping_err++;
+		goto out_free;
+	}
+
+	pos = (u64 *)&q->q.desc[q->q.pidx];
+	before = (u64 *)pos;
+	end = (u64 *)pos + flits;
+	/* Setup IPSec CPL */
+	pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
+				       credits, sa_entry);
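+	/* If the WR wrapped past the end of the ring, recompute 'end' relative to the ring start */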
+	if (before > (u64 *)pos) {
+		left = (u8 *)end - (u8 *)q->q.stat;
+		end = (void *)q->q.desc + left;
+	}
+	if (pos == (u64 *)q->q.stat) {
+		left = (u8 *)end - (u8 *)q->q.stat;
+		end = (void *)q->q.desc + left;
+		pos = (void *)q->q.desc;
+	}
+
+	sgl = (void *)pos;
+	if (immediate) {
+		cxgb4_inline_tx_skb(skb, &q->q, sgl);
+		dev_consume_skb_any(skb);
+	} else {
+		int last_desc;
+
+		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
+				0, addr);
+		skb_orphan(skb);
+
+		last_desc = q->q.pidx + ndesc - 1;
+		if (last_desc >= q->q.size)
+			last_desc -= q->q.size;
+		q->q.sdesc[last_desc].skb = skb;
+		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+	}
+	txq_advance(&q->q, ndesc);
+
+	cxgb4_ring_tx_db(adap, &q->q, ndesc);
+	return NETDEV_TX_OK;
+}
diff --git a/drivers/crypto/chelsio/chtls/Makefile b/drivers/crypto/chelsio/chtls/Makefile
new file mode 100644
index 0000000..df13795
--- /dev/null
+++ b/drivers/crypto/chelsio/chtls/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 -Idrivers/crypto/chelsio/
+
+obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls.o
+chtls-objs := chtls_main.o chtls_cm.o chtls_io.o chtls_hw.o
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
new file mode 100644
index 0000000..7725b6e
--- /dev/null
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CHTLS_H__
+#define __CHTLS_H__
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/ctr.h>
+#include <crypto/gf128mul.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+#include <linux/tls.h>
+#include <net/tls.h>
+
+#include "t4fw_api.h"
+#include "t4_msg.h"
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+#include "l2t.h"
+#include "chcr_algo.h"
+#include "chcr_core.h"
+#include "chcr_crypto.h"
+
+#define MAX_IVS_PAGE			256
+#define TLS_KEY_CONTEXT_SZ		64
+#define CIPHER_BLOCK_SIZE		16
+#define GCM_TAG_SIZE			16
+#define KEY_ON_MEM_SZ			16
+#define AEAD_EXPLICIT_DATA_SIZE		8
+#define TLS_HEADER_LENGTH		5
+#define SCMD_CIPH_MODE_AES_GCM		2
+/* Any MFS size should work; 16384 matches the maximum TLS record size used by OpenSSL */
+#define TLS_MFS				16384
+
+#define RSS_HDR sizeof(struct rss_header)
+#define TLS_WR_CPL_LEN \
+	(sizeof(struct fw_tlstx_data_wr) + sizeof(struct cpl_tx_tls_sfo))
+
+enum {
+	CHTLS_KEY_CONTEXT_DSGL,
+	CHTLS_KEY_CONTEXT_IMM,
+	CHTLS_KEY_CONTEXT_DDR,
+};
+
+enum {
+	CHTLS_LISTEN_START,
+	CHTLS_LISTEN_STOP,
+};
+
+/* Flags for return value of CPL message handlers */
+enum {
+	CPL_RET_BUF_DONE =    1,   /* buffer processing done */
+	CPL_RET_BAD_MSG =     2,   /* bad CPL message */
+	CPL_RET_UNKNOWN_TID = 4    /* unexpected unknown TID */
+};
+
+#define LISTEN_INFO_HASH_SIZE 32
+#define RSPQ_HASH_BITS 5
+struct listen_info {
+	struct listen_info *next;  /* Link to next entry */
+	struct sock *sk;           /* The listening socket */
+	unsigned int stid;         /* The server TID */
+};
+
+enum {
+	T4_LISTEN_START_PENDING,
+	T4_LISTEN_STARTED
+};
+
+enum csk_flags {
+	CSK_CALLBACKS_CHKD,	/* socket callbacks have been sanitized */
+	CSK_ABORT_REQ_RCVD,	/* received one ABORT_REQ_RSS message */
+	CSK_TX_MORE_DATA,	/* sending ULP data; don't set SHOVE bit */
+	CSK_TX_WAIT_IDLE,	/* suspend Tx until in-flight data is ACKed */
+	CSK_ABORT_SHUTDOWN,	/* shouldn't send more abort requests */
+	CSK_ABORT_RPL_PENDING,	/* expecting an abort reply */
+	CSK_CLOSE_CON_REQUESTED,/* we've sent a close_conn_req */
+	CSK_TX_DATA_SENT,	/* sent a TX_DATA WR on this connection */
+	CSK_TX_FAILOVER,	/* Tx traffic failing over */
+	CSK_UPDATE_RCV_WND,	/* Need to update rcv window */
+	CSK_RST_ABORTED,	/* outgoing RST was aborted */
+	CSK_TLS_HANDSHK,	/* TLS Handshake */
+	CSK_CONN_INLINE,	/* Connection on HW */
+};
+
+enum chtls_cdev_state {
+	CHTLS_CDEV_STATE_UP = 1
+};
+
+struct listen_ctx {
+	struct sock *lsk;
+	struct chtls_dev *cdev;
+	struct sk_buff_head synq;
+	u32 state;
+};
+
+struct key_map {
+	unsigned long *addr;
+	unsigned int start;
+	unsigned int available;
+	unsigned int size;
+	spinlock_t lock; /* lock for key id request from map */
+} __packed;
+
+struct tls_scmd {
+	u32 seqno_numivs;
+	u32 ivgen_hdrlen;
+};
+
+struct chtls_dev {
+	struct tls_device tlsdev;
+	struct list_head list;
+	struct cxgb4_lld_info *lldi;
+	struct pci_dev *pdev;
+	struct listen_info *listen_hash_tab[LISTEN_INFO_HASH_SIZE];
+	spinlock_t listen_lock; /* lock for listen list */
+	struct net_device **ports;
+	struct tid_info *tids;
+	unsigned int pfvf;
+	const unsigned short *mtus;
+
+	struct idr hwtid_idr;
+	struct idr stid_idr;
+
+	spinlock_t idr_lock ____cacheline_aligned_in_smp;
+
+	struct net_device *egr_dev[NCHAN * 2];
+	struct sk_buff *rspq_skb_cache[1 << RSPQ_HASH_BITS];
+	struct sk_buff *askb;
+
+	struct sk_buff_head deferq;
+	struct work_struct deferq_task;
+
+	struct list_head list_node;
+	struct list_head rcu_node;
+	struct list_head na_node;
+	unsigned int send_page_order;
+	int max_host_sndbuf;
+	struct key_map kmap;
+	unsigned int cdev_state;
+};
+
+struct chtls_hws {
+	struct sk_buff_head sk_recv_queue;
+	u8 txqid;
+	u8 ofld;
+	u16 type;
+	u16 rstate;
+	u16 keyrpl;
+	u16 pldlen;
+	u16 rcvpld;
+	u16 compute;
+	u16 expansion;
+	u16 keylen;
+	u16 pdus;
+	u16 adjustlen;
+	u16 ivsize;
+	u16 txleft;
+	u32 mfs;
+	s32 txkey;
+	s32 rxkey;
+	u32 fcplenmax;
+	u32 copied_seq;
+	u64 tx_seq_no;
+	struct tls_scmd scmd;
+	struct tls12_crypto_info_aes_gcm_128 crypto_info;
+};
+
+struct chtls_sock {
+	struct sock *sk;
+	struct chtls_dev *cdev;
+	struct l2t_entry *l2t_entry;    /* pointer to the L2T entry */
+	struct net_device *egress_dev;  /* TX_CHAN for act open retry */
+
+	struct sk_buff_head txq;
+	struct sk_buff *wr_skb_head;
+	struct sk_buff *wr_skb_tail;
+	struct sk_buff *ctrl_skb_cache;
+	struct sk_buff *txdata_skb_cache; /* abort path messages */
+	struct kref kref;
+	unsigned long flags;
+	u32 opt2;
+	u32 wr_credits;
+	u32 wr_unacked;
+	u32 wr_max_credits;
+	u32 wr_nondata;
+	u32 hwtid;               /* TCP Control Block ID */
+	u32 txq_idx;
+	u32 rss_qid;
+	u32 tid;
+	u32 idr;
+	u32 mss;
+	u32 ulp_mode;
+	u32 tx_chan;
+	u32 rx_chan;
+	u32 sndbuf;
+	u32 txplen_max;
+	u32 mtu_idx;           /* MTU table index */
+	u32 smac_idx;
+	u8 port_id;
+	u8 tos;
+	u16 resv2;
+	u32 delack_mode;
+	u32 delack_seq;
+
+	void *passive_reap_next;        /* placeholder for passive */
+	struct chtls_hws tlshws;
+	struct synq {
+		struct sk_buff *next;
+		struct sk_buff *prev;
+	} synq;
+	struct listen_ctx *listen_ctx;
+};
+
+struct tls_hdr {
+	u8  type;
+	u16 version;
+	u16 length;
+} __packed;
+
+struct tlsrx_cmp_hdr {
+	u8  type;
+	u16 version;
+	u16 length;
+
+	u64 tls_seq;
+	u16 reserved1;
+	u8  res_to_mac_error;
+} __packed;
+
+/* res_to_mac_error fields */
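+/* Suffixes follow the usual cxgb4 convention: _S shift, _M mask, _V build, _G extract, _F flag */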
+#define TLSRX_HDR_PKT_INT_ERROR_S   4
+#define TLSRX_HDR_PKT_INT_ERROR_M   0x1
+#define TLSRX_HDR_PKT_INT_ERROR_V(x) \
+	((x) << TLSRX_HDR_PKT_INT_ERROR_S)
+#define TLSRX_HDR_PKT_INT_ERROR_G(x) \
+	(((x) >> TLSRX_HDR_PKT_INT_ERROR_S) & TLSRX_HDR_PKT_INT_ERROR_M)
+#define TLSRX_HDR_PKT_INT_ERROR_F   TLSRX_HDR_PKT_INT_ERROR_V(1U)
+
+#define TLSRX_HDR_PKT_SPP_ERROR_S        3
+#define TLSRX_HDR_PKT_SPP_ERROR_M        0x1
+#define TLSRX_HDR_PKT_SPP_ERROR_V(x)     ((x) << TLSRX_HDR_PKT_SPP_ERROR_S)
+#define TLSRX_HDR_PKT_SPP_ERROR_G(x)     \
+	(((x) >> TLSRX_HDR_PKT_SPP_ERROR_S) & TLSRX_HDR_PKT_SPP_ERROR_M)
+#define TLSRX_HDR_PKT_SPP_ERROR_F        TLSRX_HDR_PKT_SPP_ERROR_V(1U)
+
+#define TLSRX_HDR_PKT_CCDX_ERROR_S       2
+#define TLSRX_HDR_PKT_CCDX_ERROR_M       0x1
+#define TLSRX_HDR_PKT_CCDX_ERROR_V(x)    ((x) << TLSRX_HDR_PKT_CCDX_ERROR_S)
+#define TLSRX_HDR_PKT_CCDX_ERROR_G(x)    \
+	(((x) >> TLSRX_HDR_PKT_CCDX_ERROR_S) & TLSRX_HDR_PKT_CCDX_ERROR_M)
+#define TLSRX_HDR_PKT_CCDX_ERROR_F       TLSRX_HDR_PKT_CCDX_ERROR_V(1U)
+
+#define TLSRX_HDR_PKT_PAD_ERROR_S        1
+#define TLSRX_HDR_PKT_PAD_ERROR_M        0x1
+#define TLSRX_HDR_PKT_PAD_ERROR_V(x)     ((x) << TLSRX_HDR_PKT_PAD_ERROR_S)
+#define TLSRX_HDR_PKT_PAD_ERROR_G(x)     \
+	(((x) >> TLSRX_HDR_PKT_PAD_ERROR_S) & TLSRX_HDR_PKT_PAD_ERROR_M)
+#define TLSRX_HDR_PKT_PAD_ERROR_F        TLSRX_HDR_PKT_PAD_ERROR_V(1U)
+
+#define TLSRX_HDR_PKT_MAC_ERROR_S        0
+#define TLSRX_HDR_PKT_MAC_ERROR_M        0x1
+#define TLSRX_HDR_PKT_MAC_ERROR_V(x)     ((x) << TLSRX_HDR_PKT_MAC_ERROR_S)
+#define TLSRX_HDR_PKT_MAC_ERROR_G(x)     \
+	(((x) >> TLSRX_HDR_PKT_MAC_ERROR_S) & TLSRX_HDR_PKT_MAC_ERROR_M)
+#define TLSRX_HDR_PKT_MAC_ERROR_F        TLSRX_HDR_PKT_MAC_ERROR_V(1U)
+
+#define TLSRX_HDR_PKT_ERROR_M           0x1F
+#define CONTENT_TYPE_ERROR		0x7F
+
+struct ulp_mem_rw {
+	__be32 cmd;
+	__be32 len16;             /* command length */
+	__be32 dlen;              /* data length in 32-byte units */
+	__be32 lock_addr;
+};
+
+struct tls_key_wr {
+	__be32 op_to_compl;
+	__be32 flowid_len16;
+	__be32 ftid;
+	u8   reneg_to_write_rx;
+	u8   protocol;
+	__be16 mfs;
+};
+
+struct tls_key_req {
+	struct tls_key_wr wr;
+	struct ulp_mem_rw req;
+	struct ulptx_idata sc_imm;
+};
+
+/*
+ * This lives in skb->cb and is used to chain WRs in a linked list.
+ */
+struct wr_skb_cb {
+	struct l2t_skb_cb l2t;          /* reserve space for l2t CB */
+	struct sk_buff *next_wr;        /* next write request */
+};
+
+/* Per-skb backlog handler.  Run when a socket's backlog is processed. */
+struct blog_skb_cb {
+	void (*backlog_rcv)(struct sock *sk, struct sk_buff *skb);
+	struct chtls_dev *cdev;
+};
+
+/*
+ * Similar to tcp_skb_cb but with ULP elements added to support TLS,
+ * etc.
+ */
+struct ulp_skb_cb {
+	struct wr_skb_cb wr;		/* reserve space for write request */
+	u16 flags;			/* TCP-like flags */
+	u8 psh;
+	u8 ulp_mode;			/* ULP mode/submode of sk_buff */
+	u32 seq;			/* TCP sequence number */
+	union { /* ULP-specific fields */
+		struct {
+			u8  type;
+			u8  ofld;
+			u8  iv;
+		} tls;
+	} ulp;
+};
+
+#define ULP_SKB_CB(skb) ((struct ulp_skb_cb *)&((skb)->cb[0]))
+#define BLOG_SKB_CB(skb) ((struct blog_skb_cb *)(skb)->cb)
+
+/*
+ * Flags for ulp_skb_cb.flags.
+ */
+enum {
+	ULPCB_FLAG_NEED_HDR  = 1 << 0,	/* packet needs a TX_DATA_WR header */
+	ULPCB_FLAG_NO_APPEND = 1 << 1,	/* don't grow this skb */
+	ULPCB_FLAG_BARRIER   = 1 << 2,	/* set TX_WAIT_IDLE after sending */
+	ULPCB_FLAG_HOLD      = 1 << 3,	/* skb not ready for Tx yet */
+	ULPCB_FLAG_COMPL     = 1 << 4,	/* request WR completion */
+	ULPCB_FLAG_URG       = 1 << 5,	/* urgent data */
+	ULPCB_FLAG_TLS_HDR   = 1 << 6,  /* payload carries a TLS header */
+	ULPCB_FLAG_NO_HDR    = 1 << 7,  /* not an offload WR */
+};
+
+/* The ULP mode/submode of an skbuff */
+#define skb_ulp_mode(skb)  (ULP_SKB_CB(skb)->ulp_mode)
+#define TCP_PAGE(sk)   (sk->sk_frag.page)
+#define TCP_OFF(sk)    (sk->sk_frag.offset)
+
+static inline struct chtls_dev *to_chtls_dev(struct tls_device *tlsdev)
+{
+	return container_of(tlsdev, struct chtls_dev, tlsdev);
+}
+
+static inline void csk_set_flag(struct chtls_sock *csk,
+				enum csk_flags flag)
+{
+	__set_bit(flag, &csk->flags);
+}
+
+static inline void csk_reset_flag(struct chtls_sock *csk,
+				  enum csk_flags flag)
+{
+	__clear_bit(flag, &csk->flags);
+}
+
+static inline bool csk_conn_inline(const struct chtls_sock *csk)
+{
+	return test_bit(CSK_CONN_INLINE, &csk->flags);
+}
+
+static inline int csk_flag(const struct sock *sk, enum csk_flags flag)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+	if (!csk_conn_inline(csk))
+		return 0;
+	return test_bit(flag, &csk->flags);
+}
+
+static inline int csk_flag_nochk(const struct chtls_sock *csk,
+				 enum csk_flags flag)
+{
+	return test_bit(flag, &csk->flags);
+}
+
+static inline void *cplhdr(struct sk_buff *skb)
+{
+	return skb->data;
+}
+
+static inline int is_neg_adv(unsigned int status)
+{
+	return status == CPL_ERR_RTX_NEG_ADVICE ||
+	       status == CPL_ERR_KEEPALV_NEG_ADVICE ||
+	       status == CPL_ERR_PERSIST_NEG_ADVICE;
+}
+
+static inline void process_cpl_msg(void (*fn)(struct sock *, struct sk_buff *),
+				   struct sock *sk,
+				   struct sk_buff *skb)
+{
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
+
+	bh_lock_sock(sk);
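+	/* If the socket is owned by user context, defer to the socket
+	 * backlog; backlog_rcv runs when the owner releases the lock.
+	 */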
+	if (unlikely(sock_owned_by_user(sk))) {
+		BLOG_SKB_CB(skb)->backlog_rcv = fn;
+		__sk_add_backlog(sk, skb);
+	} else {
+		fn(sk, skb);
+	}
+	bh_unlock_sock(sk);
+}
+
+static inline void chtls_sock_free(struct kref *ref)
+{
+	struct chtls_sock *csk = container_of(ref, struct chtls_sock,
+					      kref);
+	kfree(csk);
+}
+
+static inline void __chtls_sock_put(const char *fn, struct chtls_sock *csk)
+{
+	kref_put(&csk->kref, chtls_sock_free);
+}
+
+static inline void __chtls_sock_get(const char *fn,
+				    struct chtls_sock *csk)
+{
+	kref_get(&csk->kref);
+}
+
+static inline void send_or_defer(struct sock *sk, struct tcp_sock *tp,
+				 struct sk_buff *skb, int through_l2t)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+	if (through_l2t) {
+		/* send through L2T */
+		cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
+	} else {
+		/* send directly */
+		cxgb4_ofld_send(csk->egress_dev, skb);
+	}
+}
+
+typedef int (*chtls_handler_func)(struct chtls_dev *, struct sk_buff *);
+extern chtls_handler_func chtls_handlers[NUM_CPL_CMDS];
+void chtls_install_cpl_ops(struct sock *sk);
+int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi);
+void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk);
+int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk);
+void chtls_close(struct sock *sk, long timeout);
+int chtls_disconnect(struct sock *sk, int flags);
+void chtls_shutdown(struct sock *sk, int how);
+void chtls_destroy_sock(struct sock *sk);
+int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
+		  size_t len, int nonblock, int flags, int *addr_len);
+int chtls_sendpage(struct sock *sk, struct page *page,
+		   int offset, size_t size, int flags);
+int send_tx_flowc_wr(struct sock *sk, int compl,
+		     u32 snd_nxt, u32 rcv_nxt);
+void chtls_tcp_push(struct sock *sk, int flags);
+int chtls_push_frames(struct chtls_sock *csk, int comp);
+int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
+int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode);
+void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
+unsigned int keyid_to_addr(int start_addr, int keyid);
+void free_tls_keyid(struct sock *sk);
+#endif
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
new file mode 100644
index 0000000..0997e16
--- /dev/null
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -0,0 +1,2146 @@
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sched/signal.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/if_vlan.h>
+#include <net/tcp.h>
+#include <net/dst.h>
+
+#include "chtls.h"
+#include "chtls_cm.h"
+
+/*
+ * State transitions and actions for close.  Note that if we are in SYN_SENT
+ * we remain in that state as we cannot control a connection while it's in
+ * SYN_SENT; such connections are allowed to establish and are then aborted.
+ */
+static unsigned char new_state[16] = {
+	/* current state:     new state:      action: */
+	/* (Invalid)       */ TCP_CLOSE,
+	/* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
+	/* TCP_SYN_SENT    */ TCP_SYN_SENT,
+	/* TCP_SYN_RECV    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
+	/* TCP_FIN_WAIT1   */ TCP_FIN_WAIT1,
+	/* TCP_FIN_WAIT2   */ TCP_FIN_WAIT2,
+	/* TCP_TIME_WAIT   */ TCP_CLOSE,
+	/* TCP_CLOSE       */ TCP_CLOSE,
+	/* TCP_CLOSE_WAIT  */ TCP_LAST_ACK | TCP_ACTION_FIN,
+	/* TCP_LAST_ACK    */ TCP_LAST_ACK,
+	/* TCP_LISTEN      */ TCP_CLOSE,
+	/* TCP_CLOSING     */ TCP_CLOSING,
+};
+
+static struct chtls_sock *chtls_sock_create(struct chtls_dev *cdev)
+{
+	struct chtls_sock *csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
+
+	if (!csk)
+		return NULL;
+
+	csk->txdata_skb_cache = alloc_skb(TXDATA_SKB_LEN, GFP_ATOMIC);
+	if (!csk->txdata_skb_cache) {
+		kfree(csk);
+		return NULL;
+	}
+
+	kref_init(&csk->kref);
+	csk->cdev = cdev;
+	skb_queue_head_init(&csk->txq);
+	csk->wr_skb_head = NULL;
+	csk->wr_skb_tail = NULL;
+	csk->mss = MAX_MSS;
+	csk->tlshws.ofld = 1;
+	csk->tlshws.txkey = -1;
+	csk->tlshws.rxkey = -1;
+	csk->tlshws.mfs = TLS_MFS;
+	skb_queue_head_init(&csk->tlshws.sk_recv_queue);
+	return csk;
+}
+
+static void chtls_sock_release(struct kref *ref)
+{
+	struct chtls_sock *csk =
+		container_of(ref, struct chtls_sock, kref);
+
+	kfree(csk);
+}
+
+static struct net_device *chtls_ipv4_netdev(struct chtls_dev *cdev,
+					    struct sock *sk)
+{
+	struct net_device *ndev = cdev->ports[0];
+
+	if (likely(!inet_sk(sk)->inet_rcv_saddr))
+		return ndev;
+
+	ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
+	if (!ndev)
+		return NULL;
+
+	if (is_vlan_dev(ndev))
+		return vlan_dev_real_dev(ndev);
+	return ndev;
+}
+
+static void assign_rxopt(struct sock *sk, unsigned int opt)
+{
+	const struct chtls_dev *cdev;
+	struct chtls_sock *csk;
+	struct tcp_sock *tp;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	tp = tcp_sk(sk);
+
+	cdev = csk->cdev;
+	tp->tcp_header_len           = sizeof(struct tcphdr);
+	tp->rx_opt.mss_clamp         = cdev->mtus[TCPOPT_MSS_G(opt)] - 40;
+	tp->mss_cache                = tp->rx_opt.mss_clamp;
+	tp->rx_opt.tstamp_ok         = TCPOPT_TSTAMP_G(opt);
+	tp->rx_opt.snd_wscale        = TCPOPT_SACK_G(opt);
+	tp->rx_opt.wscale_ok         = TCPOPT_WSCALE_OK_G(opt);
+	SND_WSCALE(tp)               = TCPOPT_SND_WSCALE_G(opt);
+	if (!tp->rx_opt.wscale_ok)
+		tp->rx_opt.rcv_wscale = 0;
+	if (tp->rx_opt.tstamp_ok) {
+		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
+		tp->rx_opt.mss_clamp -= TCPOLEN_TSTAMP_ALIGNED;
+	} else if (csk->opt2 & TSTAMPS_EN_F) {
+		csk->opt2 &= ~TSTAMPS_EN_F;
+		csk->mtu_idx = TCPOPT_MSS_G(opt);
+	}
+}
+
+static void chtls_purge_receive_queue(struct sock *sk)
+{
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+		skb_dst_set(skb, (void *)NULL);
+		kfree_skb(skb);
+	}
+}
+
+static void chtls_purge_write_queue(struct sock *sk)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(&csk->txq))) {
+		sk->sk_wmem_queued -= skb->truesize;
+		__kfree_skb(skb);
+	}
+}
+
+static void chtls_purge_recv_queue(struct sock *sk)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct chtls_hws *tlsk = &csk->tlshws;
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
+		skb_dst_set(skb, NULL);
+		kfree_skb(skb);
+	}
+}
+
+static void abort_arp_failure(void *handle, struct sk_buff *skb)
+{
+	struct cpl_abort_req *req = cplhdr(skb);
+	struct chtls_dev *cdev;
+
+	cdev = (struct chtls_dev *)handle;
+	req->cmd = CPL_ABORT_NO_RST;
+	cxgb4_ofld_send(cdev->lldi->ports[0], skb);
+}
+
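+/*
+ * Return an skb for a control message: reuse the connection's cached control
+ * skb when it is neither shared nor cloned, otherwise fall back to a fresh
+ * no-fail allocation.
+ */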
+static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
+{
+	if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
+		__skb_trim(skb, 0);
+		refcount_add(2, &skb->users);
+	} else {
+		skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
+	}
+	return skb;
+}
+
+static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb)
+{
+	struct cpl_abort_req *req;
+	struct chtls_sock *csk;
+	struct tcp_sock *tp;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	tp = tcp_sk(sk);
+
+	if (!skb)
+		skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));
+
+	req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
+	INIT_TP_WR_CPL(req, CPL_ABORT_REQ, csk->tid);
+	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
+	req->rsvd0 = htonl(tp->snd_nxt);
+	req->rsvd1 = !csk_flag_nochk(csk, CSK_TX_DATA_SENT);
+	req->cmd = mode;
+	t4_set_arp_err_handler(skb, csk->cdev, abort_arp_failure);
+	send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST);
+}
+
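+/*
+ * Tear a connection down with a reset: make sure the initial flow-control WR
+ * has been sent, purge the write queue and, unless the socket is still in
+ * SYN_RECV or an abort is already in progress, send a CPL_ABORT_REQ.
+ */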
+static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+	if (unlikely(csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) ||
+		     !csk->cdev)) {
+		if (sk->sk_state == TCP_SYN_RECV)
+			csk_set_flag(csk, CSK_RST_ABORTED);
+		goto out;
+	}
+
+	if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
+		struct tcp_sock *tp = tcp_sk(sk);
+
+		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
+			WARN_ONCE(1, "send tx flowc error");
+		csk_set_flag(csk, CSK_TX_DATA_SENT);
+	}
+
+	csk_set_flag(csk, CSK_ABORT_RPL_PENDING);
+	chtls_purge_write_queue(sk);
+
+	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
+	if (sk->sk_state != TCP_SYN_RECV)
+		chtls_send_abort(sk, mode, skb);
+	else
+		goto out;
+
+	return;
+out:
+	if (skb)
+		kfree_skb(skb);
+}
+
+static void release_tcp_port(struct sock *sk)
+{
+	if (inet_csk(sk)->icsk_bind_hash)
+		inet_put_port(sk);
+}
+
+static void tcp_uncork(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (tp->nonagle & TCP_NAGLE_CORK) {
+		tp->nonagle &= ~TCP_NAGLE_CORK;
+		chtls_tcp_push(sk, 0);
+	}
+}
+
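+/*
+ * Queue a CPL_CLOSE_CON_REQ work request, which asks the hardware to send a
+ * FIN for this connection, and push any pending TX frames along with it.
+ */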
+static void chtls_close_conn(struct sock *sk)
+{
+	struct cpl_close_con_req *req;
+	struct chtls_sock *csk;
+	struct sk_buff *skb;
+	unsigned int tid;
+	unsigned int len;
+
+	len = roundup(sizeof(struct cpl_close_con_req), 16);
+	csk = rcu_dereference_sk_user_data(sk);
+	tid = csk->tid;
+
+	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
+	req = (struct cpl_close_con_req *)__skb_put(skb, len);
+	memset(req, 0, len);
+	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) |
+			      FW_WR_IMMDLEN_V(sizeof(*req) -
+					      sizeof(req->wr)));
+	req->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)) |
+			       FW_WR_FLOWID_V(tid));
+
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+
+	tcp_uncork(sk);
+	skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
+	if (sk->sk_state != TCP_SYN_SENT)
+		chtls_push_frames(csk, 1);
+}
+
+/*
+ * Perform a state transition during close and return the actions indicated
+ * for the transition.  Do not make this function inline; the main reason
+ * it exists at all is to avoid multiple inlining of tcp_set_state.
+ */
+static int make_close_transition(struct sock *sk)
+{
+	int next = (int)new_state[sk->sk_state];
+
+	tcp_set_state(sk, next & TCP_STATE_MASK);
+	return next & TCP_ACTION_FIN;
+}
+
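+/*
+ * Offload analogue of tcp_close(): purge the receive queues, choose between
+ * a graceful close and an abort based on socket state and unread data, then
+ * orphan the socket and destroy it once it reaches TCP_CLOSE.
+ */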
+void chtls_close(struct sock *sk, long timeout)
+{
+	int data_lost, prev_state;
+	struct chtls_sock *csk;
+
+	csk = rcu_dereference_sk_user_data(sk);
+
+	lock_sock(sk);
+	sk->sk_shutdown |= SHUTDOWN_MASK;
+
+	data_lost = skb_queue_len(&sk->sk_receive_queue);
+	data_lost |= skb_queue_len(&csk->tlshws.sk_recv_queue);
+	chtls_purge_recv_queue(sk);
+	chtls_purge_receive_queue(sk);
+
+	if (sk->sk_state == TCP_CLOSE) {
+		goto wait;
+	} else if (data_lost || sk->sk_state == TCP_SYN_SENT) {
+		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
+		release_tcp_port(sk);
+		goto unlock;
+	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+		sk->sk_prot->disconnect(sk, 0);
+	} else if (make_close_transition(sk)) {
+		chtls_close_conn(sk);
+	}
+wait:
+	if (timeout)
+		sk_stream_wait_close(sk, timeout);
+
+unlock:
+	prev_state = sk->sk_state;
+	sock_hold(sk);
+	sock_orphan(sk);
+
+	release_sock(sk);
+
+	local_bh_disable();
+	bh_lock_sock(sk);
+
+	if (prev_state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
+		goto out;
+
+	if (sk->sk_state == TCP_FIN_WAIT2 && tcp_sk(sk)->linger2 < 0 &&
+	    !csk_flag(sk, CSK_ABORT_SHUTDOWN)) {
+		struct sk_buff *skb;
+
+		skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
+		if (skb)
+			chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb);
+	}
+
+	if (sk->sk_state == TCP_CLOSE)
+		inet_csk_destroy_sock(sk);
+
+out:
+	bh_unlock_sock(sk);
+	local_bh_enable();
+	sock_put(sk);
+}
+
+/*
+ * Wait until a socket enters one of the given states.
+ */
+static int wait_for_states(struct sock *sk, unsigned int states)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	struct socket_wq _sk_wq;
+	long current_timeo;
+	int err = 0;
+
+	current_timeo = 200;
+
+	/*
+	 * We want this to work even when there's no associated struct socket.
+	 * In that case we provide a temporary wait_queue_head_t.
+	 */
+	if (!sk->sk_wq) {
+		init_waitqueue_head(&_sk_wq.wait);
+		_sk_wq.fasync_list = NULL;
+		init_rcu_head_on_stack(&_sk_wq.rcu);
+		RCU_INIT_POINTER(sk->sk_wq, &_sk_wq);
+	}
+
+	add_wait_queue(sk_sleep(sk), &wait);
+	while (!sk_in_state(sk, states)) {
+		if (!current_timeo) {
+			err = -EBUSY;
+			break;
+		}
+		if (signal_pending(current)) {
+			err = sock_intr_errno(current_timeo);
+			break;
+		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		release_sock(sk);
+		if (!sk_in_state(sk, states))
+			current_timeo = schedule_timeout(current_timeo);
+		__set_current_state(TASK_RUNNING);
+		lock_sock(sk);
+	}
+	remove_wait_queue(sk_sleep(sk), &wait);
+
+	if (rcu_dereference(sk->sk_wq) == &_sk_wq)
+		sk->sk_wq = NULL;
+	return err;
+}
+
+int chtls_disconnect(struct sock *sk, int flags)
+{
+	struct chtls_sock *csk;
+	struct tcp_sock *tp;
+	int err;
+
+	tp = tcp_sk(sk);
+	csk = rcu_dereference_sk_user_data(sk);
+	chtls_purge_recv_queue(sk);
+	chtls_purge_receive_queue(sk);
+	chtls_purge_write_queue(sk);
+
+	if (sk->sk_state != TCP_CLOSE) {
+		sk->sk_err = ECONNRESET;
+		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
+		err = wait_for_states(sk, TCPF_CLOSE);
+		if (err)
+			return err;
+	}
+	chtls_purge_recv_queue(sk);
+	chtls_purge_receive_queue(sk);
+	tp->max_window = 0xFFFF << (tp->rx_opt.snd_wscale);
+	return tcp_disconnect(sk, flags);
+}
+
+#define SHUTDOWN_ELIGIBLE_STATE (TCPF_ESTABLISHED | \
+				 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)
+void chtls_shutdown(struct sock *sk, int how)
+{
+	if ((how & SEND_SHUTDOWN) &&
+	    sk_in_state(sk, SHUTDOWN_ELIGIBLE_STATE) &&
+	    make_close_transition(sk))
+		chtls_close_conn(sk);
+}
+
+void chtls_destroy_sock(struct sock *sk)
+{
+	struct chtls_sock *csk;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	chtls_purge_recv_queue(sk);
+	csk->ulp_mode = ULP_MODE_NONE;
+	chtls_purge_write_queue(sk);
+	free_tls_keyid(sk);
+	kref_put(&csk->kref, chtls_sock_release);
+	sk->sk_prot = &tcp_prot;
+	sk->sk_prot->destroy(sk);
+}
+
+static void reset_listen_child(struct sock *child)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(child);
+	struct sk_buff *skb;
+
+	skb = alloc_ctrl_skb(csk->txdata_skb_cache,
+			     sizeof(struct cpl_abort_req));
+
+	chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
+	sock_orphan(child);
+	INC_ORPHAN_COUNT(child);
+	if (child->sk_state == TCP_CLOSE)
+		inet_csk_destroy_sock(child);
+}
+
+static void chtls_disconnect_acceptq(struct sock *listen_sk)
+{
+	struct request_sock **pprev;
+
+	pprev = ACCEPT_QUEUE(listen_sk);
+	while (*pprev) {
+		struct request_sock *req = *pprev;
+
+		if (req->rsk_ops == &chtls_rsk_ops) {
+			struct sock *child = req->sk;
+
+			*pprev = req->dl_next;
+			sk_acceptq_removed(listen_sk);
+			reqsk_put(req);
+			sock_hold(child);
+			local_bh_disable();
+			bh_lock_sock(child);
+			release_tcp_port(child);
+			reset_listen_child(child);
+			bh_unlock_sock(child);
+			local_bh_enable();
+			sock_put(child);
+		} else {
+			pprev = &req->dl_next;
+		}
+	}
+}
+
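+/*
+ * Simple hash table mapping listening sockets to the server TID (stid)
+ * allocated for them on the adapter, protected by cdev->listen_lock.
+ */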
+static int listen_hashfn(const struct sock *sk)
+{
+	return ((unsigned long)sk >> 10) & (LISTEN_INFO_HASH_SIZE - 1);
+}
+
+static struct listen_info *listen_hash_add(struct chtls_dev *cdev,
+					   struct sock *sk,
+					   unsigned int stid)
+{
+	struct listen_info *p = kmalloc(sizeof(*p), GFP_KERNEL);
+
+	if (p) {
+		int key = listen_hashfn(sk);
+
+		p->sk = sk;
+		p->stid = stid;
+		spin_lock(&cdev->listen_lock);
+		p->next = cdev->listen_hash_tab[key];
+		cdev->listen_hash_tab[key] = p;
+		spin_unlock(&cdev->listen_lock);
+	}
+	return p;
+}
+
+static int listen_hash_find(struct chtls_dev *cdev,
+			    struct sock *sk)
+{
+	struct listen_info *p;
+	int stid = -1;
+	int key;
+
+	key = listen_hashfn(sk);
+
+	spin_lock(&cdev->listen_lock);
+	for (p = cdev->listen_hash_tab[key]; p; p = p->next)
+		if (p->sk == sk) {
+			stid = p->stid;
+			break;
+		}
+	spin_unlock(&cdev->listen_lock);
+	return stid;
+}
+
+static int listen_hash_del(struct chtls_dev *cdev,
+			   struct sock *sk)
+{
+	struct listen_info *p, **prev;
+	int stid = -1;
+	int key;
+
+	key = listen_hashfn(sk);
+	prev = &cdev->listen_hash_tab[key];
+
+	spin_lock(&cdev->listen_lock);
+	for (p = *prev; p; prev = &p->next, p = p->next)
+		if (p->sk == sk) {
+			stid = p->stid;
+			*prev = p->next;
+			kfree(p);
+			break;
+		}
+	spin_unlock(&cdev->listen_lock);
+	return stid;
+}
+
+static void cleanup_syn_rcv_conn(struct sock *child, struct sock *parent)
+{
+	struct request_sock *req;
+	struct chtls_sock *csk;
+
+	csk = rcu_dereference_sk_user_data(child);
+	req = csk->passive_reap_next;
+
+	reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req);
+	__skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
+	chtls_reqsk_free(req);
+	csk->passive_reap_next = NULL;
+}
+
+static void chtls_reset_synq(struct listen_ctx *listen_ctx)
+{
+	struct sock *listen_sk = listen_ctx->lsk;
+
+	while (!skb_queue_empty(&listen_ctx->synq)) {
+		struct chtls_sock *csk =
+			container_of((struct synq *)__skb_dequeue
+				(&listen_ctx->synq), struct chtls_sock, synq);
+		struct sock *child = csk->sk;
+
+		cleanup_syn_rcv_conn(child, listen_sk);
+		sock_hold(child);
+		local_bh_disable();
+		bh_lock_sock(child);
+		release_tcp_port(child);
+		reset_listen_child(child);
+		bh_unlock_sock(child);
+		local_bh_enable();
+		sock_put(child);
+	}
+}
+
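+/*
+ * Start offloading a listening socket: allocate a server TID, record the
+ * socket in the listen hash table and program the adapter to deliver
+ * matching incoming connections to this driver.  IPv4 only at this point.
+ */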
+int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
+{
+	struct net_device *ndev;
+	struct listen_ctx *ctx;
+	struct adapter *adap;
+	struct port_info *pi;
+	int stid;
+	int ret;
+
+	if (sk->sk_family != PF_INET)
+		return -EAGAIN;
+
+	rcu_read_lock();
+	ndev = chtls_ipv4_netdev(cdev, sk);
+	rcu_read_unlock();
+	if (!ndev)
+		return -EBADF;
+
+	pi = netdev_priv(ndev);
+	adap = pi->adapter;
+	if (!(adap->flags & FULL_INIT_DONE))
+		return -EBADF;
+
+	if (listen_hash_find(cdev, sk) >= 0)   /* already have it */
+		return -EADDRINUSE;
+
+	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	__module_get(THIS_MODULE);
+	ctx->lsk = sk;
+	ctx->cdev = cdev;
+	ctx->state = T4_LISTEN_START_PENDING;
+	skb_queue_head_init(&ctx->synq);
+
+	stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx);
+	if (stid < 0)
+		goto free_ctx;
+
+	sock_hold(sk);
+	if (!listen_hash_add(cdev, sk, stid))
+		goto free_stid;
+
+	ret = cxgb4_create_server(ndev, stid,
+				  inet_sk(sk)->inet_rcv_saddr,
+				  inet_sk(sk)->inet_sport, 0,
+				  cdev->lldi->rxq_ids[0]);
+	if (ret > 0)
+		ret = net_xmit_errno(ret);
+	if (ret)
+		goto del_hash;
+	return 0;
+del_hash:
+	listen_hash_del(cdev, sk);
+free_stid:
+	cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
+	sock_put(sk);
+free_ctx:
+	kfree(ctx);
+	module_put(THIS_MODULE);
+	return -EBADF;
+}
+
+void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
+{
+	struct listen_ctx *listen_ctx;
+	int stid;
+
+	stid = listen_hash_del(cdev, sk);
+	if (stid < 0)
+		return;
+
+	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
+	chtls_reset_synq(listen_ctx);
+
+	cxgb4_remove_server(cdev->lldi->ports[0], stid,
+			    cdev->lldi->rxq_ids[0], 0);
+	chtls_disconnect_acceptq(sk);
+}
+
+static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+	struct cpl_pass_open_rpl *rpl = cplhdr(skb) + RSS_HDR;
+	unsigned int stid = GET_TID(rpl);
+	struct listen_ctx *listen_ctx;
+
+	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
+	if (!listen_ctx)
+		return CPL_RET_BUF_DONE;
+
+	if (listen_ctx->state == T4_LISTEN_START_PENDING) {
+		listen_ctx->state = T4_LISTEN_STARTED;
+		return CPL_RET_BUF_DONE;
+	}
+
+	if (rpl->status != CPL_ERR_NONE) {
+		pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
+			rpl->status, stid);
+		return CPL_RET_BUF_DONE;
+	}
+	cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+	sock_put(listen_ctx->lsk);
+	kfree(listen_ctx);
+	module_put(THIS_MODULE);
+
+	return 0;
+}
+
+static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb) + RSS_HDR;
+	struct listen_ctx *listen_ctx;
+	unsigned int stid;
+	void *data;
+
+	stid = GET_TID(rpl);
+	data = lookup_stid(cdev->tids, stid);
+	listen_ctx = (struct listen_ctx *)data;
+
+	if (rpl->status != CPL_ERR_NONE) {
+		pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
+			rpl->status, stid);
+		return CPL_RET_BUF_DONE;
+	}
+
+	cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+	sock_put(listen_ctx->lsk);
+	kfree(listen_ctx);
+	module_put(THIS_MODULE);
+
+	return 0;
+}
+
+static void chtls_release_resources(struct sock *sk)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct chtls_dev *cdev = csk->cdev;
+	unsigned int tid = csk->tid;
+	struct tid_info *tids;
+
+	if (!cdev)
+		return;
+
+	tids = cdev->tids;
+	kfree_skb(csk->txdata_skb_cache);
+	csk->txdata_skb_cache = NULL;
+
+	if (csk->l2t_entry) {
+		cxgb4_l2t_release(csk->l2t_entry);
+		csk->l2t_entry = NULL;
+	}
+
+	cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
+	sock_put(sk);
+}
+
+static void chtls_conn_done(struct sock *sk)
+{
+	if (sock_flag(sk, SOCK_DEAD))
+		chtls_purge_receive_queue(sk);
+	sk_wakeup_sleepers(sk, 0);
+	tcp_done(sk);
+}
+
+static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
+{
+	/*
+	 * If the server is still open we clean up the child connection;
+	 * otherwise the server has already done the cleanup as it was
+	 * purging its SYN queue and the skb was just sitting in its backlog.
+	 */
+	if (likely(parent->sk_state == TCP_LISTEN)) {
+		cleanup_syn_rcv_conn(child, parent);
+		/* Without the call to sock_orphan() below we leak the
+		 * socket resource in the syn_flood test, as
+		 * inet_csk_destroy_sock() will not be called from
+		 * tcp_done() because the SOCK_DEAD flag is not set.
+		 * The stock kernel handles this differently: the new
+		 * socket is created only after the 3-way handshake
+		 * completes.
+		 */
+		sock_orphan(child);
+		percpu_counter_inc((child)->sk_prot->orphan_count);
+		chtls_release_resources(child);
+		chtls_conn_done(child);
+	} else {
+		if (csk_flag(child, CSK_RST_ABORTED)) {
+			chtls_release_resources(child);
+			chtls_conn_done(child);
+		}
+	}
+}
+
+static void pass_open_abort(struct sock *child, struct sock *parent,
+			    struct sk_buff *skb)
+{
+	do_abort_syn_rcv(child, parent);
+	kfree_skb(skb);
+}
+
+static void bl_pass_open_abort(struct sock *lsk, struct sk_buff *skb)
+{
+	pass_open_abort(skb->sk, lsk, skb);
+}
+
+static void chtls_pass_open_arp_failure(struct sock *sk,
+					struct sk_buff *skb)
+{
+	const struct request_sock *oreq;
+	struct chtls_sock *csk;
+	struct chtls_dev *cdev;
+	struct sock *parent;
+	void *data;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	cdev = csk->cdev;
+
+	/*
+	 * If the connection is being aborted due to the parent listening
+	 * socket going away there's nothing to do, the ABORT_REQ will close
+	 * the connection.
+	 */
+	if (csk_flag(sk, CSK_ABORT_RPL_PENDING)) {
+		kfree_skb(skb);
+		return;
+	}
+
+	oreq = csk->passive_reap_next;
+	data = lookup_stid(cdev->tids, oreq->ts_recent);
+	parent = ((struct listen_ctx *)data)->lsk;
+
+	bh_lock_sock(parent);
+	if (!sock_owned_by_user(parent)) {
+		pass_open_abort(sk, parent, skb);
+	} else {
+		BLOG_SKB_CB(skb)->backlog_rcv = bl_pass_open_abort;
+		__sk_add_backlog(parent, skb);
+	}
+	bh_unlock_sock(parent);
+}
+
+static void chtls_accept_rpl_arp_failure(void *handle,
+					 struct sk_buff *skb)
+{
+	struct sock *sk = (struct sock *)handle;
+
+	sock_hold(sk);
+	process_cpl_msg(chtls_pass_open_arp_failure, sk, skb);
+	sock_put(sk);
+}
+
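+/*
+ * Derive the advertised MSS for a passive connection from the path MTU, the
+ * peer's MSS option and any user-set limit, and return the adapter MTU-table
+ * index the hardware should use for this connection.
+ */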
+static unsigned int chtls_select_mss(const struct chtls_sock *csk,
+				     unsigned int pmtu,
+				     struct cpl_pass_accept_req *req)
+{
+	struct chtls_dev *cdev;
+	struct dst_entry *dst;
+	unsigned int tcpoptsz;
+	unsigned int iphdrsz;
+	unsigned int mtu_idx;
+	struct tcp_sock *tp;
+	unsigned int mss;
+	struct sock *sk;
+
+	mss = ntohs(req->tcpopt.mss);
+	sk = csk->sk;
+	dst = __sk_dst_get(sk);
+	cdev = csk->cdev;
+	tp = tcp_sk(sk);
+	tcpoptsz = 0;
+
+	iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
+	if (req->tcpopt.tstamp)
+		tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
+
+	tp->advmss = dst_metric_advmss(dst);
+	if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
+		tp->advmss = USER_MSS(tp);
+	if (tp->advmss > pmtu - iphdrsz)
+		tp->advmss = pmtu - iphdrsz;
+	if (mss && tp->advmss > mss)
+		tp->advmss = mss;
+
+	tp->advmss = cxgb4_best_aligned_mtu(cdev->lldi->mtus,
+					    iphdrsz + tcpoptsz,
+					    tp->advmss - tcpoptsz,
+					    8, &mtu_idx);
+	tp->advmss -= iphdrsz;
+
+	inet_csk(sk)->icsk_pmtu_cookie = pmtu;
+	return mtu_idx;
+}
+
+static unsigned int select_rcv_wnd(struct chtls_sock *csk)
+{
+	unsigned int rcvwnd;
+	unsigned int wnd;
+	struct sock *sk;
+
+	sk = csk->sk;
+	wnd = tcp_full_space(sk);
+
+	if (wnd < MIN_RCV_WND)
+		wnd = MIN_RCV_WND;
+
+	rcvwnd = MAX_RCV_WND;
+
+	csk_set_flag(csk, CSK_UPDATE_RCV_WND);
+	return min(wnd, rcvwnd);
+}
+
+static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
+{
+	int wscale = 0;
+
+	if (space > MAX_RCV_WND)
+		space = MAX_RCV_WND;
+	if (win_clamp && win_clamp < space)
+		space = win_clamp;
+
+	if (wscale_ok) {
+		while (wscale < 14 && (65535 << wscale) < space)
+			wscale++;
+	}
+	return wscale;
+}
+
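+/*
+ * Build and send the CPL_PASS_ACCEPT_RPL that commits the hardware to the
+ * new connection, encoding window scale, MSS index, L2T entry, congestion
+ * control and the TLS ULP mode in the opt0/opt2 fields.
+ */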
+static void chtls_pass_accept_rpl(struct sk_buff *skb,
+				  struct cpl_pass_accept_req *req,
+				  unsigned int tid)
+
+{
+	struct cpl_t5_pass_accept_rpl *rpl5;
+	struct cxgb4_lld_info *lldi;
+	const struct tcphdr *tcph;
+	const struct tcp_sock *tp;
+	struct chtls_sock *csk;
+	unsigned int len;
+	struct sock *sk;
+	u32 opt2, hlen;
+	u64 opt0;
+
+	sk = skb->sk;
+	tp = tcp_sk(sk);
+	csk = sk->sk_user_data;
+	csk->tid = tid;
+	lldi = csk->cdev->lldi;
+	len = roundup(sizeof(*rpl5), 16);
+
+	rpl5 = __skb_put_zero(skb, len);
+	INIT_TP_WR(rpl5, tid);
+
+	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+						     csk->tid));
+	csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
+					req);
+	opt0 = TCAM_BYPASS_F |
+	       WND_SCALE_V((tp)->rx_opt.rcv_wscale) |
+	       MSS_IDX_V(csk->mtu_idx) |
+	       L2T_IDX_V(csk->l2t_entry->idx) |
+	       NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
+	       TX_CHAN_V(csk->tx_chan) |
+	       SMAC_SEL_V(csk->smac_idx) |
+	       DSCP_V(csk->tos >> 2) |
+	       ULP_MODE_V(ULP_MODE_TLS) |
+	       RCV_BUFSIZ_V(min(tp->rcv_wnd >> 10, RCV_BUFSIZ_M));
+
+	opt2 = RX_CHANNEL_V(0) |
+		RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
+
+	if (!is_t5(lldi->adapter_type))
+		opt2 |= RX_FC_DISABLE_F;
+	if (req->tcpopt.tstamp)
+		opt2 |= TSTAMPS_EN_F;
+	if (req->tcpopt.sack)
+		opt2 |= SACK_EN_F;
+	hlen = ntohl(req->hdr_len);
+
+	tcph = (struct tcphdr *)((u8 *)(req + 1) +
+			T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
+	if (tcph->ece && tcph->cwr)
+		opt2 |= CCTRL_ECN_V(1);
+	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
+	opt2 |= T5_ISS_F;
+	opt2 |= T5_OPT_2_VALID_F;
+	rpl5->opt0 = cpu_to_be64(opt0);
+	rpl5->opt2 = cpu_to_be32(opt2);
+	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
+	t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
+	cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
+}
+
+static void inet_inherit_port(struct inet_hashinfo *hash_info,
+			      struct sock *lsk, struct sock *newsk)
+{
+	local_bh_disable();
+	__inet_inherit_port(lsk, newsk);
+	local_bh_enable();
+}
+
+static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	if (skb->protocol) {
+		kfree_skb(skb);
+		return 0;
+	}
+	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
+	return 0;
+}
+
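+/*
+ * Create the child socket for an offloaded passive open: clone the listener
+ * via tcp_create_openreq_child(), attach an L2T entry and a chtls_sock, and
+ * initialize addresses, queues and receive-window parameters from the
+ * CPL_PASS_ACCEPT_REQ.
+ */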
+static struct sock *chtls_recv_sock(struct sock *lsk,
+				    struct request_sock *oreq,
+				    void *network_hdr,
+				    const struct cpl_pass_accept_req *req,
+				    struct chtls_dev *cdev)
+{
+	const struct tcphdr *tcph;
+	struct inet_sock *newinet;
+	const struct iphdr *iph;
+	struct net_device *ndev;
+	struct chtls_sock *csk;
+	struct dst_entry *dst;
+	struct neighbour *n;
+	struct tcp_sock *tp;
+	struct sock *newsk;
+	u16 port_id;
+	int rxq_idx;
+	int step;
+
+	iph = (const struct iphdr *)network_hdr;
+	newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
+	if (!newsk)
+		goto free_oreq;
+
+	dst = inet_csk_route_child_sock(lsk, newsk, oreq);
+	if (!dst)
+		goto free_sk;
+
+	tcph = (struct tcphdr *)(iph + 1);
+	n = dst_neigh_lookup(dst, &iph->saddr);
+	if (!n)
+		goto free_sk;
+
+	ndev = n->dev;
+	if (!ndev)
+		goto free_dst;
+	port_id = cxgb4_port_idx(ndev);
+
+	csk = chtls_sock_create(cdev);
+	if (!csk)
+		goto free_dst;
+
+	csk->l2t_entry = cxgb4_l2t_get(cdev->lldi->l2t, n, ndev, 0);
+	if (!csk->l2t_entry)
+		goto free_csk;
+
+	newsk->sk_user_data = csk;
+	newsk->sk_backlog_rcv = chtls_backlog_rcv;
+
+	tp = tcp_sk(newsk);
+	newinet = inet_sk(newsk);
+
+	newinet->inet_daddr = iph->saddr;
+	newinet->inet_rcv_saddr = iph->daddr;
+	newinet->inet_saddr = iph->daddr;
+
+	oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
+	sk_setup_caps(newsk, dst);
+	csk->sk = newsk;
+	csk->passive_reap_next = oreq;
+	csk->tx_chan = cxgb4_port_chan(ndev);
+	csk->port_id = port_id;
+	csk->egress_dev = ndev;
+	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+	csk->ulp_mode = ULP_MODE_TLS;
+	step = cdev->lldi->nrxq / cdev->lldi->nchan;
+	csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
+	rxq_idx = port_id * step;
+	csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
+			port_id * step;
+	csk->sndbuf = newsk->sk_sndbuf;
+	csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi->adapter_type,
+					 cxgb4_port_viid(ndev));
+	tp->rcv_wnd = select_rcv_wnd(csk);
+	RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
+					   WSCALE_OK(tp),
+					   tp->window_clamp);
+	neigh_release(n);
+	inet_inherit_port(&tcp_hashinfo, lsk, newsk);
+	csk_set_flag(csk, CSK_CONN_INLINE);
+	bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */
+
+	return newsk;
+free_csk:
+	chtls_sock_release(&csk->kref);
+free_dst:
+	dst_release(dst);
+free_sk:
+	inet_csk_prepare_forced_close(newsk);
+	tcp_done(newsk);
+free_oreq:
+	chtls_reqsk_free(oreq);
+	return NULL;
+}
+
+/*
+ * Populate a TID_RELEASE WR.  The skb must already be properly sized.
+ */
+static  void mk_tid_release(struct sk_buff *skb,
+			    unsigned int chan, unsigned int tid)
+{
+	struct cpl_tid_release *req;
+	unsigned int len;
+
+	len = roundup(sizeof(struct cpl_tid_release), 16);
+	req = (struct cpl_tid_release *)__skb_put(skb, len);
+	memset(req, 0, len);
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
+	INIT_TP_WR_CPL(req, CPL_TID_RELEASE, tid);
+}
+
+static int chtls_get_module(struct sock *sk)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	if (!try_module_get(icsk->icsk_ulp_ops->owner))
+		return -1;
+
+	return 0;
+}
+
+static void chtls_pass_accept_request(struct sock *sk,
+				      struct sk_buff *skb)
+{
+	struct cpl_t5_pass_accept_rpl *rpl;
+	struct cpl_pass_accept_req *req;
+	struct listen_ctx *listen_ctx;
+	struct request_sock *oreq;
+	struct sk_buff *reply_skb;
+	struct chtls_sock *csk;
+	struct chtls_dev *cdev;
+	struct tcphdr *tcph;
+	struct sock *newsk;
+	struct ethhdr *eh;
+	struct iphdr *iph;
+	void *network_hdr;
+	unsigned int stid;
+	unsigned int len;
+	unsigned int tid;
+
+	req = cplhdr(skb) + RSS_HDR;
+	tid = GET_TID(req);
+	cdev = BLOG_SKB_CB(skb)->cdev;
+	newsk = lookup_tid(cdev->tids, tid);
+	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
+	if (newsk) {
+		pr_info("tid (%d) already in use\n", tid);
+		return;
+	}
+
+	len = roundup(sizeof(*rpl), 16);
+	reply_skb = alloc_skb(len, GFP_ATOMIC);
+	if (!reply_skb) {
+		cxgb4_remove_tid(cdev->tids, 0, tid, sk->sk_family);
+		kfree_skb(skb);
+		return;
+	}
+
+	if (sk->sk_state != TCP_LISTEN)
+		goto reject;
+
+	if (inet_csk_reqsk_queue_is_full(sk))
+		goto reject;
+
+	if (sk_acceptq_is_full(sk))
+		goto reject;
+
+	oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
+	if (!oreq)
+		goto reject;
+
+	oreq->rsk_rcv_wnd = 0;
+	oreq->rsk_window_clamp = 0;
+	oreq->cookie_ts = 0;
+	oreq->mss = 0;
+	oreq->ts_recent = 0;
+
+	eh = (struct ethhdr *)(req + 1);
+	iph = (struct iphdr *)(eh + 1);
+	if (iph->version != 0x4)
+		goto free_oreq;
+
+	network_hdr = (void *)(eh + 1);
+	tcph = (struct tcphdr *)(iph + 1);
+
+	tcp_rsk(oreq)->tfo_listener = false;
+	tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
+	chtls_set_req_port(oreq, tcph->source, tcph->dest);
+	inet_rsk(oreq)->ecn_ok = 0;
+	chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
+	if (req->tcpopt.wsf <= 14) {
+		inet_rsk(oreq)->wscale_ok = 1;
+		inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
+	}
+	inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
+
+	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
+	if (!newsk)
+		goto reject;
+
+	if (chtls_get_module(newsk))
+		goto reject;
+	inet_csk_reqsk_queue_added(sk);
+	reply_skb->sk = newsk;
+	chtls_install_cpl_ops(newsk);
+	cxgb4_insert_tid(cdev->tids, newsk, tid, newsk->sk_family);
+	csk = rcu_dereference_sk_user_data(newsk);
+	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
+	csk->listen_ctx = listen_ctx;
+	__skb_queue_tail(&listen_ctx->synq, (struct sk_buff *)&csk->synq);
+	chtls_pass_accept_rpl(reply_skb, req, tid);
+	kfree_skb(skb);
+	return;
+
+free_oreq:
+	chtls_reqsk_free(oreq);
+reject:
+	mk_tid_release(reply_skb, 0, tid);
+	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
+	kfree_skb(skb);
+}
+
+/*
+ * Handle a CPL_PASS_ACCEPT_REQ message.
+ */
+static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+	struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR;
+	struct listen_ctx *ctx;
+	unsigned int stid;
+	unsigned int tid;
+	struct sock *lsk;
+	void *data;
+
+	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
+	tid = GET_TID(req);
+
+	data = lookup_stid(cdev->tids, stid);
+	if (!data)
+		return 1;
+
+	ctx = (struct listen_ctx *)data;
+	lsk = ctx->lsk;
+
+	if (unlikely(tid >= cdev->tids->ntids)) {
+		pr_info("passive open TID %u too large\n", tid);
+		return 1;
+	}
+
+	BLOG_SKB_CB(skb)->cdev = cdev;
+	process_cpl_msg(chtls_pass_accept_request, lsk, skb);
+	return 0;
+}
+
+/*
+ * Completes some final bits of initialization for just-established connections
+ * and changes their state to TCP_ESTABLISHED.
+ *
+ * snd_isn here is the ISN after the SYN, i.e., the true ISN + 1.
+ */
+static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	tp->pushed_seq = snd_isn;
+	tp->write_seq = snd_isn;
+	tp->snd_nxt = snd_isn;
+	tp->snd_una = snd_isn;
+	inet_sk(sk)->inet_id = tp->write_seq ^ jiffies;
+	assign_rxopt(sk, opt);
+
+	if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
+		tp->rcv_wup -= tp->rcv_wnd - (RCV_BUFSIZ_M << 10);
+
+	smp_mb();
+	tcp_set_state(sk, TCP_ESTABLISHED);
+}
+
+static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb)
+{
+	struct sk_buff *abort_skb;
+
+	abort_skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
+	if (abort_skb)
+		chtls_send_reset(sk, CPL_ABORT_SEND_RST, abort_skb);
+}
+
+static struct sock *reap_list;
+static DEFINE_SPINLOCK(reap_list_lock);
+
+/*
+ * Process the reap list.
+ */
+DECLARE_TASK_FUNC(process_reap_list, task_param)
+{
+	spin_lock_bh(&reap_list_lock);
+	while (reap_list) {
+		struct sock *sk = reap_list;
+		struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+		reap_list = csk->passive_reap_next;
+		csk->passive_reap_next = NULL;
+		spin_unlock(&reap_list_lock);
+		sock_hold(sk);
+
+		bh_lock_sock(sk);
+		chtls_abort_conn(sk, NULL);
+		sock_orphan(sk);
+		if (sk->sk_state == TCP_CLOSE)
+			inet_csk_destroy_sock(sk);
+		bh_unlock_sock(sk);
+		sock_put(sk);
+		spin_lock(&reap_list_lock);
+	}
+	spin_unlock_bh(&reap_list_lock);
+}
+
+static DECLARE_WORK(reap_task, process_reap_list);
+
+static void add_to_reap_list(struct sock *sk)
+{
+	struct chtls_sock *csk = sk->sk_user_data;
+
+	local_bh_disable();
+	bh_lock_sock(sk);
+	release_tcp_port(sk); /* release the port immediately */
+
+	spin_lock(&reap_list_lock);
+	csk->passive_reap_next = reap_list;
+	reap_list = sk;
+	if (!csk->passive_reap_next)
+		schedule_work(&reap_task);
+	spin_unlock(&reap_list_lock);
+	bh_unlock_sock(sk);
+	local_bh_enable();
+}
+
+static void add_pass_open_to_parent(struct sock *child, struct sock *lsk,
+				    struct chtls_dev *cdev)
+{
+	struct request_sock *oreq;
+	struct chtls_sock *csk;
+
+	if (lsk->sk_state != TCP_LISTEN)
+		return;
+
+	csk = child->sk_user_data;
+	oreq = csk->passive_reap_next;
+	csk->passive_reap_next = NULL;
+
+	reqsk_queue_removed(&inet_csk(lsk)->icsk_accept_queue, oreq);
+	__skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
+
+	if (sk_acceptq_is_full(lsk)) {
+		chtls_reqsk_free(oreq);
+		add_to_reap_list(child);
+	} else {
+		refcount_set(&oreq->rsk_refcnt, 1);
+		inet_csk_reqsk_queue_add(lsk, oreq, child);
+		lsk->sk_data_ready(lsk);
+	}
+}
+
+static void bl_add_pass_open_to_parent(struct sock *lsk, struct sk_buff *skb)
+{
+	struct sock *child = skb->sk;
+
+	skb->sk = NULL;
+	add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->cdev);
+	kfree_skb(skb);
+}
+
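+/*
+ * CPL_PASS_ESTABLISH handler: finish establishing the child connection and
+ * hand it to the listening socket's accept queue, either directly or via the
+ * listener's backlog if the listener is currently owned by user context.
+ */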
+static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+	struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR;
+	struct chtls_sock *csk;
+	struct sock *lsk, *sk;
+	unsigned int hwtid;
+
+	hwtid = GET_TID(req);
+	sk = lookup_tid(cdev->tids, hwtid);
+	if (!sk)
+		return (CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE);
+
+	bh_lock_sock(sk);
+	if (unlikely(sock_owned_by_user(sk))) {
+		kfree_skb(skb);
+	} else {
+		unsigned int stid;
+		void *data;
+
+		csk = sk->sk_user_data;
+		csk->wr_max_credits = 64;
+		csk->wr_credits = 64;
+		csk->wr_unacked = 0;
+		make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
+		stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
+		sk->sk_state_change(sk);
+		if (unlikely(sk->sk_socket))
+			sk_wake_async(sk, 0, POLL_OUT);
+
+		data = lookup_stid(cdev->tids, stid);
+		lsk = ((struct listen_ctx *)data)->lsk;
+
+		bh_lock_sock(lsk);
+		if (unlikely(skb_queue_empty(&csk->listen_ctx->synq))) {
+			/* removed from synq */
+			bh_unlock_sock(lsk);
+			kfree_skb(skb);
+			goto unlock;
+		}
+
+		if (likely(!sock_owned_by_user(lsk))) {
+			kfree_skb(skb);
+			add_pass_open_to_parent(sk, lsk, cdev);
+		} else {
+			skb->sk = sk;
+			BLOG_SKB_CB(skb)->cdev = cdev;
+			BLOG_SKB_CB(skb)->backlog_rcv =
+				bl_add_pass_open_to_parent;
+			__sk_add_backlog(lsk, skb);
+		}
+		bh_unlock_sock(lsk);
+	}
+unlock:
+	bh_unlock_sock(sk);
+	return 0;
+}
+
+/*
+ * Handle receipt of an urgent pointer.
+ */
+static void handle_urg_ptr(struct sock *sk, u32 urg_seq)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	urg_seq--;
+	if (tp->urg_data && !after(urg_seq, tp->urg_seq))
+		return;	/* duplicate pointer */
+
+	sk_send_sigurg(sk);
+	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
+	    !sock_flag(sk, SOCK_URGINLINE) &&
+	    tp->copied_seq != tp->rcv_nxt) {
+		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+
+		tp->copied_seq++;
+		if (skb && tp->copied_seq - ULP_SKB_CB(skb)->seq >= skb->len)
+			chtls_free_skb(sk, skb);
+	}
+
+	tp->urg_data = TCP_URG_NOTYET;
+	tp->urg_seq = urg_seq;
+}
+
+static void check_sk_callbacks(struct chtls_sock *csk)
+{
+	struct sock *sk = csk->sk;
+
+	if (unlikely(sk->sk_user_data &&
+		     !csk_flag_nochk(csk, CSK_CALLBACKS_CHKD)))
+		csk_set_flag(csk, CSK_CALLBACKS_CHKD);
+}
+
+/*
+ * Handles Rx data that arrives in a state where the socket isn't accepting
+ * new data.
+ */
+static void handle_excess_rx(struct sock *sk, struct sk_buff *skb)
+{
+	if (!csk_flag(sk, CSK_ABORT_SHUTDOWN))
+		chtls_abort_conn(sk, skb);
+
+	kfree_skb(skb);
+}
+
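+/*
+ * Deliver a plain (non-TLS) CPL_RX_DATA payload to the socket's receive
+ * queue, updating rcv_nxt, urgent-data state and the delayed-ACK mode on
+ * the way.
+ */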
+static void chtls_recv_data(struct sock *sk, struct sk_buff *skb)
+{
+	struct cpl_rx_data *hdr = cplhdr(skb) + RSS_HDR;
+	struct chtls_sock *csk;
+	struct tcp_sock *tp;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	tp = tcp_sk(sk);
+
+	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
+		handle_excess_rx(sk, skb);
+		return;
+	}
+
+	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
+	ULP_SKB_CB(skb)->psh = hdr->psh;
+	skb_ulp_mode(skb) = ULP_MODE_NONE;
+
+	skb_reset_transport_header(skb);
+	__skb_pull(skb, sizeof(*hdr) + RSS_HDR);
+	if (!skb->data_len)
+		__skb_trim(skb, ntohs(hdr->len));
+
+	if (unlikely(hdr->urg))
+		handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg));
+	if (unlikely(tp->urg_data == TCP_URG_NOTYET &&
+		     tp->urg_seq - tp->rcv_nxt < skb->len))
+		tp->urg_data = TCP_URG_VALID |
+			       skb->data[tp->urg_seq - tp->rcv_nxt];
+
+	if (unlikely(hdr->dack_mode != csk->delack_mode)) {
+		csk->delack_mode = hdr->dack_mode;
+		csk->delack_seq = tp->rcv_nxt;
+	}
+
+	tcp_hdr(skb)->fin = 0;
+	tp->rcv_nxt += skb->len;
+
+	__skb_queue_tail(&sk->sk_receive_queue, skb);
+
+	if (!sock_flag(sk, SOCK_DEAD)) {
+		check_sk_callbacks(csk);
+		sk->sk_data_ready(sk);
+	}
+}
+
+static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+	struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR;
+	unsigned int hwtid = GET_TID(req);
+	struct sock *sk;
+
+	sk = lookup_tid(cdev->tids, hwtid);
+	if (unlikely(!sk)) {
+		pr_err("can't find conn. for hwtid %u.\n", hwtid);
+		return -EINVAL;
+	}
+	skb_dst_set(skb, NULL);
+	process_cpl_msg(chtls_recv_data, sk, skb);
+	return 0;
+}
+
+static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb)
+{
+	struct cpl_tls_data *hdr = cplhdr(skb);
+	struct chtls_sock *csk;
+	struct chtls_hws *tlsk;
+	struct tcp_sock *tp;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	tlsk = &csk->tlshws;
+	tp = tcp_sk(sk);
+
+	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
+		handle_excess_rx(sk, skb);
+		return;
+	}
+
+	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
+	ULP_SKB_CB(skb)->flags = 0;
+	skb_ulp_mode(skb) = ULP_MODE_TLS;
+
+	skb_reset_transport_header(skb);
+	__skb_pull(skb, sizeof(*hdr));
+	if (!skb->data_len)
+		__skb_trim(skb,
+			   CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd)));
+
+	if (unlikely(tp->urg_data == TCP_URG_NOTYET && tp->urg_seq -
+		     tp->rcv_nxt < skb->len))
+		tp->urg_data = TCP_URG_VALID |
+			       skb->data[tp->urg_seq - tp->rcv_nxt];
+
+	tcp_hdr(skb)->fin = 0;
+	tlsk->pldlen = CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd));
+	__skb_queue_tail(&tlsk->sk_recv_queue, skb);
+}
+
+static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+	struct cpl_tls_data *req = cplhdr(skb);
+	unsigned int hwtid = GET_TID(req);
+	struct sock *sk;
+
+	sk = lookup_tid(cdev->tids, hwtid);
+	if (unlikely(!sk)) {
+		pr_err("can't find conn. for hwtid %u.\n", hwtid);
+		return -EINVAL;
+	}
+	skb_dst_set(skb, NULL);
+	process_cpl_msg(chtls_recv_pdu, sk, skb);
+	return 0;
+}
+
+static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen)
+{
+	struct tlsrx_cmp_hdr *tls_cmp_hdr = cplhdr(skb);
+
+	skb->hdr_len = ntohs((__force __be16)tls_cmp_hdr->length);
+	tls_cmp_hdr->length = ntohs((__force __be16)nlen);
+}
+
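+/*
+ * Handle a CPL_RX_TLS_CMP completion: it carries the TLS record header for a
+ * record whose payload (if any) was queued earlier by chtls_recv_pdu, so
+ * pair the two and move them to the socket receive queue.
+ */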
+static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
+{
+	struct tlsrx_cmp_hdr *tls_hdr_pkt;
+	struct cpl_rx_tls_cmp *cmp_cpl;
+	struct sk_buff *skb_rec;
+	struct chtls_sock *csk;
+	struct chtls_hws *tlsk;
+	struct tcp_sock *tp;
+
+	cmp_cpl = cplhdr(skb);
+	csk = rcu_dereference_sk_user_data(sk);
+	tlsk = &csk->tlshws;
+	tp = tcp_sk(sk);
+
+	ULP_SKB_CB(skb)->seq = ntohl(cmp_cpl->seq);
+	ULP_SKB_CB(skb)->flags = 0;
+
+	skb_reset_transport_header(skb);
+	__skb_pull(skb, sizeof(*cmp_cpl));
+	tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data;
+	if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M)
+		tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
+	if (!skb->data_len)
+		__skb_trim(skb, TLS_HEADER_LENGTH);
+
+	tp->rcv_nxt +=
+		CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length));
+
+	ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR;
+	skb_rec = __skb_dequeue(&tlsk->sk_recv_queue);
+	if (!skb_rec) {
+		__skb_queue_tail(&sk->sk_receive_queue, skb);
+	} else {
+		chtls_set_hdrlen(skb, tlsk->pldlen);
+		tlsk->pldlen = 0;
+		__skb_queue_tail(&sk->sk_receive_queue, skb);
+		__skb_queue_tail(&sk->sk_receive_queue, skb_rec);
+	}
+
+	if (!sock_flag(sk, SOCK_DEAD)) {
+		check_sk_callbacks(csk);
+		sk->sk_data_ready(sk);
+	}
+}
+
+static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+	struct cpl_rx_tls_cmp *req = cplhdr(skb);
+	unsigned int hwtid = GET_TID(req);
+	struct sock *sk;
+
+	sk = lookup_tid(cdev->tids, hwtid);
+	if (unlikely(!sk)) {
+		pr_err("can't find conn. for hwtid %u.\n", hwtid);
+		return -EINVAL;
+	}
+	skb_dst_set(skb, NULL);
+	process_cpl_msg(chtls_rx_hdr, sk, skb);
+
+	return 0;
+}
+
+static void chtls_timewait(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	tp->rcv_nxt++;
+	tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
+	tp->srtt_us = 0;
+	tcp_time_wait(sk, TCP_TIME_WAIT, 0);
+}
+
+static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+	sk->sk_shutdown |= RCV_SHUTDOWN;
+	sock_set_flag(sk, SOCK_DONE);
+
+	switch (sk->sk_state) {
+	case TCP_SYN_RECV:
+	case TCP_ESTABLISHED:
+		tcp_set_state(sk, TCP_CLOSE_WAIT);
+		break;
+	case TCP_FIN_WAIT1:
+		tcp_set_state(sk, TCP_CLOSING);
+		break;
+	case TCP_FIN_WAIT2:
+		chtls_release_resources(sk);
+		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
+			chtls_conn_done(sk);
+		else
+			chtls_timewait(sk);
+		break;
+	default:
+		pr_info("cpl_peer_close in bad state %d\n", sk->sk_state);
+	}
+
+	if (!sock_flag(sk, SOCK_DEAD)) {
+		sk->sk_state_change(sk);
+		/* Do not send POLL_HUP for half-duplex close. */
+
+		if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
+		    sk->sk_state == TCP_CLOSE)
+			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
+		else
+			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+	}
+}
+
+static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
+{
+	struct cpl_close_con_rpl *rpl = cplhdr(skb) + RSS_HDR;
+	struct chtls_sock *csk;
+	struct tcp_sock *tp;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	tp = tcp_sk(sk);
+
+	tp->snd_una = ntohl(rpl->snd_nxt) - 1;  /* exclude FIN */
+
+	switch (sk->sk_state) {
+	case TCP_CLOSING:
+		chtls_release_resources(sk);
+		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
+			chtls_conn_done(sk);
+		else
+			chtls_timewait(sk);
+		break;
+	case TCP_LAST_ACK:
+		chtls_release_resources(sk);
+		chtls_conn_done(sk);
+		break;
+	case TCP_FIN_WAIT1:
+		tcp_set_state(sk, TCP_FIN_WAIT2);
+		sk->sk_shutdown |= SEND_SHUTDOWN;
+
+		if (!sock_flag(sk, SOCK_DEAD))
+			sk->sk_state_change(sk);
+		else if (tcp_sk(sk)->linger2 < 0 &&
+			 !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
+			chtls_abort_conn(sk, skb);
+		break;
+	default:
+		pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
+	}
+	kfree_skb(skb);
+}
+
+static struct sk_buff *get_cpl_skb(struct sk_buff *skb,
+				   size_t len, gfp_t gfp)
+{
+	if (likely(!skb_is_nonlinear(skb) && !skb_cloned(skb))) {
+		WARN_ONCE(skb->len < len, "skb alloc error");
+		__skb_trim(skb, len);
+		skb_get(skb);
+	} else {
+		skb = alloc_skb(len, gfp);
+		if (skb)
+			__skb_put(skb, len);
+	}
+	return skb;
+}
+
+static void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid,
+			     int cmd)
+{
+	struct cpl_abort_rpl *rpl = cplhdr(skb);
+
+	INIT_TP_WR_CPL(rpl, CPL_ABORT_RPL, tid);
+	rpl->cmd = cmd;
+}
+
+static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+	struct cpl_abort_req_rss *req = cplhdr(skb);
+	struct sk_buff *reply_skb;
+
+	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
+			      GFP_KERNEL | __GFP_NOFAIL);
+	__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+	set_abort_rpl_wr(reply_skb, GET_TID(req),
+			 (req->status & CPL_ABORT_NO_RST));
+	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, req->status >> 1);
+	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
+	kfree_skb(skb);
+}
+
+static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
+			   struct chtls_dev *cdev, int status, int queue)
+{
+	struct cpl_abort_req_rss *req = cplhdr(skb);
+	struct sk_buff *reply_skb;
+	struct chtls_sock *csk;
+
+	csk = rcu_dereference_sk_user_data(sk);
+
+	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
+			      GFP_KERNEL);
+
+	if (!reply_skb) {
+		req->status = (queue << 1);
+		send_defer_abort_rpl(cdev, skb);
+		return;
+	}
+
+	set_abort_rpl_wr(reply_skb, GET_TID(req), status);
+	kfree_skb(skb);
+
+	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
+	if (csk_conn_inline(csk)) {
+		struct l2t_entry *e = csk->l2t_entry;
+
+		if (e && sk->sk_state != TCP_SYN_RECV) {
+			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
+			return;
+		}
+	}
+	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
+}
+
+/*
+ * Add an skb to the deferred skb queue for processing from process context.
+ */
+static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
+			   defer_handler_t handler)
+{
+	DEFERRED_SKB_CB(skb)->handler = handler;
+	spin_lock_bh(&cdev->deferq.lock);
+	__skb_queue_tail(&cdev->deferq, skb);
+	if (skb_queue_len(&cdev->deferq) == 1)
+		schedule_work(&cdev->deferq_task);
+	spin_unlock_bh(&cdev->deferq.lock);
+}
+
+static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
+				 struct chtls_dev *cdev,
+				 int status, int queue)
+{
+	struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
+	struct sk_buff *reply_skb;
+	struct chtls_sock *csk;
+	unsigned int tid;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	tid = GET_TID(req);
+
+	reply_skb = get_cpl_skb(skb, sizeof(struct cpl_abort_rpl), gfp_any());
+	if (!reply_skb) {
+		req->status = (queue << 1) | status;
+		t4_defer_reply(skb, cdev, send_defer_abort_rpl);
+		return;
+	}
+
+	set_abort_rpl_wr(reply_skb, tid, status);
+	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
+	if (csk_conn_inline(csk)) {
+		struct l2t_entry *e = csk->l2t_entry;
+
+		if (e && sk->sk_state != TCP_SYN_RECV) {
+			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
+			return;
+		}
+	}
+	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
+	kfree_skb(skb);
+}
+
+/*
+ * This is run from a listener's backlog to abort a child connection in
+ * SYN_RCV state (i.e., one on the listener's SYN queue).
+ */
+static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
+{
+	struct chtls_sock *csk;
+	struct sock *child;
+	int queue;
+
+	child = skb->sk;
+	csk = rcu_dereference_sk_user_data(child);
+	queue = csk->txq_idx;
+
+	skb->sk	= NULL;
+	do_abort_syn_rcv(child, lsk);
+	send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+		       CPL_ABORT_NO_RST, queue);
+}
+
+static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	const struct request_sock *oreq;
+	struct listen_ctx *listen_ctx;
+	struct chtls_sock *csk;
+	struct chtls_dev *cdev;
+	struct sock *psk;
+	void *ctx;
+
+	csk = sk->sk_user_data;
+	oreq = csk->passive_reap_next;
+	cdev = csk->cdev;
+
+	if (!oreq)
+		return -1;
+
+	ctx = lookup_stid(cdev->tids, oreq->ts_recent);
+	if (!ctx)
+		return -1;
+
+	listen_ctx = (struct listen_ctx *)ctx;
+	psk = listen_ctx->lsk;
+
+	bh_lock_sock(psk);
+	if (!sock_owned_by_user(psk)) {
+		int queue = csk->txq_idx;
+
+		do_abort_syn_rcv(sk, psk);
+		send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
+	} else {
+		skb->sk = sk;
+		BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
+		__sk_add_backlog(psk, skb);
+	}
+	bh_unlock_sock(psk);
+	return 0;
+}
+
+static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
+{
+	const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
+	struct chtls_sock *csk = sk->sk_user_data;
+	int rst_status = CPL_ABORT_NO_RST;
+	int queue = csk->txq_idx;
+
+	if (is_neg_adv(req->status)) {
+		if (sk->sk_state == TCP_SYN_RECV)
+			chtls_set_tcb_tflag(sk, 0, 0);
+
+		kfree_skb(skb);
+		return;
+	}
+
+	csk_reset_flag(csk, CSK_ABORT_REQ_RCVD);
+
+	if (!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) &&
+	    !csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
+		struct tcp_sock *tp = tcp_sk(sk);
+
+		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
+			WARN_ONCE(1, "send_tx_flowc error");
+		csk_set_flag(csk, CSK_TX_DATA_SENT);
+	}
+
+	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
+
+	if (!csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
+		sk->sk_err = ETIMEDOUT;
+
+		if (!sock_flag(sk, SOCK_DEAD))
+			sk->sk_error_report(sk);
+
+		if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
+			return;
+
+		chtls_release_resources(sk);
+		chtls_conn_done(sk);
+	}
+
+	chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue);
+}
+
+static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
+{
+	struct cpl_abort_rpl_rss *rpl = cplhdr(skb) + RSS_HDR;
+	struct chtls_sock *csk;
+	struct chtls_dev *cdev;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	cdev = csk->cdev;
+
+	if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
+		csk_reset_flag(csk, CSK_ABORT_RPL_PENDING);
+		if (!csk_flag_nochk(csk, CSK_ABORT_REQ_RCVD)) {
+			if (sk->sk_state == TCP_SYN_SENT) {
+				cxgb4_remove_tid(cdev->tids,
+						 csk->port_id,
+						 GET_TID(rpl),
+						 sk->sk_family);
+				sock_put(sk);
+			}
+			chtls_release_resources(sk);
+			chtls_conn_done(sk);
+		}
+	}
+	kfree_skb(skb);
+}
+
+static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+	struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;
+	void (*fn)(struct sock *sk, struct sk_buff *skb);
+	unsigned int hwtid = GET_TID(req);
+	struct sock *sk;
+	u8 opcode;
+
+	opcode = ((const struct rss_header *)cplhdr(skb))->opcode;
+
+	sk = lookup_tid(cdev->tids, hwtid);
+	if (!sk)
+		goto rel_skb;
+
+	switch (opcode) {
+	case CPL_PEER_CLOSE:
+		fn = chtls_peer_close;
+		break;
+	case CPL_CLOSE_CON_RPL:
+		fn = chtls_close_con_rpl;
+		break;
+	case CPL_ABORT_REQ_RSS:
+		fn = chtls_abort_req_rss;
+		break;
+	case CPL_ABORT_RPL_RSS:
+		fn = chtls_abort_rpl_rss;
+		break;
+	default:
+		goto rel_skb;
+	}
+
+	process_cpl_msg(fn, sk, skb);
+	return 0;
+
+rel_skb:
+	kfree_skb(skb);
+	return 0;
+}
+
+static struct sk_buff *dequeue_wr(struct sock *sk)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct sk_buff *skb = csk->wr_skb_head;
+
+	if (likely(skb)) {
+		/* Don't bother clearing the tail */
+		csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
+		WR_SKB_CB(skb)->next_wr = NULL;
+	}
+	return skb;
+}
+
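+/*
+ * Process a CPL_FW4_ACK: return TX credits to the connection, free the
+ * work-request skbs they cover, advance snd_una and, if room was made,
+ * push more pending data and wake the writer.
+ */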
+static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
+{
+	struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
+	struct chtls_sock *csk = sk->sk_user_data;
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 credits = hdr->credits;
+	u32 snd_una;
+
+	snd_una = ntohl(hdr->snd_una);
+	csk->wr_credits += credits;
+
+	if (csk->wr_unacked > csk->wr_max_credits - csk->wr_credits)
+		csk->wr_unacked = csk->wr_max_credits - csk->wr_credits;
+
+	while (credits) {
+		struct sk_buff *pskb = csk->wr_skb_head;
+		u32 csum;
+
+		if (unlikely(!pskb)) {
+			if (csk->wr_nondata)
+				csk->wr_nondata -= credits;
+			break;
+		}
+		csum = (__force u32)pskb->csum;
+		if (unlikely(credits < csum)) {
+			pskb->csum = (__force __wsum)(csum - credits);
+			break;
+		}
+		dequeue_wr(sk);
+		credits -= csum;
+		kfree_skb(pskb);
+	}
+	if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
+		if (unlikely(before(snd_una, tp->snd_una))) {
+			kfree_skb(skb);
+			return;
+		}
+
+		if (tp->snd_una != snd_una) {
+			tp->snd_una = snd_una;
+			tp->rcv_tstamp = tcp_time_stamp(tp);
+			if (tp->snd_una == tp->snd_nxt &&
+			    !csk_flag_nochk(csk, CSK_TX_FAILOVER))
+				csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
+		}
+	}
+
+	if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_CH) {
+		unsigned int fclen16 = roundup(failover_flowc_wr_len, 16);
+
+		csk->wr_credits -= fclen16;
+		csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
+		csk_reset_flag(csk, CSK_TX_FAILOVER);
+	}
+	if (skb_queue_len(&csk->txq) && chtls_push_frames(csk, 0))
+		sk->sk_write_space(sk);
+
+	kfree_skb(skb);
+}
+
+static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+	struct cpl_fw4_ack *rpl = cplhdr(skb) + RSS_HDR;
+	unsigned int hwtid = GET_TID(rpl);
+	struct sock *sk;
+
+	sk = lookup_tid(cdev->tids, hwtid);
+	if (unlikely(!sk)) {
+		pr_err("can't find conn. for hwtid %u.\n", hwtid);
+		return -EINVAL;
+	}
+	process_cpl_msg(chtls_rx_ack, sk, skb);
+
+	return 0;
+}
+
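+/*
+ * Per-opcode dispatch table for the CPL messages handled by the connection
+ * management code.
+ */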
+chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
+	[CPL_PASS_OPEN_RPL]     = chtls_pass_open_rpl,
+	[CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
+	[CPL_PASS_ACCEPT_REQ]   = chtls_pass_accept_req,
+	[CPL_PASS_ESTABLISH]    = chtls_pass_establish,
+	[CPL_RX_DATA]           = chtls_rx_data,
+	[CPL_TLS_DATA]          = chtls_rx_pdu,
+	[CPL_RX_TLS_CMP]        = chtls_rx_cmp,
+	[CPL_PEER_CLOSE]        = chtls_conn_cpl,
+	[CPL_CLOSE_CON_RPL]     = chtls_conn_cpl,
+	[CPL_ABORT_REQ_RSS]     = chtls_conn_cpl,
+	[CPL_ABORT_RPL_RSS]     = chtls_conn_cpl,
+	[CPL_FW4_ACK]           = chtls_wr_ack,
+};
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
new file mode 100644
index 0000000..78eb3af
--- /dev/null
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CHTLS_CM_H__
+#define __CHTLS_CM_H__
+
+/*
+ * TCB settings
+ */
+/* 3:0 */
+#define TCB_ULP_TYPE_W    0
+#define TCB_ULP_TYPE_S    0
+#define TCB_ULP_TYPE_M    0xfULL
+#define TCB_ULP_TYPE_V(x) ((x) << TCB_ULP_TYPE_S)
+
+/* 11:4 */
+#define TCB_ULP_RAW_W    0
+#define TCB_ULP_RAW_S    4
+#define TCB_ULP_RAW_M    0xffULL
+#define TCB_ULP_RAW_V(x) ((x) << TCB_ULP_RAW_S)
+
+#define TF_TLS_KEY_SIZE_S    7
+#define TF_TLS_KEY_SIZE_V(x) ((x) << TF_TLS_KEY_SIZE_S)
+
+#define TF_TLS_CONTROL_S     2
+#define TF_TLS_CONTROL_V(x) ((x) << TF_TLS_CONTROL_S)
+
+#define TF_TLS_ACTIVE_S      1
+#define TF_TLS_ACTIVE_V(x) ((x) << TF_TLS_ACTIVE_S)
+
+#define TF_TLS_ENABLE_S      0
+#define TF_TLS_ENABLE_V(x) ((x) << TF_TLS_ENABLE_S)
+
+#define TF_RX_QUIESCE_S    15
+#define TF_RX_QUIESCE_V(x) ((x) << TF_RX_QUIESCE_S)
+
+/*
+ * Max receive window supported by HW in bytes.  Only a small part of it can
+ * be set through option0; the rest needs to be set through RX_DATA_ACK.
+ */
+#define MAX_RCV_WND ((1U << 27) - 1)
+#define MAX_MSS     65536
+
+/*
+ * Min receive window.  We want it to be large enough to accommodate receive
+ * coalescing, handle jumbo frames, and not trigger sender SWS avoidance.
+ */
+#define MIN_RCV_WND (24 * 1024U)
+#define LOOPBACK(x)     (((x) & htonl(0xff000000)) == htonl(0x7f000000))
+
+/* ulp_mem_io + ulptx_idata + payload + padding */
+#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
+
+/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
+#define TX_HEADER_LEN \
+	(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
+#define TX_TLSHDR_LEN \
+	(sizeof(struct fw_tlstx_data_wr) + sizeof(struct cpl_tx_tls_sfo) + \
+	 sizeof(struct sge_opaque_hdr))
+#define TXDATA_SKB_LEN 128
+
+enum {
+	CPL_TX_TLS_SFO_TYPE_CCS,
+	CPL_TX_TLS_SFO_TYPE_ALERT,
+	CPL_TX_TLS_SFO_TYPE_HANDSHAKE,
+	CPL_TX_TLS_SFO_TYPE_DATA,
+	CPL_TX_TLS_SFO_TYPE_HEARTBEAT,
+};
+
+enum {
+	TLS_HDR_TYPE_CCS = 20,
+	TLS_HDR_TYPE_ALERT,
+	TLS_HDR_TYPE_HANDSHAKE,
+	TLS_HDR_TYPE_RECORD,
+	TLS_HDR_TYPE_HEARTBEAT,
+};
+
+typedef void (*defer_handler_t)(struct chtls_dev *dev, struct sk_buff *skb);
+extern struct request_sock_ops chtls_rsk_ops;
+
+struct deferred_skb_cb {
+	defer_handler_t handler;
+	struct chtls_dev *dev;
+};
+
+#define DEFERRED_SKB_CB(skb) ((struct deferred_skb_cb *)(skb)->cb)
+#define failover_flowc_wr_len offsetof(struct fw_flowc_wr, mnemval[3])
+#define WR_SKB_CB(skb) ((struct wr_skb_cb *)(skb)->cb)
+#define ACCEPT_QUEUE(sk) (&inet_csk(sk)->icsk_accept_queue.rskq_accept_head)
+
+#define SND_WSCALE(tp) ((tp)->rx_opt.snd_wscale)
+#define RCV_WSCALE(tp) ((tp)->rx_opt.rcv_wscale)
+#define USER_MSS(tp) ((tp)->rx_opt.user_mss)
+#define TS_RECENT_STAMP(tp) ((tp)->rx_opt.ts_recent_stamp)
+#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
+#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
+#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
+#define INC_ORPHAN_COUNT(sk) percpu_counter_inc((sk)->sk_prot->orphan_count)
+
+/* TLS SKB */
+#define skb_ulp_tls_inline(skb)      (ULP_SKB_CB(skb)->ulp.tls.ofld)
+#define skb_ulp_tls_iv_imm(skb)      (ULP_SKB_CB(skb)->ulp.tls.iv)
+
+void chtls_defer_reply(struct sk_buff *skb, struct chtls_dev *dev,
+		       defer_handler_t handler);
+
+/*
+ * Returns true if the socket is in one of the supplied states.
+ */
+static inline unsigned int sk_in_state(const struct sock *sk,
+				       unsigned int states)
+{
+	return states & (1 << sk->sk_state);
+}
+
+static void chtls_rsk_destructor(struct request_sock *req)
+{
+	/* do nothing */
+}
+
+static inline void chtls_init_rsk_ops(struct proto *chtls_tcp_prot,
+				      struct request_sock_ops *chtls_tcp_ops,
+				      struct proto *tcp_prot, int family)
+{
+	memset(chtls_tcp_ops, 0, sizeof(*chtls_tcp_ops));
+	chtls_tcp_ops->family = family;
+	chtls_tcp_ops->obj_size = sizeof(struct tcp_request_sock);
+	chtls_tcp_ops->destructor = chtls_rsk_destructor;
+	chtls_tcp_ops->slab = tcp_prot->rsk_prot->slab;
+	chtls_tcp_prot->rsk_prot = chtls_tcp_ops;
+}
+
+static inline void chtls_reqsk_free(struct request_sock *req)
+{
+	if (req->rsk_listener)
+		sock_put(req->rsk_listener);
+	kmem_cache_free(req->rsk_ops->slab, req);
+}
+
+#define DECLARE_TASK_FUNC(task, task_param) \
+		static void task(struct work_struct *task_param)
+
+static inline void sk_wakeup_sleepers(struct sock *sk, bool interruptable)
+{
+	struct socket_wq *wq;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (skwq_has_sleeper(wq)) {
+		if (interruptable)
+			wake_up_interruptible(sk_sleep(sk));
+		else
+			wake_up_all(sk_sleep(sk));
+	}
+	rcu_read_unlock();
+}
+
+static inline void chtls_set_req_port(struct request_sock *oreq,
+				      __be16 source, __be16 dest)
+{
+	inet_rsk(oreq)->ir_rmt_port = source;
+	inet_rsk(oreq)->ir_num = ntohs(dest);
+}
+
+static inline void chtls_set_req_addr(struct request_sock *oreq,
+				      __be32 local_ip, __be32 peer_ip)
+{
+	inet_rsk(oreq)->ir_loc_addr = local_ip;
+	inet_rsk(oreq)->ir_rmt_addr = peer_ip;
+}
+
+static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
+{
+	skb_dst_set(skb, NULL);
+	__skb_unlink(skb, &sk->sk_receive_queue);
+	__kfree_skb(skb);
+}
+
+static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
+{
+	skb_dst_set(skb, NULL);
+	__skb_unlink(skb, &sk->sk_receive_queue);
+	kfree_skb(skb);
+}
+
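+/*
+ * Append a work-request skb to the per-connection pending-WR list; entries
+ * stay here (with an extra reference) until the matching CPL_FW4_ACK returns
+ * their credits and dequeue_wr() releases them.
+ */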
+static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
+{
+	WR_SKB_CB(skb)->next_wr = NULL;
+
+	skb_get(skb);
+
+	if (!csk->wr_skb_head)
+		csk->wr_skb_head = skb;
+	else
+		WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
+	csk->wr_skb_tail = skb;
+}
+#endif
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
new file mode 100644
index 0000000..4909607
--- /dev/null
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/tls.h>
+#include <net/tls.h>
+
+#include "chtls.h"
+#include "chtls_cm.h"
+
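+/*
+ * Fill in a CPL_SET_TCB_FIELD work request (followed by a padding
+ * ULP_TX_SC_NOOP) that updates the masked bits of the given TCB word to the
+ * given value for this connection.
+ */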
+static void __set_tcb_field_direct(struct chtls_sock *csk,
+				   struct cpl_set_tcb_field *req, u16 word,
+				   u64 mask, u64 val, u8 cookie, int no_reply)
+{
+	struct ulptx_idata *sc;
+
+	INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, csk->tid);
+	req->wr.wr_mid |= htonl(FW_WR_FLOWID_V(csk->tid));
+	req->reply_ctrl = htons(NO_REPLY_V(no_reply) |
+				QUEUENO_V(csk->rss_qid));
+	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
+	req->mask = cpu_to_be64(mask);
+	req->val = cpu_to_be64(val);
+	sc = (struct ulptx_idata *)(req + 1);
+	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
+	sc->len = htonl(0);
+}
+
+static void __set_tcb_field(struct sock *sk, struct sk_buff *skb, u16 word,
+			    u64 mask, u64 val, u8 cookie, int no_reply)
+{
+	struct cpl_set_tcb_field *req;
+	struct chtls_sock *csk;
+	struct ulptx_idata *sc;
+	unsigned int wrlen;
+
+	wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);
+	csk = rcu_dereference_sk_user_data(sk);
+
+	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
+	__set_tcb_field_direct(csk, req, word, mask, val, cookie, no_reply);
+	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
+}
+
+/*
+ * Send a control message to the HW; the message goes as immediate data
+ * and the packet is freed immediately.
+ */
+static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
+{
+	struct cpl_set_tcb_field *req;
+	unsigned int credits_needed;
+	struct chtls_sock *csk;
+	struct ulptx_idata *sc;
+	struct sk_buff *skb;
+	unsigned int wrlen;
+	int ret;
+
+	wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);
+
+	skb = alloc_skb(wrlen, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	credits_needed = DIV_ROUND_UP(wrlen, 16);
+	csk = rcu_dereference_sk_user_data(sk);
+
+	__set_tcb_field(sk, skb, word, mask, val, 0, 1);
+	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
+	csk->wr_credits -= credits_needed;
+	csk->wr_unacked += credits_needed;
+	enqueue_wr(csk, skb);
+	ret = cxgb4_ofld_send(csk->egress_dev, skb);
+	if (ret < 0)
+		kfree_skb(skb);
+	return ret < 0 ? ret : 0;
+}
+
+/*
+ * Set one of the t_flags bits in the TCB.
+ */
+int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
+{
+	return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
+				   (u64)val << bit_pos);
+}
+
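+/*
+ * Helpers that program individual TCB fields: the TLS key id (word 31),
+ * the sequence number field (word 28, cleared to zero) and the
+ * RX_QUIESCE flag.
+ */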
+static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
+{
+	return chtls_set_tcb_field(sk, 31, 0xFFFFFFFFULL, keyid);
+}
+
+static int chtls_set_tcb_seqno(struct sock *sk)
+{
+	return chtls_set_tcb_field(sk, 28, ~0ULL, 0);
+}
+
+static int chtls_set_tcb_quiesce(struct sock *sk, int val)
+{
+	return chtls_set_tcb_field(sk, 1, (1ULL << TF_RX_QUIESCE_S),
+				   TF_RX_QUIESCE_V(val));
+}
+
+/* TLS Key bitmap processing */
+int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
+{
+	unsigned int num_key_ctx, bsize;
+	int ksize;
+
+	num_key_ctx = (lldi->vr->key.size / TLS_KEY_CONTEXT_SZ);
+	bsize = BITS_TO_LONGS(num_key_ctx);
+
+	cdev->kmap.size = num_key_ctx;
+	cdev->kmap.available = bsize;
+	ksize = sizeof(*cdev->kmap.addr) * bsize;
+	cdev->kmap.addr = kvzalloc(ksize, GFP_KERNEL);
+	if (!cdev->kmap.addr)
+		return -ENOMEM;
+
+	cdev->kmap.start = lldi->vr->key.start;
+	spin_lock_init(&cdev->kmap.lock);
+	return 0;
+}
+
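+/*
+ * Allocate a free slot from the key-map bitmap and record it as the
+ * connection's RX or TX key id, bumping the adapter TLS key counter.
+ */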
+static int get_new_keyid(struct chtls_sock *csk, u32 optname)
+{
+	struct net_device *dev = csk->egress_dev;
+	struct chtls_dev *cdev = csk->cdev;
+	struct chtls_hws *hws;
+	struct adapter *adap;
+	int keyid;
+
+	adap = netdev2adap(dev);
+	hws = &csk->tlshws;
+
+	spin_lock_bh(&cdev->kmap.lock);
+	keyid = find_first_zero_bit(cdev->kmap.addr, cdev->kmap.size);
+	if (keyid < cdev->kmap.size) {
+		__set_bit(keyid, cdev->kmap.addr);
+		if (optname == TLS_RX)
+			hws->rxkey = keyid;
+		else
+			hws->txkey = keyid;
+		atomic_inc(&adap->chcr_stats.tls_key);
+	} else {
+		keyid = -1;
+	}
+	spin_unlock_bh(&cdev->kmap.lock);
+	return keyid;
+}
+
+void free_tls_keyid(struct sock *sk)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct net_device *dev = csk->egress_dev;
+	struct chtls_dev *cdev = csk->cdev;
+	struct chtls_hws *hws;
+	struct adapter *adap;
+
+	if (!cdev->kmap.addr)
+		return;
+
+	adap = netdev2adap(dev);
+	hws = &csk->tlshws;
+
+	spin_lock_bh(&cdev->kmap.lock);
+	if (hws->rxkey >= 0) {
+		__clear_bit(hws->rxkey, cdev->kmap.addr);
+		atomic_dec(&adap->chcr_stats.tls_key);
+		hws->rxkey = -1;
+	}
+	if (hws->txkey >= 0) {
+		__clear_bit(hws->txkey, cdev->kmap.addr);
+		atomic_dec(&adap->chcr_stats.tls_key);
+		hws->txkey = -1;
+	}
+	spin_unlock_bh(&cdev->kmap.lock);
+}
+
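+/* Convert a key-map slot to an adapter key memory address (32-byte units) */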
+unsigned int keyid_to_addr(int start_addr, int keyid)
+{
+	return (start_addr + (keyid * TLS_KEY_CONTEXT_SZ)) >> 5;
+}
+
+static void chtls_rxkey_ivauth(struct _key_ctx *kctx)
+{
+	kctx->iv_to_auth = cpu_to_be64(KEYCTX_TX_WR_IV_V(6ULL) |
+				  KEYCTX_TX_WR_AAD_V(1ULL) |
+				  KEYCTX_TX_WR_AADST_V(5ULL) |
+				  KEYCTX_TX_WR_CIPHER_V(14ULL) |
+				  KEYCTX_TX_WR_CIPHERST_V(0ULL) |
+				  KEYCTX_TX_WR_AUTH_V(14ULL) |
+				  KEYCTX_TX_WR_AUTHST_V(16ULL) |
+				  KEYCTX_TX_WR_AUTHIN_V(16ULL));
+}
+
+static int chtls_key_info(struct chtls_sock *csk,
+			  struct _key_ctx *kctx,
+			  u32 keylen, u32 optname)
+{
+	unsigned char key[AES_KEYSIZE_128];
+	struct tls12_crypto_info_aes_gcm_128 *gcm_ctx;
+	unsigned char ghash_h[AEAD_H_SIZE];
+	struct crypto_cipher *cipher;
+	int ck_size, key_ctx_size;
+	int ret;
+
+	gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *)
+		  &csk->tlshws.crypto_info;
+
+	key_ctx_size = sizeof(struct _key_ctx) +
+		       roundup(keylen, 16) + AEAD_H_SIZE;
+
+	if (keylen == AES_KEYSIZE_128) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	} else {
+		pr_err("GCM: Invalid key length %d\n", keylen);
+		return -EINVAL;
+	}
+	memcpy(key, gcm_ctx->key, keylen);
+
+	/* Calculate H = CIPH(K, 0 repeated 16 times), the GHASH hash subkey.
+	 * It goes at the end of the key context.
+	 */
+	cipher = crypto_alloc_cipher("aes", 0, 0);
+	if (IS_ERR(cipher)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = crypto_cipher_setkey(cipher, key, keylen);
+	if (ret)
+		goto out1;
+
+	memset(ghash_h, 0, AEAD_H_SIZE);
+	crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+	csk->tlshws.keylen = key_ctx_size;
+
+	/* Copy the Key context */
+	if (optname == TLS_RX) {
+		int key_ctx;
+
+		key_ctx = ((key_ctx_size >> 4) << 3);
+		kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
+						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
+						 0, 0, key_ctx);
+		chtls_rxkey_ivauth(kctx);
+	} else {
+		kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
+						 0, 0, key_ctx_size >> 4);
+	}
+
+	memcpy(kctx->salt, gcm_ctx->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+	memcpy(kctx->key, gcm_ctx->key, keylen);
+	memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
+	/* erase key info from driver */
+	memset(gcm_ctx->key, 0, keylen);
+
+out1:
+	crypto_free_cipher(cipher);
+out:
+	return ret;
+}
+
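+/*
+ * Initialize the security command (SCMD) template used when building
+ * TLS TX work requests for this connection.
+ */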
+static void chtls_set_scmd(struct chtls_sock *csk)
+{
+	struct chtls_hws *hws = &csk->tlshws;
+
+	hws->scmd.seqno_numivs =
+		SCMD_SEQ_NO_CTRL_V(3) |
+		SCMD_PROTO_VERSION_V(0) |
+		SCMD_ENC_DEC_CTRL_V(0) |
+		SCMD_CIPH_AUTH_SEQ_CTRL_V(1) |
+		SCMD_CIPH_MODE_V(2) |
+		SCMD_AUTH_MODE_V(4) |
+		SCMD_HMAC_CTRL_V(0) |
+		SCMD_IV_SIZE_V(4) |
+		SCMD_NUM_IVS_V(1);
+
+	hws->scmd.ivgen_hdrlen =
+		SCMD_IV_GEN_CTRL_V(1) |
+		SCMD_KEY_CTX_INLINE_V(0) |
+		SCMD_TLS_FRAG_ENABLE_V(1);
+}
+
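+/*
+ * Program a TLS key into adapter key memory with a ULP_TX memory-write
+ * work request and, for RX keys, update the connection's TCB so the
+ * hardware starts using it.
+ */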
+int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
+{
+	struct tls_key_req *kwr;
+	struct chtls_dev *cdev;
+	struct _key_ctx *kctx;
+	int wrlen, klen, len;
+	struct sk_buff *skb;
+	struct sock *sk;
+	int keyid;
+	int kaddr;
+	int ret;
+
+	cdev = csk->cdev;
+	sk = csk->sk;
+
+	klen = roundup((keylen + AEAD_H_SIZE) + sizeof(*kctx), 32);
+	wrlen = roundup(sizeof(*kwr), 16);
+	len = klen + wrlen;
+
+	/* Flush outstanding data before the new key takes effect */
+	if (optname == TLS_TX) {
+		lock_sock(sk);
+		if (skb_queue_len(&csk->txq))
+			chtls_push_frames(csk, 0);
+		release_sock(sk);
+	}
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	keyid = get_new_keyid(csk, optname);
+	if (keyid < 0) {
+		ret = -ENOSPC;
+		goto out_nokey;
+	}
+
+	kaddr = keyid_to_addr(cdev->kmap.start, keyid);
+	kwr = (struct tls_key_req *)__skb_put_zero(skb, len);
+	kwr->wr.op_to_compl =
+		cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | FW_WR_COMPL_F |
+		      FW_WR_ATOMIC_V(1U));
+	kwr->wr.flowid_len16 =
+		cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)) |
+			    FW_WR_FLOWID_V(csk->tid));
+	kwr->wr.protocol = 0;
+	kwr->wr.mfs = htons(TLS_MFS);
+	kwr->wr.reneg_to_write_rx = optname;
+
+	/* ulptx command */
+	kwr->req.cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+			    T5_ULP_MEMIO_ORDER_V(1) |
+			    T5_ULP_MEMIO_IMM_V(1));
+	kwr->req.len16 = cpu_to_be32((csk->tid << 8) |
+			      DIV_ROUND_UP(len - sizeof(kwr->wr), 16));
+	kwr->req.dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(klen >> 5));
+	kwr->req.lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(kaddr));
+
+	/* sub command */
+	kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
+	kwr->sc_imm.len = cpu_to_be32(klen);
+
+	/* key info */
+	kctx = (struct _key_ctx *)(kwr + 1);
+	ret = chtls_key_info(csk, kctx, keylen, optname);
+	if (ret)
+		goto out_notcb;
+
+	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
+	csk->wr_credits -= DIV_ROUND_UP(len, 16);
+	csk->wr_unacked += DIV_ROUND_UP(len, 16);
+	enqueue_wr(csk, skb);
+	cxgb4_ofld_send(csk->egress_dev, skb);
+
+	chtls_set_scmd(csk);
+	/* For an RX key, also program the TCB: key id, TLS flags and
+	 * sequence number, then clear quiesce to resume receive processing.
+	 */
+	if (optname == TLS_RX) {
+		ret = chtls_set_tcb_keyid(sk, keyid);
+		if (ret)
+			goto out_notcb;
+		ret = chtls_set_tcb_field(sk, 0,
+					  TCB_ULP_RAW_V(TCB_ULP_RAW_M),
+					  TCB_ULP_RAW_V((TF_TLS_KEY_SIZE_V(1) |
+					  TF_TLS_CONTROL_V(1) |
+					  TF_TLS_ACTIVE_V(1) |
+					  TF_TLS_ENABLE_V(1))));
+		if (ret)
+			goto out_notcb;
+		ret = chtls_set_tcb_seqno(sk);
+		if (ret)
+			goto out_notcb;
+		ret = chtls_set_tcb_quiesce(sk, 0);
+		if (ret)
+			goto out_notcb;
+		csk->tlshws.rxkey = keyid;
+	} else {
+		csk->tlshws.tx_seq_no = 0;
+		csk->tlshws.txkey = keyid;
+	}
+
+	return ret;
+out_notcb:
+	free_tls_keyid(sk);
+out_nokey:
+	kfree_skb(skb);
+	return ret;
+}
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
new file mode 100644
index 0000000..afebbd8
--- /dev/null
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -0,0 +1,1875 @@
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sched/signal.h>
+#include <net/tcp.h>
+#include <net/busy_poll.h>
+#include <crypto/aes.h>
+
+#include "chtls.h"
+#include "chtls_cm.h"
+
+static bool is_tls_tx(struct chtls_sock *csk)
+{
+	return csk->tlshws.txkey >= 0;
+}
+
+static bool is_tls_rx(struct chtls_sock *csk)
+{
+	return csk->tlshws.rxkey >= 0;
+}
+
+static int data_sgl_len(const struct sk_buff *skb)
+{
+	unsigned int cnt;
+
+	cnt = skb_shinfo(skb)->nr_frags;
+	return sgl_len(cnt) * 8;
+}
+
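+/* Number of IVs needed for 'size' bytes, one per MFS-sized TLS record */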
+static int nos_ivs(struct sock *sk, unsigned int size)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+	return DIV_ROUND_UP(size, csk->tlshws.mfs);
+}
+
+static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb)
+{
+	int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE;
+	int hlen = TLS_WR_CPL_LEN + data_sgl_len(skb);
+
+	if ((hlen + KEY_ON_MEM_SZ + ivs_size) <
+	    MAX_IMM_OFLD_TX_DATA_WR_LEN) {
+		ULP_SKB_CB(skb)->ulp.tls.iv = 1;
+		return 1;
+	}
+	ULP_SKB_CB(skb)->ulp.tls.iv = 0;
+	return 0;
+}
+
+static int max_ivs_size(struct sock *sk, int size)
+{
+	return nos_ivs(sk, size) * CIPHER_BLOCK_SIZE;
+}
+
+static int ivs_size(struct sock *sk, const struct sk_buff *skb)
+{
+	return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) *
+		 CIPHER_BLOCK_SIZE) : 0;
+}
+
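+/*
+ * Return the number of 16-byte credits needed by a FLOWC work request
+ * carrying 'nparams' mnemonics; optionally return the padded length too.
+ */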
+static int flowc_wr_credits(int nparams, int *flowclenp)
+{
+	int flowclen16, flowclen;
+
+	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+	flowclen16 = DIV_ROUND_UP(flowclen, 16);
+	flowclen = flowclen16 * 16;
+
+	if (flowclenp)
+		*flowclenp = flowclen;
+
+	return flowclen16;
+}
+
+static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
+					   struct fw_flowc_wr *flowc,
+					   int flowclen)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct sk_buff *skb;
+
+	skb = alloc_skb(flowclen, GFP_ATOMIC);
+	if (!skb)
+		return NULL;
+
+	memcpy(__skb_put(skb, flowclen), flowc, flowclen);
+	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
+
+	return skb;
+}
+
+static int send_flowc_wr(struct sock *sk, struct fw_flowc_wr *flowc,
+			 int flowclen)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+	int flowclen16;
+	int ret;
+
+	flowclen16 = flowclen / 16;
+
+	if (csk_flag(sk, CSK_TX_DATA_SENT)) {
+		skb = create_flowc_wr_skb(sk, flowc, flowclen);
+		if (!skb)
+			return -ENOMEM;
+
+		skb_entail(sk, skb,
+			   ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
+		return 0;
+	}
+
+	ret = cxgb4_immdata_send(csk->egress_dev,
+				 csk->txq_idx,
+				 flowc, flowclen);
+	if (!ret)
+		return flowclen16;
+	skb = create_flowc_wr_skb(sk, flowc, flowclen);
+	if (!skb)
+		return -ENOMEM;
+	send_or_defer(sk, tp, skb, 0);
+	return flowclen16;
+}
+
+static u8 tcp_state_to_flowc_state(u8 state)
+{
+	switch (state) {
+	case TCP_ESTABLISHED:
+		return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
+	case TCP_CLOSE_WAIT:
+		return FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT;
+	case TCP_FIN_WAIT1:
+		return FW_FLOWC_MNEM_TCPSTATE_FINWAIT1;
+	case TCP_CLOSING:
+		return FW_FLOWC_MNEM_TCPSTATE_CLOSING;
+	case TCP_LAST_ACK:
+		return FW_FLOWC_MNEM_TCPSTATE_LASTACK;
+	case TCP_FIN_WAIT2:
+		return FW_FLOWC_MNEM_TCPSTATE_FINWAIT2;
+	}
+
+	return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
+}
+
+int send_tx_flowc_wr(struct sock *sk, int compl,
+		     u32 snd_nxt, u32 rcv_nxt)
+{
+	struct flowc_packed {
+		struct fw_flowc_wr fc;
+		struct fw_flowc_mnemval mnemval[FW_FLOWC_MNEM_MAX];
+	} __packed sflowc;
+	int nparams, paramidx, flowclen16, flowclen;
+	struct fw_flowc_wr *flowc;
+	struct chtls_sock *csk;
+	struct tcp_sock *tp;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	tp = tcp_sk(sk);
+	memset(&sflowc, 0, sizeof(sflowc));
+	flowc = &sflowc.fc;
+
+#define FLOWC_PARAM(__m, __v) \
+	do { \
+		flowc->mnemval[paramidx].mnemonic = FW_FLOWC_MNEM_##__m; \
+		flowc->mnemval[paramidx].val = cpu_to_be32(__v); \
+		paramidx++; \
+	} while (0)
+
+	paramidx = 0;
+
+	FLOWC_PARAM(PFNVFN, FW_PFVF_CMD_PFN_V(csk->cdev->lldi->pf));
+	FLOWC_PARAM(CH, csk->tx_chan);
+	FLOWC_PARAM(PORT, csk->tx_chan);
+	FLOWC_PARAM(IQID, csk->rss_qid);
+	FLOWC_PARAM(SNDNXT, tp->snd_nxt);
+	FLOWC_PARAM(RCVNXT, tp->rcv_nxt);
+	FLOWC_PARAM(SNDBUF, csk->sndbuf);
+	FLOWC_PARAM(MSS, tp->mss_cache);
+	FLOWC_PARAM(TCPSTATE, tcp_state_to_flowc_state(sk->sk_state));
+
+	if (SND_WSCALE(tp))
+		FLOWC_PARAM(RCV_SCALE, SND_WSCALE(tp));
+
+	if (csk->ulp_mode == ULP_MODE_TLS)
+		FLOWC_PARAM(ULD_MODE, ULP_MODE_TLS);
+
+	if (csk->tlshws.fcplenmax)
+		FLOWC_PARAM(TXDATAPLEN_MAX, csk->tlshws.fcplenmax);
+
+	nparams = paramidx;
+#undef FLOWC_PARAM
+
+	flowclen16 = flowc_wr_credits(nparams, &flowclen);
+	flowc->op_to_nparams =
+		cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+			    FW_WR_COMPL_V(compl) |
+			    FW_FLOWC_WR_NPARAMS_V(nparams));
+	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
+					  FW_WR_FLOWID_V(csk->tid));
+
+	return send_flowc_wr(sk, flowc, flowclen);
+}
+
+/* Copy IVs to WR */
+static int tls_copy_ivs(struct sock *sk, struct sk_buff *skb)
+
+{
+	struct chtls_sock *csk;
+	unsigned char *iv_loc;
+	struct chtls_hws *hws;
+	unsigned char *ivs;
+	u16 number_of_ivs;
+	struct page *page;
+	int err = 0;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	hws = &csk->tlshws;
+	number_of_ivs = nos_ivs(sk, skb->len);
+
+	if (number_of_ivs > MAX_IVS_PAGE) {
+		pr_warn("MAX IVs in PAGE exceeded %d\n", number_of_ivs);
+		return -ENOMEM;
+	}
+
+	/* generate the IVs */
+	ivs = kmalloc_array(CIPHER_BLOCK_SIZE, number_of_ivs, GFP_ATOMIC);
+	if (!ivs)
+		return -ENOMEM;
+	get_random_bytes(ivs, number_of_ivs * CIPHER_BLOCK_SIZE);
+
+	if (skb_ulp_tls_iv_imm(skb)) {
+		/* send the IVs as immediate data in the WR */
+		iv_loc = (unsigned char *)__skb_push(skb, number_of_ivs *
+						CIPHER_BLOCK_SIZE);
+		if (iv_loc)
+			memcpy(iv_loc, ivs, number_of_ivs * CIPHER_BLOCK_SIZE);
+
+		hws->ivsize = number_of_ivs * CIPHER_BLOCK_SIZE;
+	} else {
+		/* Send the IVs as an SGL */
+		/* The IV DSGL was already accounted for in the credits */
+		skb_shinfo(skb)->nr_frags--;
+		page = alloc_pages(sk->sk_allocation | __GFP_COMP, 0);
+		if (!page) {
+			pr_info("%s : Page allocation for IVs failed\n",
+				__func__);
+			err = -ENOMEM;
+			goto out;
+		}
+		memcpy(page_address(page), ivs, number_of_ivs *
+		       CIPHER_BLOCK_SIZE);
+		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0,
+				   number_of_ivs * CIPHER_BLOCK_SIZE);
+		hws->ivsize = 0;
+	}
+out:
+	kfree(ivs);
+	return err;
+}
+
+/*
+ * Add a ULP_TX_SC_MEMRD sub-command to the WR that references the TX key
+ * stored in adapter memory.
+ */
+static void tls_copy_tx_key(struct sock *sk, struct sk_buff *skb)
+{
+	struct ulptx_sc_memrd *sc_memrd;
+	struct chtls_sock *csk;
+	struct chtls_dev *cdev;
+	struct ulptx_idata *sc;
+	struct chtls_hws *hws;
+	u32 immdlen;
+	int kaddr;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	hws = &csk->tlshws;
+	cdev = csk->cdev;
+
+	immdlen = sizeof(*sc) + sizeof(*sc_memrd);
+	kaddr = keyid_to_addr(cdev->kmap.start, hws->txkey);
+	sc = (struct ulptx_idata *)__skb_push(skb, immdlen);
+	if (sc) {
+		sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
+		sc->len = htonl(0);
+		sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
+		sc_memrd->cmd_to_len =
+				htonl(ULPTX_CMD_V(ULP_TX_SC_MEMRD) |
+				ULP_TX_SC_MORE_V(1) |
+				ULPTX_LEN16_V(hws->keylen >> 4));
+		sc_memrd->addr = htonl(kaddr);
+	}
+}
+
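+/* Return the current TLS TX record sequence number, then advance it */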
+static u64 tlstx_incr_seqnum(struct chtls_hws *hws)
+{
+	return hws->tx_seq_no++;
+}
+
+static bool is_sg_request(const struct sk_buff *skb)
+{
+	return skb->peeked ||
+		(skb->len > MAX_IMM_ULPTX_WR_LEN);
+}
+
+/*
+ * Returns true if an sk_buff carries urgent data.
+ */
+static bool skb_urgent(struct sk_buff *skb)
+{
+	return ULP_SKB_CB(skb)->flags & ULPCB_FLAG_URG;
+}
+
+/* TLS content type for CPL SFO */
+static unsigned char tls_content_type(unsigned char content_type)
+{
+	switch (content_type) {
+	case TLS_HDR_TYPE_CCS:
+		return CPL_TX_TLS_SFO_TYPE_CCS;
+	case TLS_HDR_TYPE_ALERT:
+		return CPL_TX_TLS_SFO_TYPE_ALERT;
+	case TLS_HDR_TYPE_HANDSHAKE:
+		return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
+	case TLS_HDR_TYPE_HEARTBEAT:
+		return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
+	}
+	return CPL_TX_TLS_SFO_TYPE_DATA;
+}
+
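+/*
+ * Prepend the FW_TLSTX_DATA_WR work request and CPL_TX_TLS_SFO CPL to
+ * the skb and fill in the TLS transmit parameters (lengths, key
+ * location, IV placement and per-record sequence number).
+ */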
+static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
+			   int dlen, int tls_immd, u32 credits,
+			   int expn, int pdus)
+{
+	struct fw_tlstx_data_wr *req_wr;
+	struct cpl_tx_tls_sfo *req_cpl;
+	unsigned int wr_ulp_mode_force;
+	struct tls_scmd *updated_scmd;
+	unsigned char data_type;
+	struct chtls_sock *csk;
+	struct net_device *dev;
+	struct chtls_hws *hws;
+	struct tls_scmd *scmd;
+	struct adapter *adap;
+	unsigned char *req;
+	int immd_len;
+	int iv_imm;
+	int len;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	iv_imm = skb_ulp_tls_iv_imm(skb);
+	dev = csk->egress_dev;
+	adap = netdev2adap(dev);
+	hws = &csk->tlshws;
+	scmd = &hws->scmd;
+	len = dlen + expn;
+
+	dlen = (dlen < hws->mfs) ? dlen : hws->mfs;
+	atomic_inc(&adap->chcr_stats.tls_pdu_tx);
+
+	updated_scmd = scmd;
+	updated_scmd->seqno_numivs &= 0xffffff80;
+	updated_scmd->seqno_numivs |= SCMD_NUM_IVS_V(pdus);
+	hws->scmd = *updated_scmd;
+
+	req = (unsigned char *)__skb_push(skb, sizeof(struct cpl_tx_tls_sfo));
+	req_cpl = (struct cpl_tx_tls_sfo *)req;
+	req = (unsigned char *)__skb_push(skb,
+					  sizeof(struct fw_tlstx_data_wr));
+
+	req_wr = (struct fw_tlstx_data_wr *)req;
+	immd_len = (tls_immd ? dlen : 0);
+	req_wr->op_to_immdlen =
+		htonl(FW_WR_OP_V(FW_TLSTX_DATA_WR) |
+		FW_TLSTX_DATA_WR_COMPL_V(1) |
+		FW_TLSTX_DATA_WR_IMMDLEN_V(immd_len));
+	req_wr->flowid_len16 = htonl(FW_TLSTX_DATA_WR_FLOWID_V(csk->tid) |
+				     FW_TLSTX_DATA_WR_LEN16_V(credits));
+	wr_ulp_mode_force = TX_ULP_MODE_V(ULP_MODE_TLS);
+
+	if (is_sg_request(skb))
+		wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F |
+			((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
+			FW_OFLD_TX_DATA_WR_SHOVE_F);
+
+	req_wr->lsodisable_to_flags =
+			htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
+			      FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
+			      T6_TX_FORCE_F | wr_ulp_mode_force |
+			      TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
+					 skb_queue_empty(&csk->txq)));
+
+	req_wr->ctxloc_to_exp =
+			htonl(FW_TLSTX_DATA_WR_NUMIVS_V(pdus) |
+			      FW_TLSTX_DATA_WR_EXP_V(expn) |
+			      FW_TLSTX_DATA_WR_CTXLOC_V(CHTLS_KEY_CONTEXT_DDR) |
+			      FW_TLSTX_DATA_WR_IVDSGL_V(!iv_imm) |
+			      FW_TLSTX_DATA_WR_KEYSIZE_V(hws->keylen >> 4));
+
+	/* Fill in the length */
+	req_wr->plen = htonl(len);
+	req_wr->mfs = htons(hws->mfs);
+	req_wr->adjustedplen_pkd =
+		htons(FW_TLSTX_DATA_WR_ADJUSTEDPLEN_V(hws->adjustlen));
+	req_wr->expinplenmax_pkd =
+		htons(FW_TLSTX_DATA_WR_EXPINPLENMAX_V(hws->expansion));
+	req_wr->pdusinplenmax_pkd =
+		FW_TLSTX_DATA_WR_PDUSINPLENMAX_V(hws->pdus);
+	req_wr->r10 = 0;
+
+	data_type = tls_content_type(ULP_SKB_CB(skb)->ulp.tls.type);
+	req_cpl->op_to_seg_len = htonl(CPL_TX_TLS_SFO_OPCODE_V(CPL_TX_TLS_SFO) |
+				       CPL_TX_TLS_SFO_DATA_TYPE_V(data_type) |
+				       CPL_TX_TLS_SFO_CPL_LEN_V(2) |
+				       CPL_TX_TLS_SFO_SEG_LEN_V(dlen));
+	req_cpl->pld_len = htonl(len - expn);
+
+	req_cpl->type_protover = htonl(CPL_TX_TLS_SFO_TYPE_V
+		((data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT) ?
+		TLS_HDR_TYPE_HEARTBEAT : 0) |
+		CPL_TX_TLS_SFO_PROTOVER_V(0));
+
+	/* copy the security command (SCMD) into the CPL */
+	req_cpl->r1_lo = 0;
+	req_cpl->seqno_numivs  = cpu_to_be32(hws->scmd.seqno_numivs);
+	req_cpl->ivgen_hdrlen = cpu_to_be32(hws->scmd.ivgen_hdrlen);
+	req_cpl->scmd1 = cpu_to_be64(tlstx_incr_seqnum(hws));
+}
+
+/*
+ * Calculate the TLS data expansion size
+ */
+static int chtls_expansion_size(struct sock *sk, int data_len,
+				int fullpdu,
+				unsigned short *pducnt)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct chtls_hws *hws = &csk->tlshws;
+	struct tls_scmd *scmd = &hws->scmd;
+	int fragsize = hws->mfs;
+	int expnsize = 0;
+	int fragleft;
+	int fragcnt;
+	int expppdu;
+
+	if (SCMD_CIPH_MODE_G(scmd->seqno_numivs) ==
+	    SCMD_CIPH_MODE_AES_GCM) {
+		expppdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
+			  TLS_HEADER_LENGTH;
+
+		if (fullpdu) {
+			*pducnt = data_len / (expppdu + fragsize);
+			if (*pducnt > 32)
+				*pducnt = 32;
+			else if (!*pducnt)
+				*pducnt = 1;
+			expnsize = (*pducnt) * expppdu;
+			return expnsize;
+		}
+		fragcnt = (data_len / fragsize);
+		expnsize =  fragcnt * expppdu;
+		fragleft = data_len % fragsize;
+		if (fragleft > 0)
+			expnsize += expppdu;
+	}
+	return expnsize;
+}
+
+/* WR with IV, KEY and CPL SFO added */
+static void make_tlstx_data_wr(struct sock *sk, struct sk_buff *skb,
+			       int tls_tx_imm, int tls_len, u32 credits)
+{
+	unsigned short pdus_per_ulp = 0;
+	struct chtls_sock *csk;
+	struct chtls_hws *hws;
+	int expn_sz;
+	int pdus;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	hws = &csk->tlshws;
+	pdus = DIV_ROUND_UP(tls_len, hws->mfs);
+	expn_sz = chtls_expansion_size(sk, tls_len, 0, NULL);
+	if (!hws->compute) {
+		hws->expansion = chtls_expansion_size(sk,
+						      hws->fcplenmax,
+						      1, &pdus_per_ulp);
+		hws->pdus = pdus_per_ulp;
+		hws->adjustlen = hws->pdus *
+			((hws->expansion / hws->pdus) + hws->mfs);
+		hws->compute = 1;
+	}
+	if (tls_copy_ivs(sk, skb))
+		return;
+	tls_copy_tx_key(sk, skb);
+	tls_tx_data_wr(sk, skb, tls_len, tls_tx_imm, credits, expn_sz, pdus);
+	hws->tx_seq_no += (pdus - 1);
+}
+
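+/* Prepend a plain FW_OFLD_TX_DATA_WR work request for non-TLS payload */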
+static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
+			    unsigned int immdlen, int len,
+			    u32 credits, u32 compl)
+{
+	struct fw_ofld_tx_data_wr *req;
+	unsigned int wr_ulp_mode_force;
+	struct chtls_sock *csk;
+	unsigned int opcode;
+
+	csk = rcu_dereference_sk_user_data(sk);
+	opcode = FW_OFLD_TX_DATA_WR;
+
+	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
+	req->op_to_immdlen = htonl(WR_OP_V(opcode) |
+				FW_WR_COMPL_V(compl) |
+				FW_WR_IMMDLEN_V(immdlen));
+	req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
+				FW_WR_LEN16_V(credits));
+
+	wr_ulp_mode_force = TX_ULP_MODE_V(csk->ulp_mode);
+	if (is_sg_request(skb))
+		wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F |
+			((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
+				FW_OFLD_TX_DATA_WR_SHOVE_F);
+
+	req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
+			FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
+			FW_OFLD_TX_DATA_WR_SHOVE_V((!csk_flag
+					(sk, CSK_TX_MORE_DATA)) &&
+					 skb_queue_empty(&csk->txq)));
+	req->plen = htonl(len);
+}
+
+static int chtls_wr_size(struct chtls_sock *csk, const struct sk_buff *skb,
+			 bool size)
+{
+	int wr_size;
+
+	wr_size = TLS_WR_CPL_LEN;
+	wr_size += KEY_ON_MEM_SZ;
+	wr_size += ivs_size(csk->sk, skb);
+
+	if (size)
+		return wr_size;
+
+	/* reserve an extra fragment for the IV DSGL (non-immediate IVs) */
+	if (!skb_ulp_tls_iv_imm(skb))
+		skb_shinfo(skb)->nr_frags++;
+
+	return wr_size;
+}
+
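+/*
+ * Decide whether the skb, including the work request header that will
+ * be prepended, is small enough to be sent as immediate data.
+ */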
+static bool is_ofld_imm(struct chtls_sock *csk, const struct sk_buff *skb)
+{
+	int length = skb->len;
+
+	if (skb->peeked || skb->len > MAX_IMM_ULPTX_WR_LEN)
+		return false;
+
+	if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
+		/* include the TLS WR size in the immediate-data check */
+		if (csk->ulp_mode == ULP_MODE_TLS &&
+		    skb_ulp_tls_inline(skb))
+			length += chtls_wr_size(csk, skb, true);
+		else
+			length += sizeof(struct fw_ofld_tx_data_wr);
+
+		return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
+	}
+	return true;
+}
+
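+/* Number of 8-byte flits needed for the WR: headers plus the SGL entries */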
+static unsigned int calc_tx_flits(const struct sk_buff *skb,
+				  unsigned int immdlen)
+{
+	unsigned int flits, cnt;
+
+	flits = immdlen / 8;   /* headers */
+	cnt = skb_shinfo(skb)->nr_frags;
+	if (skb_tail_pointer(skb) != skb_transport_header(skb))
+		cnt++;
+	return flits + sgl_len(cnt);
+}
+
+static void arp_failure_discard(void *handle, struct sk_buff *skb)
+{
+	kfree_skb(skb);
+}
+
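+/*
+ * Drain the transmit queue: for each skb that fits in the available WR
+ * credits, prepend the appropriate work request header and hand it to
+ * the hardware.  Returns the total truesize of the skbs sent.
+ */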
+int chtls_push_frames(struct chtls_sock *csk, int comp)
+{
+	struct chtls_hws *hws = &csk->tlshws;
+	struct tcp_sock *tp;
+	struct sk_buff *skb;
+	int total_size = 0;
+	struct sock *sk;
+	int wr_size;
+
+	wr_size = sizeof(struct fw_ofld_tx_data_wr);
+	sk = csk->sk;
+	tp = tcp_sk(sk);
+
+	if (unlikely(sk_in_state(sk, TCPF_SYN_SENT | TCPF_CLOSE)))
+		return 0;
+
+	if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
+		return 0;
+
+	while (csk->wr_credits && (skb = skb_peek(&csk->txq)) &&
+	       (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_HOLD) ||
+		skb_queue_len(&csk->txq) > 1)) {
+		unsigned int credit_len = skb->len;
+		unsigned int credits_needed;
+		unsigned int completion = 0;
+		int tls_len = skb->len; /* TLS data len before IV/key */
+		unsigned int immdlen;
+		int len = skb->len;    /* length [ulp bytes] inserted by hw */
+		int flowclen16 = 0;
+		int tls_tx_imm = 0;
+
+		immdlen = skb->len;
+		if (!is_ofld_imm(csk, skb)) {
+			immdlen = skb_transport_offset(skb);
+			if (skb_ulp_tls_inline(skb))
+				wr_size = chtls_wr_size(csk, skb, false);
+			credit_len = 8 * calc_tx_flits(skb, immdlen);
+		} else {
+			if (skb_ulp_tls_inline(skb)) {
+				wr_size = chtls_wr_size(csk, skb, false);
+				tls_tx_imm = 1;
+			}
+		}
+		if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR))
+			credit_len += wr_size;
+		credits_needed = DIV_ROUND_UP(credit_len, 16);
+		if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
+			flowclen16 = send_tx_flowc_wr(sk, 1, tp->snd_nxt,
+						      tp->rcv_nxt);
+			if (flowclen16 <= 0)
+				break;
+			csk->wr_credits -= flowclen16;
+			csk->wr_unacked += flowclen16;
+			csk->wr_nondata += flowclen16;
+			csk_set_flag(csk, CSK_TX_DATA_SENT);
+		}
+
+		if (csk->wr_credits < credits_needed) {
+			if (skb_ulp_tls_inline(skb) &&
+			    !skb_ulp_tls_iv_imm(skb))
+				skb_shinfo(skb)->nr_frags--;
+			break;
+		}
+
+		__skb_unlink(skb, &csk->txq);
+		skb_set_queue_mapping(skb, (csk->txq_idx << 1) |
+				      CPL_PRIORITY_DATA);
+		if (hws->ofld)
+			hws->txqid = (skb->queue_mapping >> 1);
+		skb->csum = (__force __wsum)(credits_needed + csk->wr_nondata);
+		csk->wr_credits -= credits_needed;
+		csk->wr_unacked += credits_needed;
+		csk->wr_nondata = 0;
+		enqueue_wr(csk, skb);
+
+		if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) {
+			if ((comp && csk->wr_unacked == credits_needed) ||
+			    (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) ||
+			    csk->wr_unacked >= csk->wr_max_credits / 2) {
+				completion = 1;
+				csk->wr_unacked = 0;
+			}
+			if (skb_ulp_tls_inline(skb))
+				make_tlstx_data_wr(sk, skb, tls_tx_imm,
+						   tls_len, credits_needed);
+			else
+				make_tx_data_wr(sk, skb, immdlen, len,
+						credits_needed, completion);
+			tp->snd_nxt += len;
+			tp->lsndtime = tcp_time_stamp(tp);
+			if (completion)
+				ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
+		} else {
+			struct cpl_close_con_req *req = cplhdr(skb);
+			unsigned int cmd =
+				CPL_OPCODE_G(ntohl(OPCODE_TID(req)));
+
+			if (cmd == CPL_CLOSE_CON_REQ)
+				csk_set_flag(csk,
+					     CSK_CLOSE_CON_REQUESTED);
+
+			if ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) &&
+			    (csk->wr_unacked >= csk->wr_max_credits / 2)) {
+				req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
+				csk->wr_unacked = 0;
+			}
+		}
+		total_size += skb->truesize;
+		if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_BARRIER)
+			csk_set_flag(csk, CSK_TX_WAIT_IDLE);
+		t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+		cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
+	}
+	sk->sk_wmem_queued -= total_size;
+	return total_size;
+}
+
+static void mark_urg(struct tcp_sock *tp, int flags,
+		     struct sk_buff *skb)
+{
+	if (unlikely(flags & MSG_OOB)) {
+		tp->snd_up = tp->write_seq;
+		ULP_SKB_CB(skb)->flags = ULPCB_FLAG_URG |
+					 ULPCB_FLAG_BARRIER |
+					 ULPCB_FLAG_NO_APPEND |
+					 ULPCB_FLAG_NEED_HDR;
+	}
+}
+
+/*
+ * Returns true if a connection should send more data to the TCP engine.
+ */
+static bool should_push(struct sock *sk)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct chtls_dev *cdev = csk->cdev;
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	/*
+	 * If we've released our offload resources, there's nothing to do.
+	 */
+	if (!cdev)
+		return false;
+
+	/*
+	 * If there aren't any work requests in flight, or there isn't enough
+	 * data in flight, or Nagle is off, then send the current TX_DATA;
+	 * otherwise hold it and wait to accumulate more data.
+	 */
+	return csk->wr_credits == csk->wr_max_credits ||
+		(tp->nonagle & TCP_NAGLE_OFF);
+}
+
+/*
+ * Returns true if a TCP socket is corked.
+ */
+static bool corked(const struct tcp_sock *tp, int flags)
+{
+	return (flags & MSG_MORE) || (tp->nonagle & TCP_NAGLE_CORK);
+}
+
+/*
+ * Returns true if a send should try to push new data.
+ */
+static bool send_should_push(struct sock *sk, int flags)
+{
+	return should_push(sk) && !corked(tcp_sk(sk), flags);
+}
+
+void chtls_tcp_push(struct sock *sk, int flags)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	int qlen = skb_queue_len(&csk->txq);
+
+	if (likely(qlen)) {
+		struct sk_buff *skb = skb_peek_tail(&csk->txq);
+		struct tcp_sock *tp = tcp_sk(sk);
+
+		mark_urg(tp, flags, skb);
+
+		if (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) &&
+		    corked(tp, flags)) {
+			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_HOLD;
+			return;
+		}
+
+		ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_HOLD;
+		if (qlen == 1 &&
+		    ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
+		     should_push(sk)))
+			chtls_push_frames(csk, 1);
+	}
+}
+
+/*
+ * Calculate the size for a new send sk_buff.  It is sized at the maximum so
+ * we can pack lots of data into it, unless we plan to send it immediately,
+ * in which case we size it more tightly.
+ *
+ * Note: we don't bother compensating for MSS < PAGE_SIZE because it doesn't
+ * arise in normal cases and when it does we are just wasting memory.
+ */
+static int select_size(struct sock *sk, int io_len, int flags, int len)
+{
+	const int pgbreak = SKB_MAX_HEAD(len);
+
+	/*
+	 * If the data wouldn't fit in the main body anyway, put only the
+	 * header in the main body so it can use immediate data and place all
+	 * the payload in page fragments.
+	 */
+	if (io_len > pgbreak)
+		return 0;
+
+	/*
+	 * If we will be accumulating payload get a large main body.
+	 */
+	if (!send_should_push(sk, flags))
+		return pgbreak;
+
+	return io_len;
+}
+
+void skb_entail(struct sock *sk, struct sk_buff *skb, int flags)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	ULP_SKB_CB(skb)->seq = tp->write_seq;
+	ULP_SKB_CB(skb)->flags = flags;
+	__skb_queue_tail(&csk->txq, skb);
+	sk->sk_wmem_queued += skb->truesize;
+
+	if (TCP_PAGE(sk) && TCP_OFF(sk)) {
+		put_page(TCP_PAGE(sk));
+		TCP_PAGE(sk) = NULL;
+		TCP_OFF(sk) = 0;
+	}
+}
+
+static struct sk_buff *get_tx_skb(struct sock *sk, int size)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(size + TX_HEADER_LEN, sk->sk_allocation);
+	if (likely(skb)) {
+		skb_reserve(skb, TX_HEADER_LEN);
+		skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
+		skb_reset_transport_header(skb);
+	}
+	return skb;
+}
+
+static struct sk_buff *get_record_skb(struct sock *sk, int size, bool zcopy)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct sk_buff *skb;
+
+	skb = alloc_skb(((zcopy ? 0 : size) + TX_TLSHDR_LEN +
+			KEY_ON_MEM_SZ + max_ivs_size(sk, size)),
+			sk->sk_allocation);
+	if (likely(skb)) {
+		skb_reserve(skb, (TX_TLSHDR_LEN +
+			    KEY_ON_MEM_SZ + max_ivs_size(sk, size)));
+		skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
+		skb_reset_transport_header(skb);
+		ULP_SKB_CB(skb)->ulp.tls.ofld = 1;
+		ULP_SKB_CB(skb)->ulp.tls.type = csk->tlshws.type;
+	}
+	return skb;
+}
+
+static void tx_skb_finalize(struct sk_buff *skb)
+{
+	struct ulp_skb_cb *cb = ULP_SKB_CB(skb);
+
+	if (!(cb->flags & ULPCB_FLAG_NO_HDR))
+		cb->flags = ULPCB_FLAG_NEED_HDR;
+	cb->flags |= ULPCB_FLAG_NO_APPEND;
+}
+
+static void push_frames_if_head(struct sock *sk)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+	if (skb_queue_len(&csk->txq) == 1)
+		chtls_push_frames(csk, 1);
+}
+
+static int chtls_skb_copy_to_page_nocache(struct sock *sk,
+					  struct iov_iter *from,
+					  struct sk_buff *skb,
+					  struct page *page,
+					  int off, int copy)
+{
+	int err;
+
+	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) +
+				       off, copy, skb->len);
+	if (err)
+		return err;
+
+	skb->len             += copy;
+	skb->data_len        += copy;
+	skb->truesize        += copy;
+	sk->sk_wmem_queued   += copy;
+	return 0;
+}
+
+/* Read TLS header to find content type and data length */
+static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
+{
+	if (copy_from_iter(thdr, sizeof(*thdr), from) != sizeof(*thdr))
+		return -EFAULT;
+	return (__force int)cpu_to_be16(thdr->length);
+}
+
+static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
+{
+	return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
+}
+
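+/*
+ * Wait until send-buffer memory is available, or the socket errors out,
+ * is shut down for sending, times out or is interrupted by a signal.
+ */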
+static int csk_wait_memory(struct chtls_dev *cdev,
+			   struct sock *sk, long *timeo_p)
+{
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+	int sndbuf, err = 0;
+	long current_timeo;
+	long vm_wait = 0;
+	bool noblock;
+
+	current_timeo = *timeo_p;
+	noblock = !*timeo_p;
+	sndbuf = cdev->max_host_sndbuf;
+	if (csk_mem_free(cdev, sk)) {
+		current_timeo = (prandom_u32() % (HZ / 5)) + 2;
+		vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+	}
+
+	add_wait_queue(sk_sleep(sk), &wait);
+	while (1) {
+		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+			goto do_error;
+		if (!*timeo_p) {
+			if (noblock)
+				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+			goto do_nonblock;
+		}
+		if (signal_pending(current))
+			goto do_interrupted;
+		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+		if (csk_mem_free(cdev, sk) && !vm_wait)
+			break;
+
+		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+		sk->sk_write_pending++;
+		sk_wait_event(sk, &current_timeo, sk->sk_err ||
+			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
+			      (csk_mem_free(cdev, sk) && !vm_wait), &wait);
+		sk->sk_write_pending--;
+
+		if (vm_wait) {
+			vm_wait -= current_timeo;
+			current_timeo = *timeo_p;
+			if (current_timeo != MAX_SCHEDULE_TIMEOUT) {
+				current_timeo -= vm_wait;
+				if (current_timeo < 0)
+					current_timeo = 0;
+			}
+			vm_wait = 0;
+		}
+		*timeo_p = current_timeo;
+	}
+do_rm_wq:
+	remove_wait_queue(sk_sleep(sk), &wait);
+	return err;
+do_error:
+	err = -EPIPE;
+	goto do_rm_wq;
+do_nonblock:
+	err = -EAGAIN;
+	goto do_rm_wq;
+do_interrupted:
+	err = sock_intr_errno(*timeo_p);
+	goto do_rm_wq;
+}
+
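+/*
+ * Offloaded sendmsg: for TLS TX connections the stream is split into
+ * TLS records using the record headers supplied by user space; data is
+ * staged on the per-connection TX queue and pushed as WR credits allow.
+ */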
+int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct chtls_dev *cdev = csk->cdev;
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+	int mss, flags, err;
+	int recordsz = 0;
+	int copied = 0;
+	int hdrlen = 0;
+	long timeo;
+
+	lock_sock(sk);
+	flags = msg->msg_flags;
+	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+
+	if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
+		err = sk_stream_wait_connect(sk, &timeo);
+		if (err)
+			goto out_err;
+	}
+
+	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+	err = -EPIPE;
+	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+		goto out_err;
+
+	mss = csk->mss;
+	csk_set_flag(csk, CSK_TX_MORE_DATA);
+
+	while (msg_data_left(msg)) {
+		int copy = 0;
+
+		skb = skb_peek_tail(&csk->txq);
+		if (skb) {
+			copy = mss - skb->len;
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		}
+		if (!csk_mem_free(cdev, sk))
+			goto wait_for_sndbuf;
+
+		if (is_tls_tx(csk) && !csk->tlshws.txleft) {
+			struct tls_hdr hdr;
+
+			recordsz = tls_header_read(&hdr, &msg->msg_iter);
+			size -= TLS_HEADER_LENGTH;
+			hdrlen += TLS_HEADER_LENGTH;
+			csk->tlshws.txleft = recordsz;
+			csk->tlshws.type = hdr.type;
+			if (skb)
+				ULP_SKB_CB(skb)->ulp.tls.type = hdr.type;
+		}
+
+		if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
+		    copy <= 0) {
+new_buf:
+			if (skb) {
+				tx_skb_finalize(skb);
+				push_frames_if_head(sk);
+			}
+
+			if (is_tls_tx(csk)) {
+				skb = get_record_skb(sk,
+						     select_size(sk, recordsz,
+								 flags,
+								 TX_TLSHDR_LEN),
+						     false);
+			} else {
+				skb = get_tx_skb(sk,
+						 select_size(sk, size, flags,
+							     TX_HEADER_LEN));
+			}
+			if (unlikely(!skb))
+				goto wait_for_memory;
+
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			copy = mss;
+		}
+		if (copy > size)
+			copy = size;
+
+		if (skb_tailroom(skb) > 0) {
+			copy = min(copy, skb_tailroom(skb));
+			if (is_tls_tx(csk))
+				copy = min_t(int, copy, csk->tlshws.txleft);
+			err = skb_add_data_nocache(sk, skb,
+						   &msg->msg_iter, copy);
+			if (err)
+				goto do_fault;
+		} else {
+			int i = skb_shinfo(skb)->nr_frags;
+			struct page *page = TCP_PAGE(sk);
+			int pg_size = PAGE_SIZE;
+			int off = TCP_OFF(sk);
+			bool merge;
+
+			if (!page)
+				goto wait_for_memory;
+
+			pg_size <<= compound_order(page);
+			if (off < pg_size &&
+			    skb_can_coalesce(skb, i, page, off)) {
+				merge = 1;
+				goto copy;
+			}
+			merge = 0;
+			if (i == (is_tls_tx(csk) ? (MAX_SKB_FRAGS - 1) :
+			    MAX_SKB_FRAGS))
+				goto new_buf;
+
+			if (page && off == pg_size) {
+				put_page(page);
+				TCP_PAGE(sk) = page = NULL;
+				pg_size = PAGE_SIZE;
+			}
+
+			if (!page) {
+				gfp_t gfp = sk->sk_allocation;
+				int order = cdev->send_page_order;
+
+				if (order) {
+					page = alloc_pages(gfp | __GFP_COMP |
+							   __GFP_NOWARN |
+							   __GFP_NORETRY,
+							   order);
+					if (page)
+						pg_size <<=
+							compound_order(page);
+				}
+				if (!page) {
+					page = alloc_page(gfp);
+					pg_size = PAGE_SIZE;
+				}
+				if (!page)
+					goto wait_for_memory;
+				off = 0;
+			}
+copy:
+			if (copy > pg_size - off)
+				copy = pg_size - off;
+			if (is_tls_tx(csk))
+				copy = min_t(int, copy, csk->tlshws.txleft);
+
+			err = chtls_skb_copy_to_page_nocache(sk, &msg->msg_iter,
+							     skb, page,
+							     off, copy);
+			if (unlikely(err)) {
+				if (!TCP_PAGE(sk)) {
+					TCP_PAGE(sk) = page;
+					TCP_OFF(sk) = 0;
+				}
+				goto do_fault;
+			}
+			/* Update the skb. */
+			if (merge) {
+				skb_shinfo(skb)->frags[i - 1].size += copy;
+			} else {
+				skb_fill_page_desc(skb, i, page, off, copy);
+				if (off + copy < pg_size) {
+					/* space left, keep the page */
+					get_page(page);
+					TCP_PAGE(sk) = page;
+				} else {
+					TCP_PAGE(sk) = NULL;
+				}
+			}
+			TCP_OFF(sk) = off + copy;
+		}
+		if (unlikely(skb->len == mss))
+			tx_skb_finalize(skb);
+		tp->write_seq += copy;
+		copied += copy;
+		size -= copy;
+
+		if (is_tls_tx(csk))
+			csk->tlshws.txleft -= copy;
+
+		if (corked(tp, flags) &&
+		    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
+			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;
+
+		if (size == 0)
+			goto out;
+
+		if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND)
+			push_frames_if_head(sk);
+		continue;
+wait_for_sndbuf:
+		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+wait_for_memory:
+		err = csk_wait_memory(cdev, sk, &timeo);
+		if (err)
+			goto do_error;
+	}
+out:
+	csk_reset_flag(csk, CSK_TX_MORE_DATA);
+	if (copied)
+		chtls_tcp_push(sk, flags);
+done:
+	release_sock(sk);
+	return copied + hdrlen;
+do_fault:
+	if (!skb->len) {
+		__skb_unlink(skb, &csk->txq);
+		sk->sk_wmem_queued -= skb->truesize;
+		__kfree_skb(skb);
+	}
+do_error:
+	if (copied)
+		goto out;
+out_err:
+	if (csk_conn_inline(csk))
+		csk_reset_flag(csk, CSK_TX_MORE_DATA);
+	copied = sk_stream_error(sk, flags, err);
+	goto done;
+}
+
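+/*
+ * Offloaded sendpage: attach the caller's page to the tail skb on the
+ * TX queue (coalescing with the previous fragment when possible) and
+ * push frames as WR credits allow.
+ */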
+int chtls_sendpage(struct sock *sk, struct page *page,
+		   int offset, size_t size, int flags)
+{
+	struct chtls_sock *csk;
+	struct chtls_dev *cdev;
+	int mss, err, copied;
+	struct tcp_sock *tp;
+	long timeo;
+
+	tp = tcp_sk(sk);
+	copied = 0;
+	csk = rcu_dereference_sk_user_data(sk);
+	cdev = csk->cdev;
+	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+
+	err = sk_stream_wait_connect(sk, &timeo);
+	if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
+	    err != 0)
+		goto out_err;
+
+	mss = csk->mss;
+	csk_set_flag(csk, CSK_TX_MORE_DATA);
+
+	while (size > 0) {
+		struct sk_buff *skb = skb_peek_tail(&csk->txq);
+		int copy, i;
+
+		if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
+		    (copy = mss - skb->len) <= 0) {
+new_buf:
+			if (!csk_mem_free(cdev, sk))
+				goto wait_for_sndbuf;
+
+			if (is_tls_tx(csk)) {
+				skb = get_record_skb(sk,
+						     select_size(sk, size,
+								 flags,
+								 TX_TLSHDR_LEN),
+						     true);
+			} else {
+				skb = get_tx_skb(sk, 0);
+			}
+			if (!skb)
+				goto wait_for_memory;
+			copy = mss;
+		}
+		if (copy > size)
+			copy = size;
+
+		i = skb_shinfo(skb)->nr_frags;
+		if (skb_can_coalesce(skb, i, page, offset)) {
+			skb_shinfo(skb)->frags[i - 1].size += copy;
+		} else if (i < MAX_SKB_FRAGS) {
+			get_page(page);
+			skb_fill_page_desc(skb, i, page, offset, copy);
+		} else {
+			tx_skb_finalize(skb);
+			push_frames_if_head(sk);
+			goto new_buf;
+		}
+
+		skb->len += copy;
+		if (skb->len == mss)
+			tx_skb_finalize(skb);
+		skb->data_len += copy;
+		skb->truesize += copy;
+		sk->sk_wmem_queued += copy;
+		tp->write_seq += copy;
+		copied += copy;
+		offset += copy;
+		size -= copy;
+
+		if (corked(tp, flags) &&
+		    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
+			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;
+
+		if (!size)
+			break;
+
+		if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND))
+			push_frames_if_head(sk);
+		continue;
+wait_for_sndbuf:
+		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+wait_for_memory:
+		err = csk_wait_memory(cdev, sk, &timeo);
+		if (err)
+			goto do_error;
+	}
+out:
+	csk_reset_flag(csk, CSK_TX_MORE_DATA);
+	if (copied)
+		chtls_tcp_push(sk, flags);
+done:
+	release_sock(sk);
+	return copied;
+
+do_error:
+	if (copied)
+		goto out;
+
+out_err:
+	if (csk_conn_inline(csk))
+		csk_reset_flag(csk, CSK_TX_MORE_DATA);
+	copied = sk_stream_error(sk, flags, err);
+	goto done;
+}
+
+static void chtls_select_window(struct sock *sk)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+	unsigned int wnd = tp->rcv_wnd;
+
+	wnd = max_t(unsigned int, wnd, tcp_full_space(sk));
+	wnd = max_t(unsigned int, MIN_RCV_WND, wnd);
+
+	if (wnd > MAX_RCV_WND)
+		wnd = MAX_RCV_WND;
+
+/*
+ * Check if we need to grow the receive window in response to an increase in
+ * the socket's receive buffer size.  Some applications increase the buffer
+ * size dynamically and rely on the window to grow accordingly.
+ */
+
+	if (wnd > tp->rcv_wnd) {
+		tp->rcv_wup -= wnd - tp->rcv_wnd;
+		tp->rcv_wnd = wnd;
+		/* Mark the receive window as updated */
+		csk_reset_flag(csk, CSK_UPDATE_RCV_WND);
+	}
+}
+
+/*
+ * Send RX credits through an RX_DATA_ACK CPL message.  We are permitted
+ * to return without sending the message in case we cannot allocate
+ * an sk_buff.  Returns the number of credits sent.
+ */
+static u32 send_rx_credits(struct chtls_sock *csk, u32 credits)
+{
+	struct cpl_rx_data_ack *req;
+	struct sk_buff *skb;
+
+	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+	if (!skb)
+		return 0;
+	__skb_put(skb, sizeof(*req));
+	req = (struct cpl_rx_data_ack *)skb->head;
+
+	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
+	INIT_TP_WR(req, csk->tid);
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
+						    csk->tid));
+	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) |
+				       RX_FORCE_ACK_F);
+	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+	return credits;
+}
+
+#define CREDIT_RETURN_STATE (TCPF_ESTABLISHED | \
+			     TCPF_FIN_WAIT1 | \
+			     TCPF_FIN_WAIT2)
+
+/*
+ * Called after some received data has been read.  It returns RX credits
+ * to the HW for the amount of data processed.
+ */
+static void chtls_cleanup_rbuf(struct sock *sk, int copied)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct tcp_sock *tp;
+	int must_send;
+	u32 credits;
+	u32 thres;
+
+	thres = 15 * 1024;
+
+	if (!sk_in_state(sk, CREDIT_RETURN_STATE))
+		return;
+
+	chtls_select_window(sk);
+	tp = tcp_sk(sk);
+	credits = tp->copied_seq - tp->rcv_wup;
+	if (unlikely(!credits))
+		return;
+
+/*
+ * For coalescing to work effectively, ensure the receive window has
+ * at least 16KB left.
+ */
+	must_send = credits + 16384 >= tp->rcv_wnd;
+
+	if (must_send || credits >= thres)
+		tp->rcv_wup += send_rx_credits(csk, credits);
+}
+
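+/*
+ * Receive path for connections with an RX key programmed: records have
+ * already been decrypted inline by the hardware, so this copies the
+ * plain-text payload out of the receive queue and returns RX credits.
+ */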
+static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+			    int nonblock, int flags, int *addr_len)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct net_device *dev = csk->egress_dev;
+	struct chtls_hws *hws = &csk->tlshws;
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct adapter *adap;
+	unsigned long avail;
+	int buffers_freed;
+	int copied = 0;
+	int request;
+	int target;
+	long timeo;
+
+	adap = netdev2adap(dev);
+	buffers_freed = 0;
+
+	timeo = sock_rcvtimeo(sk, nonblock);
+	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+	request = len;
+
+	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
+		chtls_cleanup_rbuf(sk, copied);
+
+	do {
+		struct sk_buff *skb;
+		u32 offset = 0;
+
+		if (unlikely(tp->urg_data &&
+			     tp->urg_seq == tp->copied_seq)) {
+			if (copied)
+				break;
+			if (signal_pending(current)) {
+				copied = timeo ? sock_intr_errno(timeo) :
+					-EAGAIN;
+				break;
+			}
+		}
+		skb = skb_peek(&sk->sk_receive_queue);
+		if (skb)
+			goto found_ok_skb;
+		if (csk->wr_credits &&
+		    skb_queue_len(&csk->txq) &&
+		    chtls_push_frames(csk, csk->wr_credits ==
+				      csk->wr_max_credits))
+			sk->sk_write_space(sk);
+
+		if (copied >= target && !sk->sk_backlog.tail)
+			break;
+
+		if (copied) {
+			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
+			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
+			    signal_pending(current))
+				break;
+
+			if (!timeo)
+				break;
+		} else {
+			if (sock_flag(sk, SOCK_DONE))
+				break;
+			if (sk->sk_err) {
+				copied = sock_error(sk);
+				break;
+			}
+			if (sk->sk_shutdown & RCV_SHUTDOWN)
+				break;
+			if (sk->sk_state == TCP_CLOSE) {
+				copied = -ENOTCONN;
+				break;
+			}
+			if (!timeo) {
+				copied = -EAGAIN;
+				break;
+			}
+			if (signal_pending(current)) {
+				copied = sock_intr_errno(timeo);
+				break;
+			}
+		}
+		if (sk->sk_backlog.tail) {
+			release_sock(sk);
+			lock_sock(sk);
+			chtls_cleanup_rbuf(sk, copied);
+			continue;
+		}
+
+		if (copied >= target)
+			break;
+		chtls_cleanup_rbuf(sk, copied);
+		sk_wait_data(sk, &timeo, NULL);
+		continue;
+found_ok_skb:
+		if (!skb->len) {
+			skb_dst_set(skb, NULL);
+			__skb_unlink(skb, &sk->sk_receive_queue);
+			kfree_skb(skb);
+
+			if (!copied && !timeo) {
+				copied = -EAGAIN;
+				break;
+			}
+
+			if (copied < target) {
+				release_sock(sk);
+				lock_sock(sk);
+				continue;
+			}
+			break;
+		}
+		offset = hws->copied_seq;
+		avail = skb->len - offset;
+		if (len < avail)
+			avail = len;
+
+		if (unlikely(tp->urg_data)) {
+			u32 urg_offset = tp->urg_seq - tp->copied_seq;
+
+			if (urg_offset < avail) {
+				if (urg_offset) {
+					avail = urg_offset;
+				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
+					/* First byte is urgent, skip */
+					tp->copied_seq++;
+					offset++;
+					avail--;
+					if (!avail)
+						goto skip_copy;
+				}
+			}
+		}
+		if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
+			if (!copied) {
+				copied = -EFAULT;
+				break;
+			}
+		}
+
+		copied += avail;
+		len -= avail;
+		hws->copied_seq += avail;
+skip_copy:
+		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
+			tp->urg_data = 0;
+
+		if ((avail + offset) >= skb->len) {
+			if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
+				tp->copied_seq += skb->len;
+				hws->rcvpld = skb->hdr_len;
+			} else {
+				tp->copied_seq += hws->rcvpld;
+			}
+			chtls_free_skb(sk, skb);
+			buffers_freed++;
+			hws->copied_seq = 0;
+			if (copied >= target &&
+			    !skb_peek(&sk->sk_receive_queue))
+				break;
+		}
+	} while (len > 0);
+
+	if (buffers_freed)
+		chtls_cleanup_rbuf(sk, copied);
+	release_sock(sk);
+	return copied;
+}
+
+/*
+ * Peek at data in a socket's receive buffer.
+ */
+static int peekmsg(struct sock *sk, struct msghdr *msg,
+		   size_t len, int nonblock, int flags)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 peek_seq, offset;
+	struct sk_buff *skb;
+	int copied = 0;
+	size_t avail;          /* amount of available data in current skb */
+	long timeo;
+
+	lock_sock(sk);
+	timeo = sock_rcvtimeo(sk, nonblock);
+	peek_seq = tp->copied_seq;
+
+	do {
+		if (unlikely(tp->urg_data && tp->urg_seq == peek_seq)) {
+			if (copied)
+				break;
+			if (signal_pending(current)) {
+				copied = timeo ? sock_intr_errno(timeo) :
+				-EAGAIN;
+				break;
+			}
+		}
+
+		skb_queue_walk(&sk->sk_receive_queue, skb) {
+			offset = peek_seq - ULP_SKB_CB(skb)->seq;
+			if (offset < skb->len)
+				goto found_ok_skb;
+		}
+
+		/* empty receive queue */
+		if (copied)
+			break;
+		if (sock_flag(sk, SOCK_DONE))
+			break;
+		if (sk->sk_err) {
+			copied = sock_error(sk);
+			break;
+		}
+		if (sk->sk_shutdown & RCV_SHUTDOWN)
+			break;
+		if (sk->sk_state == TCP_CLOSE) {
+			copied = -ENOTCONN;
+			break;
+		}
+		if (!timeo) {
+			copied = -EAGAIN;
+			break;
+		}
+		if (signal_pending(current)) {
+			copied = sock_intr_errno(timeo);
+			break;
+		}
+
+		if (sk->sk_backlog.tail) {
+			/* Do not sleep, just process backlog. */
+			release_sock(sk);
+			lock_sock(sk);
+		} else {
+			sk_wait_data(sk, &timeo, NULL);
+		}
+
+		if (unlikely(peek_seq != tp->copied_seq)) {
+			if (net_ratelimit())
+				pr_info("TCP(%s:%d), race in MSG_PEEK.\n",
+					current->comm, current->pid);
+			peek_seq = tp->copied_seq;
+		}
+		continue;
+
+found_ok_skb:
+		avail = skb->len - offset;
+		if (len < avail)
+			avail = len;
+		/*
+		 * Do we have urgent data here?  We need to skip over the
+		 * urgent byte.
+		 */
+		if (unlikely(tp->urg_data)) {
+			u32 urg_offset = tp->urg_seq - peek_seq;
+
+			if (urg_offset < avail) {
+				/*
+				 * The amount of data we are preparing to copy
+				 * contains urgent data.
+				 */
+				if (!urg_offset) { /* First byte is urgent */
+					if (!sock_flag(sk, SOCK_URGINLINE)) {
+						peek_seq++;
+						offset++;
+						avail--;
+					}
+					if (!avail)
+						continue;
+				} else {
+					/* stop short of the urgent data */
+					avail = urg_offset;
+				}
+			}
+		}
+
+		/*
+		 * If MSG_TRUNC is specified the data is discarded.
+		 */
+		if (likely(!(flags & MSG_TRUNC)))
+			if (skb_copy_datagram_msg(skb, offset, msg, len)) {
+				if (!copied) {
+					copied = -EFAULT;
+					break;
+				}
+			}
+		peek_seq += avail;
+		copied += avail;
+		len -= avail;
+	} while (len > 0);
+
+	release_sock(sk);
+	return copied;
+}
+
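+/*
+ * Main recvmsg entry point: OOB requests fall back to the base TCP
+ * recvmsg, MSG_PEEK is handled by peekmsg(), TLS RX connections use
+ * chtls_pt_recvmsg(), and everything else is copied straight from the
+ * offloaded receive queue.
+ */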
+int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+		  int nonblock, int flags, int *addr_len)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct chtls_sock *csk;
+	struct chtls_hws *hws;
+	unsigned long avail;    /* amount of available data in current skb */
+	int buffers_freed;
+	int copied = 0;
+	int request;
+	long timeo;
+	int target;             /* Read at least this many bytes */
+
+	buffers_freed = 0;
+
+	if (unlikely(flags & MSG_OOB))
+		return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
+					addr_len);
+
+	if (unlikely(flags & MSG_PEEK))
+		return peekmsg(sk, msg, len, nonblock, flags);
+
+	if (sk_can_busy_loop(sk) &&
+	    skb_queue_empty(&sk->sk_receive_queue) &&
+	    sk->sk_state == TCP_ESTABLISHED)
+		sk_busy_loop(sk, nonblock);
+
+	lock_sock(sk);
+	csk = rcu_dereference_sk_user_data(sk);
+	hws = &csk->tlshws;
+
+	if (is_tls_rx(csk))
+		return chtls_pt_recvmsg(sk, msg, len, nonblock,
+					flags, addr_len);
+
+	timeo = sock_rcvtimeo(sk, nonblock);
+	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+	request = len;
+
+	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
+		chtls_cleanup_rbuf(sk, copied);
+
+	do {
+		struct sk_buff *skb;
+		u32 offset;
+
+		if (unlikely(tp->urg_data && tp->urg_seq == tp->copied_seq)) {
+			if (copied)
+				break;
+			if (signal_pending(current)) {
+				copied = timeo ? sock_intr_errno(timeo) :
+					-EAGAIN;
+				break;
+			}
+		}
+
+		skb = skb_peek(&sk->sk_receive_queue);
+		if (skb)
+			goto found_ok_skb;
+
+		if (csk->wr_credits &&
+		    skb_queue_len(&csk->txq) &&
+		    chtls_push_frames(csk, csk->wr_credits ==
+				      csk->wr_max_credits))
+			sk->sk_write_space(sk);
+
+		if (copied >= target && !sk->sk_backlog.tail)
+			break;
+
+		if (copied) {
+			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
+			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
+			    signal_pending(current))
+				break;
+		} else {
+			if (sock_flag(sk, SOCK_DONE))
+				break;
+			if (sk->sk_err) {
+				copied = sock_error(sk);
+				break;
+			}
+			if (sk->sk_shutdown & RCV_SHUTDOWN)
+				break;
+			if (sk->sk_state == TCP_CLOSE) {
+				copied = -ENOTCONN;
+				break;
+			}
+			if (!timeo) {
+				copied = -EAGAIN;
+				break;
+			}
+			if (signal_pending(current)) {
+				copied = sock_intr_errno(timeo);
+				break;
+			}
+		}
+
+		if (sk->sk_backlog.tail) {
+			release_sock(sk);
+			lock_sock(sk);
+			chtls_cleanup_rbuf(sk, copied);
+			continue;
+		}
+
+		if (copied >= target)
+			break;
+		chtls_cleanup_rbuf(sk, copied);
+		sk_wait_data(sk, &timeo, NULL);
+		continue;
+
+found_ok_skb:
+		if (!skb->len) {
+			chtls_kfree_skb(sk, skb);
+			if (!copied && !timeo) {
+				copied = -EAGAIN;
+				break;
+			}
+
+			if (copied < target)
+				continue;
+
+			break;
+		}
+
+		offset = tp->copied_seq - ULP_SKB_CB(skb)->seq;
+		avail = skb->len - offset;
+		if (len < avail)
+			avail = len;
+
+		if (unlikely(tp->urg_data)) {
+			u32 urg_offset = tp->urg_seq - tp->copied_seq;
+
+			if (urg_offset < avail) {
+				if (urg_offset) {
+					avail = urg_offset;
+				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
+					tp->copied_seq++;
+					offset++;
+					avail--;
+					if (!avail)
+						goto skip_copy;
+				}
+			}
+		}
+
+		if (likely(!(flags & MSG_TRUNC))) {
+			if (skb_copy_datagram_msg(skb, offset,
+						  msg, avail)) {
+				if (!copied) {
+					copied = -EFAULT;
+					break;
+				}
+			}
+		}
+
+		tp->copied_seq += avail;
+		copied += avail;
+		len -= avail;
+
+skip_copy:
+		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
+			tp->urg_data = 0;
+
+		if (avail + offset >= skb->len) {
+			if (likely(skb))
+				chtls_free_skb(sk, skb);
+			buffers_freed++;
+
+			if  (copied >= target &&
+			     !skb_peek(&sk->sk_receive_queue))
+				break;
+		}
+	} while (len > 0);
+
+	if (buffers_freed)
+		chtls_cleanup_rbuf(sk, copied);
+
+	release_sock(sk);
+	return copied;
+}
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
new file mode 100644
index 0000000..f59b044
--- /dev/null
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Atul Gupta (atul.gupta@chelsio.com)
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/hash.h>
+#include <linux/in.h>
+#include <linux/net.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
+#include <net/tls.h>
+
+#include "chtls.h"
+#include "chtls_cm.h"
+
+#define DRV_NAME "chtls"
+
+/*
+ * chtls device management
+ * maintains a list of the chtls devices
+ */
+static LIST_HEAD(cdev_list);
+static DEFINE_MUTEX(cdev_mutex);
+static DEFINE_MUTEX(cdev_list_lock);
+
+static DEFINE_MUTEX(notify_mutex);
+static RAW_NOTIFIER_HEAD(listen_notify_list);
+static struct proto chtls_cpl_prot;
+struct request_sock_ops chtls_rsk_ops;
+static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;
+
+static void register_listen_notifier(struct notifier_block *nb)
+{
+	mutex_lock(&notify_mutex);
+	raw_notifier_chain_register(&listen_notify_list, nb);
+	mutex_unlock(&notify_mutex);
+}
+
+static void unregister_listen_notifier(struct notifier_block *nb)
+{
+	mutex_lock(&notify_mutex);
+	raw_notifier_chain_unregister(&listen_notify_list, nb);
+	mutex_unlock(&notify_mutex);
+}
+
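+/*
+ * Propagate a listen start/stop event to every chtls device on cdev_list.
+ * For CHTLS_LISTEN_START the return value of the last chtls_listen_start()
+ * call is reported back to the notifier chain.
+ */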
+static int listen_notify_handler(struct notifier_block *this,
+				 unsigned long event, void *data)
+{
+	struct chtls_dev *cdev;
+	struct sock *sk;
+	int ret;
+
+	sk = data;
+	ret = NOTIFY_DONE;
+
+	switch (event) {
+	case CHTLS_LISTEN_START:
+	case CHTLS_LISTEN_STOP:
+		mutex_lock(&cdev_list_lock);
+		list_for_each_entry(cdev, &cdev_list, list) {
+			if (event == CHTLS_LISTEN_START)
+				ret = chtls_listen_start(cdev, sk);
+			else
+				chtls_listen_stop(cdev, sk);
+		}
+		mutex_unlock(&cdev_list_lock);
+		break;
+	}
+	return ret;
+}
+
+static struct notifier_block listen_notifier = {
+	.notifier_call = listen_notify_handler
+};
+
+static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	if (likely(skb_transport_header(skb) != skb_network_header(skb)))
+		return tcp_v4_do_rcv(sk, skb);
+	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
+	return 0;
+}
+
+static int chtls_start_listen(struct sock *sk)
+{
+	int err;
+
+	if (sk->sk_protocol != IPPROTO_TCP)
+		return -EPROTONOSUPPORT;
+
+	if (sk->sk_family == PF_INET &&
+	    LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
+		return -EADDRNOTAVAIL;
+
+	sk->sk_backlog_rcv = listen_backlog_rcv;
+	mutex_lock(&notify_mutex);
+	err = raw_notifier_call_chain(&listen_notify_list,
+				      CHTLS_LISTEN_START, sk);
+	mutex_unlock(&notify_mutex);
+	return err;
+}
+
+static void chtls_stop_listen(struct sock *sk)
+{
+	if (sk->sk_protocol != IPPROTO_TCP)
+		return;
+
+	mutex_lock(&notify_mutex);
+	raw_notifier_call_chain(&listen_notify_list,
+				CHTLS_LISTEN_STOP, sk);
+	mutex_unlock(&notify_mutex);
+}
+
+static int chtls_inline_feature(struct tls_device *dev)
+{
+	struct net_device *netdev;
+	struct chtls_dev *cdev;
+	int i;
+
+	cdev = to_chtls_dev(dev);
+
+	for (i = 0; i < cdev->lldi->nports; i++) {
+		netdev = cdev->ports[i];
+		if (netdev->features & NETIF_F_HW_TLS_RECORD)
+			return 1;
+	}
+	return 0;
+}
+
+static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
+{
+	if (sk->sk_state == TCP_LISTEN)
+		return chtls_start_listen(sk);
+	return 0;
+}
+
+static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
+{
+	if (sk->sk_state == TCP_LISTEN)
+		chtls_stop_listen(sk);
+}
+
+static void chtls_register_dev(struct chtls_dev *cdev)
+{
+	struct tls_device *tlsdev = &cdev->tlsdev;
+
+	strlcpy(tlsdev->name, "chtls", TLS_DEVICE_NAME_MAX);
+	strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
+		TLS_DEVICE_NAME_MAX);
+	tlsdev->feature = chtls_inline_feature;
+	tlsdev->hash = chtls_create_hash;
+	tlsdev->unhash = chtls_destroy_hash;
+	tls_register_device(&cdev->tlsdev);
+	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
+}
+
+static void chtls_unregister_dev(struct chtls_dev *cdev)
+{
+	tls_unregister_device(&cdev->tlsdev);
+}
+
+static void process_deferq(struct work_struct *task_param)
+{
+	struct chtls_dev *cdev = container_of(task_param,
+				struct chtls_dev, deferq_task);
+	struct sk_buff *skb;
+
+	spin_lock_bh(&cdev->deferq.lock);
+	while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
+		spin_unlock_bh(&cdev->deferq.lock);
+		DEFERRED_SKB_CB(skb)->handler(cdev, skb);
+		spin_lock_bh(&cdev->deferq.lock);
+	}
+	spin_unlock_bh(&cdev->deferq.lock);
+}
+
+static int chtls_get_skb(struct chtls_dev *cdev)
+{
+	cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
+	if (!cdev->askb)
+		return -ENOMEM;
+
+	skb_put(cdev->askb, sizeof(struct tcphdr));
+	skb_reset_transport_header(cdev->askb);
+	memset(cdev->askb->data, 0, cdev->askb->len);
+	return 0;
+}
+
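+/*
+ * cxgb4 ULD ->add() callback, invoked once per adapter.  Allocates the
+ * per-adapter chtls_dev, takes a private copy of the lower-layer driver
+ * info, preallocates cdev->askb and the per-bin response skb cache, and
+ * links the device onto cdev_list.
+ */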
+static void *chtls_uld_add(const struct cxgb4_lld_info *info)
+{
+	struct cxgb4_lld_info *lldi;
+	struct chtls_dev *cdev;
+	int i, j;
+
+	cdev = kzalloc(sizeof(*cdev) + info->nports *
+		      (sizeof(struct net_device *)), GFP_KERNEL);
+	if (!cdev)
+		goto out;
+
+	lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
+	if (!lldi)
+		goto out_lldi;
+
+	if (chtls_get_skb(cdev))
+		goto out_skb;
+
+	*lldi = *info;
+	cdev->lldi = lldi;
+	cdev->pdev = lldi->pdev;
+	cdev->tids = lldi->tids;
+	cdev->ports = lldi->ports;
+	cdev->mtus = lldi->mtus;
+	cdev->tids = lldi->tids;
+	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
+			<< FW_VIID_PFN_S;
+
+	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
+		unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;
+
+		cdev->rspq_skb_cache[i] = __alloc_skb(size,
+						      gfp_any(), 0,
+						      lldi->nodeid);
+		if (unlikely(!cdev->rspq_skb_cache[i]))
+			goto out_rspq_skb;
+	}
+
+	idr_init(&cdev->hwtid_idr);
+	INIT_WORK(&cdev->deferq_task, process_deferq);
+	spin_lock_init(&cdev->listen_lock);
+	spin_lock_init(&cdev->idr_lock);
+	cdev->send_page_order = min_t(uint, get_order(32768),
+				      send_page_order);
+	cdev->max_host_sndbuf = 48 * 1024;
+
+	if (lldi->vr->key.size)
+		if (chtls_init_kmap(cdev, lldi))
+			goto out_rspq_skb;
+
+	mutex_lock(&cdev_mutex);
+	list_add_tail(&cdev->list, &cdev_list);
+	mutex_unlock(&cdev_mutex);
+
+	return cdev;
+out_rspq_skb:
+	for (j = 0; j < i; j++)
+		kfree_skb(cdev->rspq_skb_cache[j]);
+	kfree_skb(cdev->askb);
+out_skb:
+	kfree(lldi);
+out_lldi:
+	kfree(cdev);
+out:
+	return NULL;
+}
+
+static void chtls_free_uld(struct chtls_dev *cdev)
+{
+	int i;
+
+	chtls_unregister_dev(cdev);
+	kvfree(cdev->kmap.addr);
+	idr_destroy(&cdev->hwtid_idr);
+	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
+		kfree_skb(cdev->rspq_skb_cache[i]);
+	kfree(cdev->lldi);
+	if (cdev->askb)
+		kfree_skb(cdev->askb);
+	kfree(cdev);
+}
+
+static void chtls_free_all_uld(void)
+{
+	struct chtls_dev *cdev, *tmp;
+
+	mutex_lock(&cdev_mutex);
+	list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
+		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
+			chtls_free_uld(cdev);
+	}
+	mutex_unlock(&cdev_mutex);
+}
+
+static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
+{
+	struct chtls_dev *cdev = handle;
+
+	switch (new_state) {
+	case CXGB4_STATE_UP:
+		chtls_register_dev(cdev);
+		break;
+	case CXGB4_STATE_DOWN:
+		break;
+	case CXGB4_STATE_START_RECOVERY:
+		break;
+	case CXGB4_STATE_DETACH:
+		mutex_lock(&cdev_mutex);
+		list_del(&cdev->list);
+		mutex_unlock(&cdev_mutex);
+		chtls_free_uld(cdev);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
+					  const __be64 *rsp,
+					  u32 pktshift)
+{
+	struct sk_buff *skb;
+
+	/* Allocate space for the cpl_pass_accept_req that will be synthesized
+	 * by the driver. Once the driver has synthesized it, the skb goes
+	 * through the regular cpl_pass_accept_req processing in TOM.
+	 */
+	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
+			- pktshift, GFP_ATOMIC);
+	if (unlikely(!skb))
+		return NULL;
+	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
+		   - pktshift);
+	/* For now we copy the cpl_rx_pkt into the skb */
+	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
+	skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req),
+				       gl->va + pktshift,
+				       gl->tot_len - pktshift);
+
+	return skb;
+}
+
+static int chtls_recv_packet(struct chtls_dev *cdev,
+			     const struct pkt_gl *gl, const __be64 *rsp)
+{
+	unsigned int opcode = *(u8 *)rsp;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
+	if (!skb)
+		return -ENOMEM;
+
+	ret = chtls_handlers[opcode](cdev, skb);
+	if (ret & CPL_RET_BUF_DONE)
+		kfree_skb(skb);
+
+	return 0;
+}
+
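+/*
+ * Handle a CPL message that arrived without a packet gather list.  The
+ * response is copied into a small skb; the per-RSPQ-bin cached skb is
+ * reused when it is neither shared nor cloned, otherwise a fresh skb is
+ * allocated.
+ */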
+static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
+{
+	unsigned long rspq_bin;
+	unsigned int opcode;
+	struct sk_buff *skb;
+	unsigned int len;
+	int ret;
+
+	len = 64 - sizeof(struct rsp_ctrl) - 8;
+	opcode = *(u8 *)rsp;
+
+	rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
+	skb = cdev->rspq_skb_cache[rspq_bin];
+	if (skb && !skb_is_nonlinear(skb) &&
+	    !skb_shared(skb) && !skb_cloned(skb)) {
+		refcount_inc(&skb->users);
+		if (refcount_read(&skb->users) == 2) {
+			__skb_trim(skb, 0);
+			if (skb_tailroom(skb) >= len)
+				goto copy_out;
+		}
+		refcount_dec(&skb->users);
+	}
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (unlikely(!skb))
+		return -ENOMEM;
+
+copy_out:
+	__skb_put(skb, len);
+	skb_copy_to_linear_data(skb, rsp, len);
+	skb_reset_network_header(skb);
+	skb_reset_transport_header(skb);
+	ret = chtls_handlers[opcode](cdev, skb);
+
+	if (ret & CPL_RET_BUF_DONE)
+		kfree_skb(skb);
+	return 0;
+}
+
+static void chtls_recv(struct chtls_dev *cdev,
+		       struct sk_buff **skbs, const __be64 *rsp)
+{
+	struct sk_buff *skb = *skbs;
+	unsigned int opcode;
+	int ret;
+
+	opcode = *(u8 *)rsp;
+
+	__skb_push(skb, sizeof(struct rss_header));
+	skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));
+
+	ret = chtls_handlers[opcode](cdev, skb);
+	if (ret & CPL_RET_BUF_DONE)
+		kfree_skb(skb);
+}
+
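+/*
+ * Main receive hook called from cxgb4.  CPL_RX_PKT messages are rebuilt
+ * into a synthesized pass-accept request, responses without a gather list
+ * take the cached-skb path above, and everything else is converted to an
+ * skb and dispatched through chtls_handlers[].
+ */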
+static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
+				const struct pkt_gl *gl)
+{
+	struct chtls_dev *cdev = handle;
+	unsigned int opcode;
+	struct sk_buff *skb;
+
+	opcode = *(u8 *)rsp;
+
+	if (unlikely(opcode == CPL_RX_PKT)) {
+		if (chtls_recv_packet(cdev, gl, rsp) < 0)
+			goto nomem;
+		return 0;
+	}
+
+	if (!gl)
+		return chtls_recv_rsp(cdev, rsp);
+
+#define RX_PULL_LEN 128
+	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
+	if (unlikely(!skb))
+		goto nomem;
+	chtls_recv(cdev, &skb, rsp);
+	return 0;
+
+nomem:
+	return -ENOMEM;
+}
+
+static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
+			       int __user *optlen)
+{
+	struct tls_crypto_info crypto_info = { 0 };
+
+	crypto_info.version = TLS_1_2_VERSION;
+	if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
+		return -EFAULT;
+	return 0;
+}
+
+static int chtls_getsockopt(struct sock *sk, int level, int optname,
+			    char __user *optval, int __user *optlen)
+{
+	struct tls_context *ctx = tls_get_ctx(sk);
+
+	if (level != SOL_TLS)
+		return ctx->getsockopt(sk, level, optname, optval, optlen);
+
+	return do_chtls_getsockopt(sk, optval, optlen);
+}
+
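+/*
+ * Copy the TLS 1.2 AES-GCM-128 crypto parameters from userspace and program
+ * the key into hardware via chtls_setkey().  A minimal userspace sketch of
+ * how this is typically driven (assuming the standard kernel TLS UAPI from
+ * <linux/tls.h>; the values shown are illustrative only):
+ *
+ *	struct tls12_crypto_info_aes_gcm_128 ci = {
+ *		.info.version = TLS_1_2_VERSION,
+ *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
+ *	};
+ *	(fill ci.key, ci.iv, ci.salt and ci.rec_seq with session material)
+ *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
+ */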
+static int do_chtls_setsockopt(struct sock *sk, int optname,
+			       char __user *optval, unsigned int optlen)
+{
+	struct tls_crypto_info *crypto_info, tmp_crypto_info;
+	struct chtls_sock *csk;
+	int keylen;
+	int rc = 0;
+
+	csk = rcu_dereference_sk_user_data(sk);
+
+	if (!optval || optlen < sizeof(*crypto_info)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
+	if (rc) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	/* check version */
+	if (tmp_crypto_info.version != TLS_1_2_VERSION) {
+		rc = -ENOTSUPP;
+		goto out;
+	}
+
+	crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;
+
+	switch (tmp_crypto_info.cipher_type) {
+	case TLS_CIPHER_AES_GCM_128: {
+		/* Obtain version and type from previous copy */
+		crypto_info[0] = tmp_crypto_info;
+		/* Now copy the following data */
+		rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
+				optval + sizeof(*crypto_info),
+				sizeof(struct tls12_crypto_info_aes_gcm_128)
+				- sizeof(*crypto_info));
+
+		if (rc) {
+			rc = -EFAULT;
+			goto out;
+		}
+
+		keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
+		rc = chtls_setkey(csk, keylen, optname);
+		break;
+	}
+	default:
+		rc = -EINVAL;
+		goto out;
+	}
+out:
+	return rc;
+}
+
+static int chtls_setsockopt(struct sock *sk, int level, int optname,
+			    char __user *optval, unsigned int optlen)
+{
+	struct tls_context *ctx = tls_get_ctx(sk);
+
+	if (level != SOL_TLS)
+		return ctx->setsockopt(sk, level, optname, optval, optlen);
+
+	return do_chtls_setsockopt(sk, optname, optval, optlen);
+}
+
+static struct cxgb4_uld_info chtls_uld_info = {
+	.name = DRV_NAME,
+	.nrxq = MAX_ULD_QSETS,
+	.ntxq = MAX_ULD_QSETS,
+	.rxq_size = 1024,
+	.add = chtls_uld_add,
+	.state_change = chtls_uld_state_change,
+	.rx_handler = chtls_uld_rx_handler,
+};
+
+void chtls_install_cpl_ops(struct sock *sk)
+{
+	sk->sk_prot = &chtls_cpl_prot;
+}
+
+static void __init chtls_init_ulp_ops(void)
+{
+	chtls_cpl_prot			= tcp_prot;
+	chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
+			   &tcp_prot, PF_INET);
+	chtls_cpl_prot.close		= chtls_close;
+	chtls_cpl_prot.disconnect	= chtls_disconnect;
+	chtls_cpl_prot.destroy		= chtls_destroy_sock;
+	chtls_cpl_prot.shutdown		= chtls_shutdown;
+	chtls_cpl_prot.sendmsg		= chtls_sendmsg;
+	chtls_cpl_prot.sendpage		= chtls_sendpage;
+	chtls_cpl_prot.recvmsg		= chtls_recvmsg;
+	chtls_cpl_prot.setsockopt	= chtls_setsockopt;
+	chtls_cpl_prot.getsockopt	= chtls_getsockopt;
+}
+
+static int __init chtls_register(void)
+{
+	chtls_init_ulp_ops();
+	register_listen_notifier(&listen_notifier);
+	cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
+	return 0;
+}
+
+static void __exit chtls_unregister(void)
+{
+	unregister_listen_notifier(&listen_notifier);
+	chtls_free_all_uld();
+	cxgb4_unregister_uld(CXGB4_ULD_TLS);
+}
+
+module_init(chtls_register);
+module_exit(chtls_unregister);
+
+MODULE_DESCRIPTION("Chelsio TLS Inline driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
new file mode 100644
index 0000000..2cfabb9
--- /dev/null
+++ b/drivers/crypto/exynos-rng.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * exynos-rng.c - Random Number Generator driver for the Exynos
+ *
+ * Copyright (c) 2017 Krzysztof Kozlowski <krzk@kernel.org>
+ *
+ * Loosely based on old driver from drivers/char/hw_random/exynos-rng.c:
+ * Copyright (C) 2012 Samsung Electronics
+ * Jonghwa Lee <jonghwa3.lee@samsung.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <crypto/internal/rng.h>
+
+#define EXYNOS_RNG_CONTROL		0x0
+#define EXYNOS_RNG_STATUS		0x10
+
+#define EXYNOS_RNG_SEED_CONF		0x14
+#define EXYNOS_RNG_GEN_PRNG	        BIT(1)
+
+#define EXYNOS_RNG_SEED_BASE		0x140
+#define EXYNOS_RNG_SEED(n)		(EXYNOS_RNG_SEED_BASE + (n * 0x4))
+#define EXYNOS_RNG_OUT_BASE		0x160
+#define EXYNOS_RNG_OUT(n)		(EXYNOS_RNG_OUT_BASE + (n * 0x4))
+
+/* EXYNOS_RNG_CONTROL bit fields */
+#define EXYNOS_RNG_CONTROL_START	0x18
+/* EXYNOS_RNG_STATUS bit fields */
+#define EXYNOS_RNG_STATUS_SEED_SETTING_DONE	BIT(1)
+#define EXYNOS_RNG_STATUS_RNG_DONE		BIT(5)
+
+/* Five seed and output registers, each 4 bytes */
+#define EXYNOS_RNG_SEED_REGS		5
+#define EXYNOS_RNG_SEED_SIZE		(EXYNOS_RNG_SEED_REGS * 4)
+
+enum exynos_prng_type {
+	EXYNOS_PRNG_UNKNOWN = 0,
+	EXYNOS_PRNG_EXYNOS4,
+	EXYNOS_PRNG_EXYNOS5,
+};
+
+/*
+ * Driver re-seeds itself with generated random numbers to hinder
+ * backtracking of the original seed.
+ *
+ * Time for next re-seed in ms.
+ */
+#define EXYNOS_RNG_RESEED_TIME		1000
+#define EXYNOS_RNG_RESEED_BYTES		65536
+
+/*
+ * In polling mode, do not wait infinitely for the engine to finish the work.
+ */
+#define EXYNOS_RNG_WAIT_RETRIES		100
+
+/* Context for crypto */
+struct exynos_rng_ctx {
+	struct exynos_rng_dev		*rng;
+};
+
+/* Device associated memory */
+struct exynos_rng_dev {
+	struct device			*dev;
+	enum exynos_prng_type		type;
+	void __iomem			*mem;
+	struct clk			*clk;
+	struct mutex			lock;
+	/* Generated numbers stored for seeding during resume */
+	u8				seed_save[EXYNOS_RNG_SEED_SIZE];
+	unsigned int			seed_save_len;
+	/* Time of last seeding in jiffies */
+	unsigned long			last_seeding;
+	/* Bytes generated since last seeding */
+	unsigned long			bytes_seeding;
+};
+
+static struct exynos_rng_dev *exynos_rng_dev;
+
+static u32 exynos_rng_readl(struct exynos_rng_dev *rng, u32 offset)
+{
+	return readl_relaxed(rng->mem + offset);
+}
+
+static void exynos_rng_writel(struct exynos_rng_dev *rng, u32 val, u32 offset)
+{
+	writel_relaxed(val, rng->mem + offset);
+}
+
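+/*
+ * Load the seed into the five 32-bit seed registers (big-endian byte order)
+ * and check that the engine reports SEED_SETTING_DONE.  At least
+ * EXYNOS_RNG_SEED_SIZE bytes of seed are required.
+ */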
+static int exynos_rng_set_seed(struct exynos_rng_dev *rng,
+			       const u8 *seed, unsigned int slen)
+{
+	u32 val;
+	int i;
+
+	/* Round seed length because loop iterates over full register size */
+	slen = ALIGN_DOWN(slen, 4);
+
+	if (slen < EXYNOS_RNG_SEED_SIZE)
+		return -EINVAL;
+
+	for (i = 0; i < slen ; i += 4) {
+		unsigned int seed_reg = (i / 4) % EXYNOS_RNG_SEED_REGS;
+
+		val = seed[i] << 24;
+		val |= seed[i + 1] << 16;
+		val |= seed[i + 2] << 8;
+		val |= seed[i + 3] << 0;
+
+		exynos_rng_writel(rng, val, EXYNOS_RNG_SEED(seed_reg));
+	}
+
+	val = exynos_rng_readl(rng, EXYNOS_RNG_STATUS);
+	if (!(val & EXYNOS_RNG_STATUS_SEED_SETTING_DONE)) {
+		dev_warn(rng->dev, "Seed setting not finished\n");
+		return -EIO;
+	}
+
+	rng->last_seeding = jiffies;
+	rng->bytes_seeding = 0;
+
+	return 0;
+}
+
+/*
+ * Start the engine and poll for finish.  Then read from output registers
+ * filling the 'dst' buffer up to 'dlen' bytes or up to size of generated
+ * random data (EXYNOS_RNG_SEED_SIZE).
+ *
+ * On success: return 0 and store number of read bytes under 'read' address.
+ * On error: return -ERRNO.
+ */
+static int exynos_rng_get_random(struct exynos_rng_dev *rng,
+				 u8 *dst, unsigned int dlen,
+				 unsigned int *read)
+{
+	int retry = EXYNOS_RNG_WAIT_RETRIES;
+
+	if (rng->type == EXYNOS_PRNG_EXYNOS4) {
+		exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
+				  EXYNOS_RNG_CONTROL);
+	} else if (rng->type == EXYNOS_PRNG_EXYNOS5) {
+		exynos_rng_writel(rng, EXYNOS_RNG_GEN_PRNG,
+				  EXYNOS_RNG_SEED_CONF);
+	}
+
+	while (!(exynos_rng_readl(rng, EXYNOS_RNG_STATUS) &
+		 EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
+		cpu_relax();
+
+	if (!retry)
+		return -ETIMEDOUT;
+
+	/* Clear status bit */
+	exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE,
+			  EXYNOS_RNG_STATUS);
+	*read = min_t(size_t, dlen, EXYNOS_RNG_SEED_SIZE);
+	memcpy_fromio(dst, rng->mem + EXYNOS_RNG_OUT_BASE, *read);
+	rng->bytes_seeding += *read;
+
+	return 0;
+}
+
+/*
+ * Re-seed the engine with its own output once EXYNOS_RNG_RESEED_TIME has
+ * elapsed or EXYNOS_RNG_RESEED_BYTES bytes have been generated since the
+ * last seeding, whichever comes first.
+ */
+static void exynos_rng_reseed(struct exynos_rng_dev *rng)
+{
+	unsigned long next_seeding = rng->last_seeding +
+				     msecs_to_jiffies(EXYNOS_RNG_RESEED_TIME);
+	unsigned long now = jiffies;
+	unsigned int read = 0;
+	u8 seed[EXYNOS_RNG_SEED_SIZE];
+
+	if (time_before(now, next_seeding) &&
+	    rng->bytes_seeding < EXYNOS_RNG_RESEED_BYTES)
+		return;
+
+	if (exynos_rng_get_random(rng, seed, sizeof(seed), &read))
+		return;
+
+	exynos_rng_set_seed(rng, seed, read);
+
+	/* Let others do some of their job. */
+	mutex_unlock(&rng->lock);
+	mutex_lock(&rng->lock);
+}
+
+static int exynos_rng_generate(struct crypto_rng *tfm,
+			       const u8 *src, unsigned int slen,
+			       u8 *dst, unsigned int dlen)
+{
+	struct exynos_rng_ctx *ctx = crypto_rng_ctx(tfm);
+	struct exynos_rng_dev *rng = ctx->rng;
+	unsigned int read = 0;
+	int ret;
+
+	ret = clk_prepare_enable(rng->clk);
+	if (ret)
+		return ret;
+
+	mutex_lock(&rng->lock);
+	do {
+		ret = exynos_rng_get_random(rng, dst, dlen, &read);
+		if (ret)
+			break;
+
+		dlen -= read;
+		dst += read;
+
+		exynos_rng_reseed(rng);
+	} while (dlen > 0);
+	mutex_unlock(&rng->lock);
+
+	clk_disable_unprepare(rng->clk);
+
+	return ret;
+}
+
+static int exynos_rng_seed(struct crypto_rng *tfm, const u8 *seed,
+			   unsigned int slen)
+{
+	struct exynos_rng_ctx *ctx = crypto_rng_ctx(tfm);
+	struct exynos_rng_dev *rng = ctx->rng;
+	int ret;
+
+	ret = clk_prepare_enable(rng->clk);
+	if (ret)
+		return ret;
+
+	mutex_lock(&rng->lock);
+	ret = exynos_rng_set_seed(ctx->rng, seed, slen);
+	mutex_unlock(&rng->lock);
+
+	clk_disable_unprepare(rng->clk);
+
+	return ret;
+}
+
+static int exynos_rng_kcapi_init(struct crypto_tfm *tfm)
+{
+	struct exynos_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->rng = exynos_rng_dev;
+
+	return 0;
+}
+
+static struct rng_alg exynos_rng_alg = {
+	.generate		= exynos_rng_generate,
+	.seed			= exynos_rng_seed,
+	.seedsize		= EXYNOS_RNG_SEED_SIZE,
+	.base			= {
+		.cra_name		= "stdrng",
+		.cra_driver_name	= "exynos_rng",
+		.cra_priority		= 300,
+		.cra_ctxsize		= sizeof(struct exynos_rng_ctx),
+		.cra_module		= THIS_MODULE,
+		.cra_init		= exynos_rng_kcapi_init,
+	}
+};
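+
+/*
+ * Consumers obtain this PRNG through the generic crypto RNG API, e.g.
+ * (illustrative sketch only):
+ *
+ *	struct crypto_rng *rng = crypto_alloc_rng("stdrng", 0, 0);
+ *	crypto_rng_reset(rng, seed, slen);
+ *	crypto_rng_get_bytes(rng, buf, len);
+ *	crypto_free_rng(rng);
+ */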
+
+static int exynos_rng_probe(struct platform_device *pdev)
+{
+	struct exynos_rng_dev *rng;
+	struct resource *res;
+	int ret;
+
+	if (exynos_rng_dev)
+		return -EEXIST;
+
+	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+	if (!rng)
+		return -ENOMEM;
+
+	rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);
+
+	mutex_init(&rng->lock);
+
+	rng->dev = &pdev->dev;
+	rng->clk = devm_clk_get(&pdev->dev, "secss");
+	if (IS_ERR(rng->clk)) {
+		dev_err(&pdev->dev, "Couldn't get clock.\n");
+		return PTR_ERR(rng->clk);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rng->mem = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rng->mem))
+		return PTR_ERR(rng->mem);
+
+	platform_set_drvdata(pdev, rng);
+
+	exynos_rng_dev = rng;
+
+	ret = crypto_register_rng(&exynos_rng_alg);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Couldn't register rng crypto alg: %d\n", ret);
+		exynos_rng_dev = NULL;
+	}
+
+	return ret;
+}
+
+static int exynos_rng_remove(struct platform_device *pdev)
+{
+	crypto_unregister_rng(&exynos_rng_alg);
+
+	exynos_rng_dev = NULL;
+
+	return 0;
+}
+
+static int __maybe_unused exynos_rng_suspend(struct device *dev)
+{
+	struct exynos_rng_dev *rng = dev_get_drvdata(dev);
+	int ret;
+
+	/* If we were never seeded then after resume it will be the same */
+	if (!rng->last_seeding)
+		return 0;
+
+	rng->seed_save_len = 0;
+	ret = clk_prepare_enable(rng->clk);
+	if (ret)
+		return ret;
+
+	mutex_lock(&rng->lock);
+
+	/* Get new random numbers and store them for seeding on resume. */
+	exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save),
+			      &(rng->seed_save_len));
+
+	mutex_unlock(&rng->lock);
+
+	dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n",
+		rng->seed_save_len);
+
+	clk_disable_unprepare(rng->clk);
+
+	return 0;
+}
+
+static int __maybe_unused exynos_rng_resume(struct device *dev)
+{
+	struct exynos_rng_dev *rng = dev_get_drvdata(dev);
+	int ret;
+
+	/* Never seeded so nothing to do */
+	if (!rng->last_seeding)
+		return 0;
+
+	ret = clk_prepare_enable(rng->clk);
+	if (ret)
+		return ret;
+
+	mutex_lock(&rng->lock);
+
+	ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len);
+
+	mutex_unlock(&rng->lock);
+
+	clk_disable_unprepare(rng->clk);
+
+	return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_suspend,
+			 exynos_rng_resume);
+
+static const struct of_device_id exynos_rng_dt_match[] = {
+	{
+		.compatible = "samsung,exynos4-rng",
+		.data = (const void *)EXYNOS_PRNG_EXYNOS4,
+	}, {
+		.compatible = "samsung,exynos5250-prng",
+		.data = (const void *)EXYNOS_PRNG_EXYNOS5,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, exynos_rng_dt_match);
+
+static struct platform_driver exynos_rng_driver = {
+	.driver		= {
+		.name	= "exynos-rng",
+		.pm	= &exynos_rng_pm_ops,
+		.of_match_table = exynos_rng_dt_match,
+	},
+	.probe		= exynos_rng_probe,
+	.remove		= exynos_rng_remove,
+};
+
+module_platform_driver(exynos_rng_driver);
+
+MODULE_DESCRIPTION("Exynos H/W Random Number Generator driver");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
new file mode 100644
index 0000000..eb2a0a7
--- /dev/null
+++ b/drivers/crypto/geode-aes.c
@@ -0,0 +1,593 @@
+ /* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "geode-aes.h"
+
+/* Static structures */
+
+static void __iomem *_iobase;
+static spinlock_t lock;
+
+/* Write a 128 bit field (either a writable key or IV) */
+static inline void
+_writefield(u32 offset, void *value)
+{
+	int i;
+
+	for (i = 0; i < 4; i++)
+		iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
+}
+
+/* Read a 128 bit field (either a writable key or IV) */
+static inline void
+_readfield(u32 offset, void *value)
+{
+	int i;
+
+	for (i = 0; i < 4; i++)
+		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
+}
+
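+/*
+ * Program the source/destination physical addresses and length, start the
+ * operation, then poll AES_INTR_REG until AES_INTRA_PENDING is set or the
+ * AES_OP_TIMEOUT counter expires.  Returns 0 on completion, 1 on timeout.
+ */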
+static int
+do_crypt(void *src, void *dst, int len, u32 flags)
+{
+	u32 status;
+	u32 counter = AES_OP_TIMEOUT;
+
+	iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
+	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
+	iowrite32(len,  _iobase + AES_LENA_REG);
+
+	/* Start the operation */
+	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);
+
+	do {
+		status = ioread32(_iobase + AES_INTR_REG);
+		cpu_relax();
+	} while (!(status & AES_INTRA_PENDING) && --counter);
+
+	/* Clear the event */
+	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
+	return counter ? 0 : 1;
+}
+
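+/*
+ * Run one operation through the AES engine.  Access to the single engine
+ * is serialized by the driver spinlock; the IV is loaded for CBC mode and
+ * the key register is written unless the hidden-key flag is set.
+ */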
+static unsigned int
+geode_aes_crypt(struct geode_aes_op *op)
+{
+	u32 flags = 0;
+	unsigned long iflags;
+	int ret;
+
+	if (op->len == 0)
+		return 0;
+
+	/* If the source and destination are the same, we need to turn
+	 * on the coherency flags; otherwise we don't need to worry.
+	 */
+
+	flags |= (AES_CTRL_DCA | AES_CTRL_SCA);
+
+	if (op->dir == AES_DIR_ENCRYPT)
+		flags |= AES_CTRL_ENCRYPT;
+
+	/* Start the critical section */
+
+	spin_lock_irqsave(&lock, iflags);
+
+	if (op->mode == AES_MODE_CBC) {
+		flags |= AES_CTRL_CBC;
+		_writefield(AES_WRITEIV0_REG, op->iv);
+	}
+
+	if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
+		flags |= AES_CTRL_WRKEY;
+		_writefield(AES_WRITEKEY0_REG, op->key);
+	}
+
+	ret = do_crypt(op->src, op->dst, op->len, flags);
+	BUG_ON(ret);
+
+	if (op->mode == AES_MODE_CBC)
+		_readfield(AES_WRITEIV0_REG, op->iv);
+
+	spin_unlock_irqrestore(&lock, iflags);
+
+	return op->len;
+}
+
+/* CRYPTO-API Functions */
+
+static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
+		unsigned int len)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+	unsigned int ret;
+
+	op->keylen = len;
+
+	if (len == AES_KEYSIZE_128) {
+		memcpy(op->key, key, len);
+		return 0;
+	}
+
+	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+		/* not supported at all */
+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+
+	/*
+	 * The requested key size is not supported by HW, do a fallback
+	 */
+	op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+		unsigned int len)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+	unsigned int ret;
+
+	op->keylen = len;
+
+	if (len == AES_KEYSIZE_128) {
+		memcpy(op->key, key, len);
+		return 0;
+	}
+
+	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+		/* not supported at all */
+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+
+	/*
+	 * The requested key size is not supported by HW, do a fallback
+	 */
+	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int fallback_blk_dec(struct blkcipher_desc *desc,
+		struct scatterlist *dst, struct scatterlist *src,
+		unsigned int nbytes)
+{
+	unsigned int ret;
+	struct crypto_blkcipher *tfm;
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+
+	tfm = desc->tfm;
+	desc->tfm = op->fallback.blk;
+
+	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+	desc->tfm = tfm;
+	return ret;
+}
+static int fallback_blk_enc(struct blkcipher_desc *desc,
+		struct scatterlist *dst, struct scatterlist *src,
+		unsigned int nbytes)
+{
+	unsigned int ret;
+	struct crypto_blkcipher *tfm;
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+
+	tfm = desc->tfm;
+	desc->tfm = op->fallback.blk;
+
+	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+	desc->tfm = tfm;
+	return ret;
+}
+
+static void
+geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
+		crypto_cipher_encrypt_one(op->fallback.cip, out, in);
+		return;
+	}
+
+	op->src = (void *) in;
+	op->dst = (void *) out;
+	op->mode = AES_MODE_ECB;
+	op->flags = 0;
+	op->len = AES_BLOCK_SIZE;
+	op->dir = AES_DIR_ENCRYPT;
+
+	geode_aes_crypt(op);
+}
+
+
+static void
+geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
+		crypto_cipher_decrypt_one(op->fallback.cip, out, in);
+		return;
+	}
+
+	op->src = (void *) in;
+	op->dst = (void *) out;
+	op->mode = AES_MODE_ECB;
+	op->flags = 0;
+	op->len = AES_BLOCK_SIZE;
+	op->dir = AES_DIR_DECRYPT;
+
+	geode_aes_crypt(op);
+}
+
+static int fallback_init_cip(struct crypto_tfm *tfm)
+{
+	const char *name = crypto_tfm_alg_name(tfm);
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	op->fallback.cip = crypto_alloc_cipher(name, 0,
+				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(op->fallback.cip)) {
+		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+		return PTR_ERR(op->fallback.cip);
+	}
+
+	return 0;
+}
+
+static void fallback_exit_cip(struct crypto_tfm *tfm)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	crypto_free_cipher(op->fallback.cip);
+	op->fallback.cip = NULL;
+}
+
+static struct crypto_alg geode_alg = {
+	.cra_name			=	"aes",
+	.cra_driver_name	=	"geode-aes",
+	.cra_priority		=	300,
+	.cra_alignmask		=	15,
+	.cra_flags			=	CRYPTO_ALG_TYPE_CIPHER |
+							CRYPTO_ALG_NEED_FALLBACK,
+	.cra_init			=	fallback_init_cip,
+	.cra_exit			=	fallback_exit_cip,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct geode_aes_op),
+	.cra_module			=	THIS_MODULE,
+	.cra_u				=	{
+		.cipher	=	{
+			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
+			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
+			.cia_setkey			=	geode_setkey_cip,
+			.cia_encrypt		=	geode_encrypt,
+			.cia_decrypt		=	geode_decrypt
+		}
+	}
+};
+
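+/*
+ * The CBC/ECB block handlers below share the same pattern: keys other than
+ * 128 bits go to the software fallback, otherwise the blkcipher walk feeds
+ * AES_BLOCK_SIZE-aligned chunks to the hardware engine.
+ */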
+static int
+geode_cbc_decrypt(struct blkcipher_desc *desc,
+		  struct scatterlist *dst, struct scatterlist *src,
+		  unsigned int nbytes)
+{
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err, ret;
+
+	if (unlikely(op->keylen != AES_KEYSIZE_128))
+		return fallback_blk_dec(desc, dst, src, nbytes);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+	op->iv = walk.iv;
+
+	while ((nbytes = walk.nbytes)) {
+		op->src = walk.src.virt.addr;
+		op->dst = walk.dst.virt.addr;
+		op->mode = AES_MODE_CBC;
+		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
+		op->dir = AES_DIR_DECRYPT;
+
+		ret = geode_aes_crypt(op);
+
+		nbytes -= ret;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static int
+geode_cbc_encrypt(struct blkcipher_desc *desc,
+		  struct scatterlist *dst, struct scatterlist *src,
+		  unsigned int nbytes)
+{
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err, ret;
+
+	if (unlikely(op->keylen != AES_KEYSIZE_128))
+		return fallback_blk_enc(desc, dst, src, nbytes);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+	op->iv = walk.iv;
+
+	while ((nbytes = walk.nbytes)) {
+		op->src = walk.src.virt.addr;
+		op->dst = walk.dst.virt.addr;
+		op->mode = AES_MODE_CBC;
+		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
+		op->dir = AES_DIR_ENCRYPT;
+
+		ret = geode_aes_crypt(op);
+		nbytes -= ret;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static int fallback_init_blk(struct crypto_tfm *tfm)
+{
+	const char *name = crypto_tfm_alg_name(tfm);
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(op->fallback.blk)) {
+		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+		return PTR_ERR(op->fallback.blk);
+	}
+
+	return 0;
+}
+
+static void fallback_exit_blk(struct crypto_tfm *tfm)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	crypto_free_blkcipher(op->fallback.blk);
+	op->fallback.blk = NULL;
+}
+
+static struct crypto_alg geode_cbc_alg = {
+	.cra_name		=	"cbc(aes)",
+	.cra_driver_name	=	"cbc-aes-geode",
+	.cra_priority		=	400,
+	.cra_flags			=	CRYPTO_ALG_TYPE_BLKCIPHER |
+						CRYPTO_ALG_KERN_DRIVER_ONLY |
+						CRYPTO_ALG_NEED_FALLBACK,
+	.cra_init			=	fallback_init_blk,
+	.cra_exit			=	fallback_exit_blk,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct geode_aes_op),
+	.cra_alignmask		=	15,
+	.cra_type			=	&crypto_blkcipher_type,
+	.cra_module			=	THIS_MODULE,
+	.cra_u				=	{
+		.blkcipher	=	{
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey			=	geode_setkey_blk,
+			.encrypt		=	geode_cbc_encrypt,
+			.decrypt		=	geode_cbc_decrypt,
+			.ivsize			=	AES_BLOCK_SIZE,
+		}
+	}
+};
+
+static int
+geode_ecb_decrypt(struct blkcipher_desc *desc,
+		  struct scatterlist *dst, struct scatterlist *src,
+		  unsigned int nbytes)
+{
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err, ret;
+
+	if (unlikely(op->keylen != AES_KEYSIZE_128))
+		return fallback_blk_dec(desc, dst, src, nbytes);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		op->src = walk.src.virt.addr;
+		op->dst = walk.dst.virt.addr;
+		op->mode = AES_MODE_ECB;
+		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
+		op->dir = AES_DIR_DECRYPT;
+
+		ret = geode_aes_crypt(op);
+		nbytes -= ret;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static int
+geode_ecb_encrypt(struct blkcipher_desc *desc,
+		  struct scatterlist *dst, struct scatterlist *src,
+		  unsigned int nbytes)
+{
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err, ret;
+
+	if (unlikely(op->keylen != AES_KEYSIZE_128))
+		return fallback_blk_enc(desc, dst, src, nbytes);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		op->src = walk.src.virt.addr;
+		op->dst = walk.dst.virt.addr;
+		op->mode = AES_MODE_ECB;
+		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
+		op->dir = AES_DIR_ENCRYPT;
+
+		ret = geode_aes_crypt(op);
+		nbytes -= ret;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static struct crypto_alg geode_ecb_alg = {
+	.cra_name			=	"ecb(aes)",
+	.cra_driver_name	=	"ecb-aes-geode",
+	.cra_priority		=	400,
+	.cra_flags			=	CRYPTO_ALG_TYPE_BLKCIPHER |
+						CRYPTO_ALG_KERN_DRIVER_ONLY |
+						CRYPTO_ALG_NEED_FALLBACK,
+	.cra_init			=	fallback_init_blk,
+	.cra_exit			=	fallback_exit_blk,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct geode_aes_op),
+	.cra_alignmask		=	15,
+	.cra_type			=	&crypto_blkcipher_type,
+	.cra_module			=	THIS_MODULE,
+	.cra_u				=	{
+		.blkcipher	=	{
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey			=	geode_setkey_blk,
+			.encrypt		=	geode_ecb_encrypt,
+			.decrypt		=	geode_ecb_decrypt,
+		}
+	}
+};
+
+static void geode_aes_remove(struct pci_dev *dev)
+{
+	crypto_unregister_alg(&geode_alg);
+	crypto_unregister_alg(&geode_ecb_alg);
+	crypto_unregister_alg(&geode_cbc_alg);
+
+	pci_iounmap(dev, _iobase);
+	_iobase = NULL;
+
+	pci_release_regions(dev);
+	pci_disable_device(dev);
+}
+
+
+static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	int ret;
+
+	ret = pci_enable_device(dev);
+	if (ret)
+		return ret;
+
+	ret = pci_request_regions(dev, "geode-aes");
+	if (ret)
+		goto eenable;
+
+	_iobase = pci_iomap(dev, 0, 0);
+
+	if (_iobase == NULL) {
+		ret = -ENOMEM;
+		goto erequest;
+	}
+
+	spin_lock_init(&lock);
+
+	/* Clear any pending activity */
+	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
+
+	ret = crypto_register_alg(&geode_alg);
+	if (ret)
+		goto eiomap;
+
+	ret = crypto_register_alg(&geode_ecb_alg);
+	if (ret)
+		goto ealg;
+
+	ret = crypto_register_alg(&geode_cbc_alg);
+	if (ret)
+		goto eecb;
+
+	dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
+	return 0;
+
+ eecb:
+	crypto_unregister_alg(&geode_ecb_alg);
+
+ ealg:
+	crypto_unregister_alg(&geode_alg);
+
+ eiomap:
+	pci_iounmap(dev, _iobase);
+
+ erequest:
+	pci_release_regions(dev);
+
+ eenable:
+	pci_disable_device(dev);
+
+	dev_err(&dev->dev, "GEODE AES initialization failed.\n");
+	return ret;
+}
+
+static struct pci_device_id geode_aes_tbl[] = {
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, geode_aes_tbl);
+
+static struct pci_driver geode_aes_driver = {
+	.name = "Geode LX AES",
+	.id_table = geode_aes_tbl,
+	.probe = geode_aes_probe,
+	.remove = geode_aes_remove,
+};
+
+module_pci_driver(geode_aes_driver);
+
+MODULE_AUTHOR("Advanced Micro Devices, Inc.");
+MODULE_DESCRIPTION("Geode LX Hardware AES driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
new file mode 100644
index 0000000..f442ca9
--- /dev/null
+++ b/drivers/crypto/geode-aes.h
@@ -0,0 +1,73 @@
+/* Copyright (C) 2003-2006, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _GEODE_AES_H_
+#define _GEODE_AES_H_
+
+/* driver logic flags */
+#define AES_MODE_ECB 0
+#define AES_MODE_CBC 1
+
+#define AES_DIR_DECRYPT 0
+#define AES_DIR_ENCRYPT 1
+
+#define AES_FLAGS_HIDDENKEY (1 << 0)
+
+/* Register definitions */
+
+#define AES_CTRLA_REG  0x0000
+
+#define AES_CTRL_START     0x01
+#define AES_CTRL_DECRYPT   0x00
+#define AES_CTRL_ENCRYPT   0x02
+#define AES_CTRL_WRKEY     0x04
+#define AES_CTRL_DCA       0x08
+#define AES_CTRL_SCA       0x10
+#define AES_CTRL_CBC       0x20
+
+#define AES_INTR_REG  0x0008
+
+#define AES_INTRA_PENDING (1 << 16)
+#define AES_INTRB_PENDING (1 << 17)
+
+#define AES_INTR_PENDING  (AES_INTRA_PENDING | AES_INTRB_PENDING)
+#define AES_INTR_MASK     0x07
+
+#define AES_SOURCEA_REG   0x0010
+#define AES_DSTA_REG      0x0014
+#define AES_LENA_REG      0x0018
+#define AES_WRITEKEY0_REG 0x0030
+#define AES_WRITEIV0_REG  0x0040
+
+/*  A very large counter that is used to gracefully bail out of an
+ *  operation in case of trouble
+ */
+
+#define AES_OP_TIMEOUT    0x50000
+
+struct geode_aes_op {
+
+	void *src;
+	void *dst;
+
+	u32 mode;
+	u32 dir;
+	u32 flags;
+	int len;
+
+	u8 key[AES_KEYSIZE_128];
+	u8 *iv;
+
+	union {
+		struct crypto_blkcipher *blk;
+		struct crypto_cipher *cip;
+	} fallback;
+	u32 keylen;
+};
+
+#endif
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
new file mode 100644
index 0000000..a5a36fe
--- /dev/null
+++ b/drivers/crypto/hifn_795x.c
@@ -0,0 +1,2694 @@
+/*
+ * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
+#include <linux/crypto.h>
+#include <linux/hw_random.h>
+#include <linux/ktime.h>
+
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+
+static char hifn_pll_ref[sizeof("extNNN")] = "ext";
+module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
+MODULE_PARM_DESC(hifn_pll_ref,
+		 "PLL reference clock (pci[freq] or ext[freq], default ext)");
+
+static atomic_t hifn_dev_number;
+
+#define ACRYPTO_OP_DECRYPT	0
+#define ACRYPTO_OP_ENCRYPT	1
+#define ACRYPTO_OP_HMAC		2
+#define ACRYPTO_OP_RNG		3
+
+#define ACRYPTO_MODE_ECB		0
+#define ACRYPTO_MODE_CBC		1
+#define ACRYPTO_MODE_CFB		2
+#define ACRYPTO_MODE_OFB		3
+
+#define ACRYPTO_TYPE_AES_128	0
+#define ACRYPTO_TYPE_AES_192	1
+#define ACRYPTO_TYPE_AES_256	2
+#define ACRYPTO_TYPE_3DES	3
+#define ACRYPTO_TYPE_DES	4
+
+#define PCI_VENDOR_ID_HIFN		0x13A3
+#define PCI_DEVICE_ID_HIFN_7955		0x0020
+#define	PCI_DEVICE_ID_HIFN_7956		0x001d
+
+/* I/O region sizes */
+
+#define HIFN_BAR0_SIZE			0x1000
+#define HIFN_BAR1_SIZE			0x2000
+#define HIFN_BAR2_SIZE			0x8000
+
+/* DMA registers */
+
+#define HIFN_DMA_CRA			0x0C	/* DMA Command Ring Address */
+#define HIFN_DMA_SDRA			0x1C	/* DMA Source Data Ring Address */
+#define HIFN_DMA_RRA			0x2C	/* DMA Result Ring Address */
+#define HIFN_DMA_DDRA			0x3C	/* DMA Destination Data Ring Address */
+#define HIFN_DMA_STCTL			0x40	/* DMA Status and Control */
+#define HIFN_DMA_INTREN			0x44	/* DMA Interrupt Enable */
+#define HIFN_DMA_CFG1			0x48	/* DMA Configuration #1 */
+#define HIFN_DMA_CFG2			0x6C	/* DMA Configuration #2 */
+#define HIFN_CHIP_ID			0x98	/* Chip ID */
+
+/*
+ * Processing Unit Registers (offset from BASEREG0)
+ */
+#define	HIFN_0_PUDATA		0x00	/* Processing Unit Data */
+#define	HIFN_0_PUCTRL		0x04	/* Processing Unit Control */
+#define	HIFN_0_PUISR		0x08	/* Processing Unit Interrupt Status */
+#define	HIFN_0_PUCNFG		0x0c	/* Processing Unit Configuration */
+#define	HIFN_0_PUIER		0x10	/* Processing Unit Interrupt Enable */
+#define	HIFN_0_PUSTAT		0x14	/* Processing Unit Status/Chip ID */
+#define	HIFN_0_FIFOSTAT		0x18	/* FIFO Status */
+#define	HIFN_0_FIFOCNFG		0x1c	/* FIFO Configuration */
+#define	HIFN_0_SPACESIZE	0x20	/* Register space size */
+
+/* Processing Unit Control Register (HIFN_0_PUCTRL) */
+#define	HIFN_PUCTRL_CLRSRCFIFO	0x0010	/* clear source fifo */
+#define	HIFN_PUCTRL_STOP	0x0008	/* stop pu */
+#define	HIFN_PUCTRL_LOCKRAM	0x0004	/* lock ram */
+#define	HIFN_PUCTRL_DMAENA	0x0002	/* enable dma */
+#define	HIFN_PUCTRL_RESET	0x0001	/* Reset processing unit */
+
+/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
+#define	HIFN_PUISR_CMDINVAL	0x8000	/* Invalid command interrupt */
+#define	HIFN_PUISR_DATAERR	0x4000	/* Data error interrupt */
+#define	HIFN_PUISR_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
+#define	HIFN_PUISR_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
+#define	HIFN_PUISR_DSTOVER	0x0200	/* Destination overrun interrupt */
+#define	HIFN_PUISR_SRCCMD	0x0080	/* Source command interrupt */
+#define	HIFN_PUISR_SRCCTX	0x0040	/* Source context interrupt */
+#define	HIFN_PUISR_SRCDATA	0x0020	/* Source data interrupt */
+#define	HIFN_PUISR_DSTDATA	0x0010	/* Destination data interrupt */
+#define	HIFN_PUISR_DSTRESULT	0x0004	/* Destination result interrupt */
+
+/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
+#define	HIFN_PUCNFG_DRAMMASK	0xe000	/* DRAM size mask */
+#define	HIFN_PUCNFG_DSZ_256K	0x0000	/* 256k dram */
+#define	HIFN_PUCNFG_DSZ_512K	0x2000	/* 512k dram */
+#define	HIFN_PUCNFG_DSZ_1M	0x4000	/* 1m dram */
+#define	HIFN_PUCNFG_DSZ_2M	0x6000	/* 2m dram */
+#define	HIFN_PUCNFG_DSZ_4M	0x8000	/* 4m dram */
+#define	HIFN_PUCNFG_DSZ_8M	0xa000	/* 8m dram */
+#define	HIFN_PUNCFG_DSZ_16M	0xc000	/* 16m dram */
+#define	HIFN_PUCNFG_DSZ_32M	0xe000	/* 32m dram */
+#define	HIFN_PUCNFG_DRAMREFRESH	0x1800	/* DRAM refresh rate mask */
+#define	HIFN_PUCNFG_DRFR_512	0x0000	/* 512 divisor of ECLK */
+#define	HIFN_PUCNFG_DRFR_256	0x0800	/* 256 divisor of ECLK */
+#define	HIFN_PUCNFG_DRFR_128	0x1000	/* 128 divisor of ECLK */
+#define	HIFN_PUCNFG_TCALLPHASES	0x0200	/* your guess is as good as mine... */
+#define	HIFN_PUCNFG_TCDRVTOTEM	0x0100	/* your guess is as good as mine... */
+#define	HIFN_PUCNFG_BIGENDIAN	0x0080	/* DMA big endian mode */
+#define	HIFN_PUCNFG_BUS32	0x0040	/* Bus width 32bits */
+#define	HIFN_PUCNFG_BUS16	0x0000	/* Bus width 16 bits */
+#define	HIFN_PUCNFG_CHIPID	0x0020	/* Allow chipid from PUSTAT */
+#define	HIFN_PUCNFG_DRAM	0x0010	/* Context RAM is DRAM */
+#define	HIFN_PUCNFG_SRAM	0x0000	/* Context RAM is SRAM */
+#define	HIFN_PUCNFG_COMPSING	0x0004	/* Enable single compression context */
+#define	HIFN_PUCNFG_ENCCNFG	0x0002	/* Encryption configuration */
+
+/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
+#define	HIFN_PUIER_CMDINVAL	0x8000	/* Invalid command interrupt */
+#define	HIFN_PUIER_DATAERR	0x4000	/* Data error interrupt */
+#define	HIFN_PUIER_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
+#define	HIFN_PUIER_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
+#define	HIFN_PUIER_DSTOVER	0x0200	/* Destination overrun interrupt */
+#define	HIFN_PUIER_SRCCMD	0x0080	/* Source command interrupt */
+#define	HIFN_PUIER_SRCCTX	0x0040	/* Source context interrupt */
+#define	HIFN_PUIER_SRCDATA	0x0020	/* Source data interrupt */
+#define	HIFN_PUIER_DSTDATA	0x0010	/* Destination data interrupt */
+#define	HIFN_PUIER_DSTRESULT	0x0004	/* Destination result interrupt */
+
+/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
+#define	HIFN_PUSTAT_CMDINVAL	0x8000	/* Invalid command interrupt */
+#define	HIFN_PUSTAT_DATAERR	0x4000	/* Data error interrupt */
+#define	HIFN_PUSTAT_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
+#define	HIFN_PUSTAT_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
+#define	HIFN_PUSTAT_DSTOVER	0x0200	/* Destination overrun interrupt */
+#define	HIFN_PUSTAT_SRCCMD	0x0080	/* Source command interrupt */
+#define	HIFN_PUSTAT_SRCCTX	0x0040	/* Source context interrupt */
+#define	HIFN_PUSTAT_SRCDATA	0x0020	/* Source data interrupt */
+#define	HIFN_PUSTAT_DSTDATA	0x0010	/* Destination data interrupt */
+#define	HIFN_PUSTAT_DSTRESULT	0x0004	/* Destination result interrupt */
+#define	HIFN_PUSTAT_CHIPREV	0x00ff	/* Chip revision mask */
+#define	HIFN_PUSTAT_CHIPENA	0xff00	/* Chip enabled mask */
+#define	HIFN_PUSTAT_ENA_2	0x1100	/* Level 2 enabled */
+#define	HIFN_PUSTAT_ENA_1	0x1000	/* Level 1 enabled */
+#define	HIFN_PUSTAT_ENA_0	0x3000	/* Level 0 enabled */
+#define	HIFN_PUSTAT_REV_2	0x0020	/* 7751 PT6/2 */
+#define	HIFN_PUSTAT_REV_3	0x0030	/* 7751 PT6/3 */
+
+/* FIFO Status Register (HIFN_0_FIFOSTAT) */
+#define	HIFN_FIFOSTAT_SRC	0x7f00	/* Source FIFO available */
+#define	HIFN_FIFOSTAT_DST	0x007f	/* Destination FIFO available */
+
+/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
+#define	HIFN_FIFOCNFG_THRESHOLD	0x0400	/* must be written as 1 */
+
+/*
+ * DMA Interface Registers (offset from BASEREG1)
+ */
+#define	HIFN_1_DMA_CRAR		0x0c	/* DMA Command Ring Address */
+#define	HIFN_1_DMA_SRAR		0x1c	/* DMA Source Ring Address */
+#define	HIFN_1_DMA_RRAR		0x2c	/* DMA Result Ring Address */
+#define	HIFN_1_DMA_DRAR		0x3c	/* DMA Destination Ring Address */
+#define	HIFN_1_DMA_CSR		0x40	/* DMA Status and Control */
+#define	HIFN_1_DMA_IER		0x44	/* DMA Interrupt Enable */
+#define	HIFN_1_DMA_CNFG		0x48	/* DMA Configuration */
+#define	HIFN_1_PLL		0x4c	/* 795x: PLL config */
+#define	HIFN_1_7811_RNGENA	0x60	/* 7811: rng enable */
+#define	HIFN_1_7811_RNGCFG	0x64	/* 7811: rng config */
+#define	HIFN_1_7811_RNGDAT	0x68	/* 7811: rng data */
+#define	HIFN_1_7811_RNGSTS	0x6c	/* 7811: rng status */
+#define	HIFN_1_7811_MIPSRST	0x94	/* 7811: MIPS reset */
+#define	HIFN_1_REVID		0x98	/* Revision ID */
+#define	HIFN_1_UNLOCK_SECRET1	0xf4
+#define	HIFN_1_UNLOCK_SECRET2	0xfc
+#define	HIFN_1_PUB_RESET	0x204	/* Public/RNG Reset */
+#define	HIFN_1_PUB_BASE		0x300	/* Public Base Address */
+#define	HIFN_1_PUB_OPLEN	0x304	/* Public Operand Length */
+#define	HIFN_1_PUB_OP		0x308	/* Public Operand */
+#define	HIFN_1_PUB_STATUS	0x30c	/* Public Status */
+#define	HIFN_1_PUB_IEN		0x310	/* Public Interrupt enable */
+#define	HIFN_1_RNG_CONFIG	0x314	/* RNG config */
+#define	HIFN_1_RNG_DATA		0x318	/* RNG data */
+#define	HIFN_1_PUB_MEM		0x400	/* start of Public key memory */
+#define	HIFN_1_PUB_MEMEND	0xbff	/* end of Public key memory */
+
+/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
+#define	HIFN_DMACSR_D_CTRLMASK	0xc0000000	/* Destination Ring Control */
+#define	HIFN_DMACSR_D_CTRL_NOP	0x00000000	/* Dest. Control: no-op */
+#define	HIFN_DMACSR_D_CTRL_DIS	0x40000000	/* Dest. Control: disable */
+#define	HIFN_DMACSR_D_CTRL_ENA	0x80000000	/* Dest. Control: enable */
+#define	HIFN_DMACSR_D_ABORT	0x20000000	/* Destination Ring PCI Abort */
+#define	HIFN_DMACSR_D_DONE	0x10000000	/* Destination Ring Done */
+#define	HIFN_DMACSR_D_LAST	0x08000000	/* Destination Ring Last */
+#define	HIFN_DMACSR_D_WAIT	0x04000000	/* Destination Ring Waiting */
+#define	HIFN_DMACSR_D_OVER	0x02000000	/* Destination Ring Overflow */
+#define	HIFN_DMACSR_R_CTRL	0x00c00000	/* Result Ring Control */
+#define	HIFN_DMACSR_R_CTRL_NOP	0x00000000	/* Result Control: no-op */
+#define	HIFN_DMACSR_R_CTRL_DIS	0x00400000	/* Result Control: disable */
+#define	HIFN_DMACSR_R_CTRL_ENA	0x00800000	/* Result Control: enable */
+#define	HIFN_DMACSR_R_ABORT	0x00200000	/* Result Ring PCI Abort */
+#define	HIFN_DMACSR_R_DONE	0x00100000	/* Result Ring Done */
+#define	HIFN_DMACSR_R_LAST	0x00080000	/* Result Ring Last */
+#define	HIFN_DMACSR_R_WAIT	0x00040000	/* Result Ring Waiting */
+#define	HIFN_DMACSR_R_OVER	0x00020000	/* Result Ring Overflow */
+#define	HIFN_DMACSR_S_CTRL	0x0000c000	/* Source Ring Control */
+#define	HIFN_DMACSR_S_CTRL_NOP	0x00000000	/* Source Control: no-op */
+#define	HIFN_DMACSR_S_CTRL_DIS	0x00004000	/* Source Control: disable */
+#define	HIFN_DMACSR_S_CTRL_ENA	0x00008000	/* Source Control: enable */
+#define	HIFN_DMACSR_S_ABORT	0x00002000	/* Source Ring PCI Abort */
+#define	HIFN_DMACSR_S_DONE	0x00001000	/* Source Ring Done */
+#define	HIFN_DMACSR_S_LAST	0x00000800	/* Source Ring Last */
+#define	HIFN_DMACSR_S_WAIT	0x00000400	/* Source Ring Waiting */
+#define	HIFN_DMACSR_ILLW	0x00000200	/* Illegal write (7811 only) */
+#define	HIFN_DMACSR_ILLR	0x00000100	/* Illegal read (7811 only) */
+#define	HIFN_DMACSR_C_CTRL	0x000000c0	/* Command Ring Control */
+#define	HIFN_DMACSR_C_CTRL_NOP	0x00000000	/* Command Control: no-op */
+#define	HIFN_DMACSR_C_CTRL_DIS	0x00000040	/* Command Control: disable */
+#define	HIFN_DMACSR_C_CTRL_ENA	0x00000080	/* Command Control: enable */
+#define	HIFN_DMACSR_C_ABORT	0x00000020	/* Command Ring PCI Abort */
+#define	HIFN_DMACSR_C_DONE	0x00000010	/* Command Ring Done */
+#define	HIFN_DMACSR_C_LAST	0x00000008	/* Command Ring Last */
+#define	HIFN_DMACSR_C_WAIT	0x00000004	/* Command Ring Waiting */
+#define	HIFN_DMACSR_PUBDONE	0x00000002	/* Public op done (7951 only) */
+#define	HIFN_DMACSR_ENGINE	0x00000001	/* Command Ring Engine IRQ */
+
+/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
+#define	HIFN_DMAIER_D_ABORT	0x20000000	/* Destination Ring PCIAbort */
+#define	HIFN_DMAIER_D_DONE	0x10000000	/* Destination Ring Done */
+#define	HIFN_DMAIER_D_LAST	0x08000000	/* Destination Ring Last */
+#define	HIFN_DMAIER_D_WAIT	0x04000000	/* Destination Ring Waiting */
+#define	HIFN_DMAIER_D_OVER	0x02000000	/* Destination Ring Overflow */
+#define	HIFN_DMAIER_R_ABORT	0x00200000	/* Result Ring PCI Abort */
+#define	HIFN_DMAIER_R_DONE	0x00100000	/* Result Ring Done */
+#define	HIFN_DMAIER_R_LAST	0x00080000	/* Result Ring Last */
+#define	HIFN_DMAIER_R_WAIT	0x00040000	/* Result Ring Waiting */
+#define	HIFN_DMAIER_R_OVER	0x00020000	/* Result Ring Overflow */
+#define	HIFN_DMAIER_S_ABORT	0x00002000	/* Source Ring PCI Abort */
+#define	HIFN_DMAIER_S_DONE	0x00001000	/* Source Ring Done */
+#define	HIFN_DMAIER_S_LAST	0x00000800	/* Source Ring Last */
+#define	HIFN_DMAIER_S_WAIT	0x00000400	/* Source Ring Waiting */
+#define	HIFN_DMAIER_ILLW	0x00000200	/* Illegal write (7811 only) */
+#define	HIFN_DMAIER_ILLR	0x00000100	/* Illegal read (7811 only) */
+#define	HIFN_DMAIER_C_ABORT	0x00000020	/* Command Ring PCI Abort */
+#define	HIFN_DMAIER_C_DONE	0x00000010	/* Command Ring Done */
+#define	HIFN_DMAIER_C_LAST	0x00000008	/* Command Ring Last */
+#define	HIFN_DMAIER_C_WAIT	0x00000004	/* Command Ring Waiting */
+#define	HIFN_DMAIER_PUBDONE	0x00000002	/* public op done (7951 only) */
+#define	HIFN_DMAIER_ENGINE	0x00000001	/* Engine IRQ */
+
+/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
+#define	HIFN_DMACNFG_BIGENDIAN	0x10000000	/* big endian mode */
+#define	HIFN_DMACNFG_POLLFREQ	0x00ff0000	/* Poll frequency mask */
+#define	HIFN_DMACNFG_UNLOCK	0x00000800
+#define	HIFN_DMACNFG_POLLINVAL	0x00000700	/* Invalid Poll Scalar */
+#define	HIFN_DMACNFG_LAST	0x00000010	/* Host control LAST bit */
+#define	HIFN_DMACNFG_MODE	0x00000004	/* DMA mode */
+#define	HIFN_DMACNFG_DMARESET	0x00000002	/* DMA Reset # */
+#define	HIFN_DMACNFG_MSTRESET	0x00000001	/* Master Reset # */
+
+/* PLL configuration register */
+#define HIFN_PLL_REF_CLK_HBI	0x00000000	/* HBI reference clock */
+#define HIFN_PLL_REF_CLK_PLL	0x00000001	/* PLL reference clock */
+#define HIFN_PLL_BP		0x00000002	/* Reference clock bypass */
+#define HIFN_PLL_PK_CLK_HBI	0x00000000	/* PK engine HBI clock */
+#define HIFN_PLL_PK_CLK_PLL	0x00000008	/* PK engine PLL clock */
+#define HIFN_PLL_PE_CLK_HBI	0x00000000	/* PE engine HBI clock */
+#define HIFN_PLL_PE_CLK_PLL	0x00000010	/* PE engine PLL clock */
+#define HIFN_PLL_RESERVED_1	0x00000400	/* Reserved bit, must be 1 */
+#define HIFN_PLL_ND_SHIFT	11		/* Clock multiplier shift */
+#define HIFN_PLL_ND_MULT_2	0x00000000	/* PLL clock multiplier 2 */
+#define HIFN_PLL_ND_MULT_4	0x00000800	/* PLL clock multiplier 4 */
+#define HIFN_PLL_ND_MULT_6	0x00001000	/* PLL clock multiplier 6 */
+#define HIFN_PLL_ND_MULT_8	0x00001800	/* PLL clock multiplier 8 */
+#define HIFN_PLL_ND_MULT_10	0x00002000	/* PLL clock multiplier 10 */
+#define HIFN_PLL_ND_MULT_12	0x00002800	/* PLL clock multiplier 12 */
+#define HIFN_PLL_IS_1_8		0x00000000	/* charge pump (mult. 1-8) */
+#define HIFN_PLL_IS_9_12	0x00010000	/* charge pump (mult. 9-12) */
+
+#define HIFN_PLL_FCK_MAX	266		/* Maximum PLL frequency */
+
+/* Public key reset register (HIFN_1_PUB_RESET) */
+#define	HIFN_PUBRST_RESET	0x00000001	/* reset public/rng unit */
+
+/* Public base address register (HIFN_1_PUB_BASE) */
+#define	HIFN_PUBBASE_ADDR	0x00003fff	/* base address */
+
+/* Public operand length register (HIFN_1_PUB_OPLEN) */
+#define	HIFN_PUBOPLEN_MOD_M	0x0000007f	/* modulus length mask */
+#define	HIFN_PUBOPLEN_MOD_S	0		/* modulus length shift */
+#define	HIFN_PUBOPLEN_EXP_M	0x0003ff80	/* exponent length mask */
+#define	HIFN_PUBOPLEN_EXP_S	7		/* exponent length shift */
+#define	HIFN_PUBOPLEN_RED_M	0x003c0000	/* reducend length mask */
+#define	HIFN_PUBOPLEN_RED_S	18		/* reducend length shift */
+
+/* Public operation register (HIFN_1_PUB_OP) */
+#define	HIFN_PUBOP_AOFFSET_M	0x0000007f	/* A offset mask */
+#define	HIFN_PUBOP_AOFFSET_S	0		/* A offset shift */
+#define	HIFN_PUBOP_BOFFSET_M	0x00000f80	/* B offset mask */
+#define	HIFN_PUBOP_BOFFSET_S	7		/* B offset shift */
+#define	HIFN_PUBOP_MOFFSET_M	0x0003f000	/* M offset mask */
+#define	HIFN_PUBOP_MOFFSET_S	12		/* M offset shift */
+#define	HIFN_PUBOP_OP_MASK	0x003c0000	/* Opcode: */
+#define	HIFN_PUBOP_OP_NOP	0x00000000	/*  NOP */
+#define	HIFN_PUBOP_OP_ADD	0x00040000	/*  ADD */
+#define	HIFN_PUBOP_OP_ADDC	0x00080000	/*  ADD w/carry */
+#define	HIFN_PUBOP_OP_SUB	0x000c0000	/*  SUB */
+#define	HIFN_PUBOP_OP_SUBC	0x00100000	/*  SUB w/carry */
+#define	HIFN_PUBOP_OP_MODADD	0x00140000	/*  Modular ADD */
+#define	HIFN_PUBOP_OP_MODSUB	0x00180000	/*  Modular SUB */
+#define	HIFN_PUBOP_OP_INCA	0x001c0000	/*  INC A */
+#define	HIFN_PUBOP_OP_DECA	0x00200000	/*  DEC A */
+#define	HIFN_PUBOP_OP_MULT	0x00240000	/*  MULT */
+#define	HIFN_PUBOP_OP_MODMULT	0x00280000	/*  Modular MULT */
+#define	HIFN_PUBOP_OP_MODRED	0x002c0000	/*  Modular RED */
+#define	HIFN_PUBOP_OP_MODEXP	0x00300000	/*  Modular EXP */
+
+/* Public status register (HIFN_1_PUB_STATUS) */
+#define	HIFN_PUBSTS_DONE	0x00000001	/* operation done */
+#define	HIFN_PUBSTS_CARRY	0x00000002	/* carry */
+
+/* Public interrupt enable register (HIFN_1_PUB_IEN) */
+#define	HIFN_PUBIEN_DONE	0x00000001	/* operation done interrupt */
+
+/* Random number generator config register (HIFN_1_RNG_CONFIG) */
+#define	HIFN_RNGCFG_ENA		0x00000001	/* enable rng */
+
+#define HIFN_NAMESIZE			32
+#define HIFN_MAX_RESULT_ORDER		5
+
+#define	HIFN_D_CMD_RSIZE		(24 * 1)
+#define	HIFN_D_SRC_RSIZE		(80 * 1)
+#define	HIFN_D_DST_RSIZE		(80 * 1)
+#define	HIFN_D_RES_RSIZE		(24 * 1)
+
+#define HIFN_D_DST_DALIGN		4
+
+#define HIFN_QUEUE_LENGTH		(HIFN_D_CMD_RSIZE - 1)
+
+#define AES_MIN_KEY_SIZE		16
+#define AES_MAX_KEY_SIZE		32
+
+#define HIFN_DES_KEY_LENGTH		8
+#define HIFN_3DES_KEY_LENGTH		24
+#define HIFN_MAX_CRYPT_KEY_LENGTH	AES_MAX_KEY_SIZE
+#define HIFN_IV_LENGTH			8
+#define HIFN_AES_IV_LENGTH		16
+#define	HIFN_MAX_IV_LENGTH		HIFN_AES_IV_LENGTH
+
+#define HIFN_MAC_KEY_LENGTH		64
+#define HIFN_MD5_LENGTH			16
+#define HIFN_SHA1_LENGTH		20
+#define HIFN_MAC_TRUNC_LENGTH		12
+
+#define	HIFN_MAX_COMMAND		(8 + 8 + 8 + 64 + 260)
+#define	HIFN_MAX_RESULT			(8 + 4 + 4 + 20 + 4)
+#define HIFN_USED_RESULT		12
+
+struct hifn_desc {
+	volatile __le32		l;
+	volatile __le32		p;
+};
+
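+/*
+ * Descriptor rings. Each ring carries one extra descriptor; it is pointed
+ * back at the start of its ring and serves as the wrap-around (jump) slot.
+ */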
+struct hifn_dma {
+	struct hifn_desc	cmdr[HIFN_D_CMD_RSIZE + 1];
+	struct hifn_desc	srcr[HIFN_D_SRC_RSIZE + 1];
+	struct hifn_desc	dstr[HIFN_D_DST_RSIZE + 1];
+	struct hifn_desc	resr[HIFN_D_RES_RSIZE + 1];
+
+	u8			command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
+	u8			result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
+
+	/*
+	 * Ring bookkeeping: *i is the next slot to fill, *u the number of
+	 * descriptors currently in use, and *k the next slot to reclaim
+	 * once the hardware has cleared HIFN_D_VALID.
+	 */
+	volatile int		cmdi, srci, dsti, resi;
+	volatile int		cmdu, srcu, dstu, resu;
+	int			cmdk, srck, dstk, resk;
+};
+
+#define HIFN_FLAG_CMD_BUSY	(1 << 0)
+#define HIFN_FLAG_SRC_BUSY	(1 << 1)
+#define HIFN_FLAG_DST_BUSY	(1 << 2)
+#define HIFN_FLAG_RES_BUSY	(1 << 3)
+#define HIFN_FLAG_OLD_KEY	(1 << 4)
+
+#define HIFN_DEFAULT_ACTIVE_NUM	5
+
+struct hifn_device {
+	char			name[HIFN_NAMESIZE];
+
+	int			irq;
+
+	struct pci_dev		*pdev;
+	void __iomem		*bar[3];
+
+	void			*desc_virt;
+	dma_addr_t		desc_dma;
+
+	u32			dmareg;
+
+	void			*sa[HIFN_D_RES_RSIZE];
+
+	spinlock_t		lock;
+
+	u32			flags;
+	int			active, started;
+	struct delayed_work	work;
+	unsigned long		reset;
+	unsigned long		success;
+	unsigned long		prev_success;
+
+	u8			snum;
+
+	struct tasklet_struct	tasklet;
+
+	struct crypto_queue	queue;
+	struct list_head	alg_list;
+
+	unsigned int		pk_clk_freq;
+
+#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
+	unsigned int		rng_wait_time;
+	ktime_t			rngtime;
+	struct hwrng		rng;
+#endif
+};
+
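+/* Descriptor control/length word (struct hifn_desc.l): length mask and flags */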
+#define	HIFN_D_LENGTH			0x0000ffff
+#define	HIFN_D_NOINVALID		0x01000000
+#define	HIFN_D_MASKDONEIRQ		0x02000000
+#define	HIFN_D_DESTOVER			0x04000000
+#define	HIFN_D_OVER			0x08000000
+#define	HIFN_D_LAST			0x20000000
+#define	HIFN_D_JUMP			0x40000000
+#define	HIFN_D_VALID			0x80000000
+
+struct hifn_base_command {
+	volatile __le16		masks;
+	volatile __le16		session_num;
+	volatile __le16		total_source_count;
+	volatile __le16		total_dest_count;
+};
+
+#define	HIFN_BASE_CMD_COMP		0x0100	/* enable compression engine */
+#define	HIFN_BASE_CMD_PAD		0x0200	/* enable padding engine */
+#define	HIFN_BASE_CMD_MAC		0x0400	/* enable MAC engine */
+#define	HIFN_BASE_CMD_CRYPT		0x0800	/* enable crypt engine */
+#define	HIFN_BASE_CMD_DECODE		0x2000
+#define	HIFN_BASE_CMD_SRCLEN_M		0xc000
+#define	HIFN_BASE_CMD_SRCLEN_S		14
+#define	HIFN_BASE_CMD_DSTLEN_M		0x3000
+#define	HIFN_BASE_CMD_DSTLEN_S		12
+#define	HIFN_BASE_CMD_LENMASK_HI	0x30000
+#define	HIFN_BASE_CMD_LENMASK_LO	0x0ffff
+
+/*
+ * Structure to help build up the command data structure.
+ */
+struct hifn_crypt_command {
+	volatile __le16		masks;
+	volatile __le16		header_skip;
+	volatile __le16		source_count;
+	volatile __le16		reserved;
+};
+
+#define	HIFN_CRYPT_CMD_ALG_MASK		0x0003		/* algorithm: */
+#define	HIFN_CRYPT_CMD_ALG_DES		0x0000		/*   DES */
+#define	HIFN_CRYPT_CMD_ALG_3DES		0x0001		/*   3DES */
+#define	HIFN_CRYPT_CMD_ALG_RC4		0x0002		/*   RC4 */
+#define	HIFN_CRYPT_CMD_ALG_AES		0x0003		/*   AES */
+#define	HIFN_CRYPT_CMD_MODE_MASK	0x0018		/* Encrypt mode: */
+#define	HIFN_CRYPT_CMD_MODE_ECB		0x0000		/*   ECB */
+#define	HIFN_CRYPT_CMD_MODE_CBC		0x0008		/*   CBC */
+#define	HIFN_CRYPT_CMD_MODE_CFB		0x0010		/*   CFB */
+#define	HIFN_CRYPT_CMD_MODE_OFB		0x0018		/*   OFB */
+#define	HIFN_CRYPT_CMD_CLR_CTX		0x0040		/* clear context */
+#define	HIFN_CRYPT_CMD_KSZ_MASK		0x0600		/* AES key size: */
+#define	HIFN_CRYPT_CMD_KSZ_128		0x0000		/*  128 bit */
+#define	HIFN_CRYPT_CMD_KSZ_192		0x0200		/*  192 bit */
+#define	HIFN_CRYPT_CMD_KSZ_256		0x0400		/*  256 bit */
+#define	HIFN_CRYPT_CMD_NEW_KEY		0x0800		/* expect new key */
+#define	HIFN_CRYPT_CMD_NEW_IV		0x1000		/* expect new iv */
+#define	HIFN_CRYPT_CMD_SRCLEN_M		0xc000
+#define	HIFN_CRYPT_CMD_SRCLEN_S		14
+
+/*
+ * Structure to help build up the command data structure.
+ */
+struct hifn_mac_command {
+	volatile __le16	masks;
+	volatile __le16	header_skip;
+	volatile __le16	source_count;
+	volatile __le16	reserved;
+};
+
+#define	HIFN_MAC_CMD_ALG_MASK		0x0001
+#define	HIFN_MAC_CMD_ALG_SHA1		0x0000
+#define	HIFN_MAC_CMD_ALG_MD5		0x0001
+#define	HIFN_MAC_CMD_MODE_MASK		0x000c
+#define	HIFN_MAC_CMD_MODE_HMAC		0x0000
+#define	HIFN_MAC_CMD_MODE_SSL_MAC	0x0004
+#define	HIFN_MAC_CMD_MODE_HASH		0x0008
+#define	HIFN_MAC_CMD_MODE_FULL		0x0004
+#define	HIFN_MAC_CMD_TRUNC		0x0010
+#define	HIFN_MAC_CMD_RESULT		0x0020
+#define	HIFN_MAC_CMD_APPEND		0x0040
+#define	HIFN_MAC_CMD_SRCLEN_M		0xc000
+#define	HIFN_MAC_CMD_SRCLEN_S		14
+
+/*
+ * MAC POS IPsec initiates authentication after encryption on encodes
+ * and before decryption on decodes.
+ */
+#define	HIFN_MAC_CMD_POS_IPSEC		0x0200
+#define	HIFN_MAC_CMD_NEW_KEY		0x0800
+
+struct hifn_comp_command {
+	volatile __le16		masks;
+	volatile __le16		header_skip;
+	volatile __le16		source_count;
+	volatile __le16		reserved;
+};
+
+#define	HIFN_COMP_CMD_SRCLEN_M		0xc000
+#define	HIFN_COMP_CMD_SRCLEN_S		14
+#define	HIFN_COMP_CMD_ONE		0x0100	/* must be one */
+#define	HIFN_COMP_CMD_CLEARHIST		0x0010	/* clear history */
+#define	HIFN_COMP_CMD_UPDATEHIST	0x0008	/* update history */
+#define	HIFN_COMP_CMD_LZS_STRIP0	0x0004	/* LZS: strip zero */
+#define	HIFN_COMP_CMD_MPPC_RESTART	0x0004	/* MPPC: restart */
+#define	HIFN_COMP_CMD_ALG_MASK		0x0001	/* compression mode: */
+#define	HIFN_COMP_CMD_ALG_MPPC		0x0001	/*   MPPC */
+#define	HIFN_COMP_CMD_ALG_LZS		0x0000	/*   LZS */
+
+struct hifn_base_result {
+	volatile __le16		flags;
+	volatile __le16		session;
+	volatile __le16		src_cnt;		/* 15:0 of source count */
+	volatile __le16		dst_cnt;		/* 15:0 of dest count */
+};
+
+#define	HIFN_BASE_RES_DSTOVERRUN	0x0200	/* destination overrun */
+#define	HIFN_BASE_RES_SRCLEN_M		0xc000	/* 17:16 of source count */
+#define	HIFN_BASE_RES_SRCLEN_S		14
+#define	HIFN_BASE_RES_DSTLEN_M		0x3000	/* 17:16 of dest count */
+#define	HIFN_BASE_RES_DSTLEN_S		12
+
+struct hifn_comp_result {
+	volatile __le16		flags;
+	volatile __le16		crc;
+};
+
+#define	HIFN_COMP_RES_LCB_M		0xff00	/* longitudinal check byte */
+#define	HIFN_COMP_RES_LCB_S		8
+#define	HIFN_COMP_RES_RESTART		0x0004	/* MPPC: restart */
+#define	HIFN_COMP_RES_ENDMARKER		0x0002	/* LZS: end marker seen */
+#define	HIFN_COMP_RES_SRC_NOTZERO	0x0001	/* source expired */
+
+struct hifn_mac_result {
+	volatile __le16		flags;
+	volatile __le16		reserved;
+	/* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
+};
+
+#define	HIFN_MAC_RES_MISCOMPARE		0x0002	/* compare failed */
+#define	HIFN_MAC_RES_SRC_NOTZERO	0x0001	/* source expired */
+
+struct hifn_crypt_result {
+	volatile __le16		flags;
+	volatile __le16		reserved;
+};
+
+#define	HIFN_CRYPT_RES_SRC_NOTZERO	0x0001	/* source expired */
+
+#ifndef HIFN_POLL_FREQUENCY
+#define	HIFN_POLL_FREQUENCY	0x1
+#endif
+
+#ifndef HIFN_POLL_SCALAR
+#define	HIFN_POLL_SCALAR	0x0
+#endif
+
+#define	HIFN_MAX_SEGLEN		0xffff		/* maximum dma segment len */
+#define	HIFN_MAX_DMALEN		0x3ffff		/* maximum dma length */
+
+struct hifn_crypto_alg {
+	struct list_head	entry;
+	struct crypto_alg	alg;
+	struct hifn_device	*dev;
+};
+
+#define ASYNC_SCATTERLIST_CACHE	16
+
+#define ASYNC_FLAGS_MISALIGNED	(1 << 0)
+
+struct hifn_cipher_walk {
+	struct scatterlist	cache[ASYNC_SCATTERLIST_CACHE];
+	u32			flags;
+	int			num;
+};
+
+struct hifn_context {
+	u8			key[HIFN_MAX_CRYPT_KEY_LENGTH];
+	struct hifn_device	*dev;
+	unsigned int		keysize;
+};
+
+struct hifn_request_context {
+	u8			*iv;
+	unsigned int		ivsize;
+	u8			op, type, mode, unused;
+	struct hifn_cipher_walk	walk;
+};
+
+#define crypto_alg_to_hifn(a)	container_of(a, struct hifn_crypto_alg, alg)
+
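+/*
+ * BAR0 maps the processing-unit (HIFN_0_*) registers, BAR1 the DMA,
+ * public-key and RNG (HIFN_1_*) registers.
+ */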
+static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg)
+{
+	return readl(dev->bar[0] + reg);
+}
+
+static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
+{
+	return readl(dev->bar[1] + reg);
+}
+
+static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
+{
+	writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg);
+}
+
+static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
+{
+	writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg);
+}
+
+static void hifn_wait_puc(struct hifn_device *dev)
+{
+	int i;
+	u32 ret;
+
+	for (i = 10000; i > 0; --i) {
+		ret = hifn_read_0(dev, HIFN_0_PUCTRL);
+		if (!(ret & HIFN_PUCTRL_RESET))
+			break;
+
+		udelay(1);
+	}
+
+	if (!i)
+		dev_err(&dev->pdev->dev, "Failed to reset PUC unit.\n");
+}
+
+static void hifn_reset_puc(struct hifn_device *dev)
+{
+	hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
+	hifn_wait_puc(dev);
+}
+
+static void hifn_stop_device(struct hifn_device *dev)
+{
+	hifn_write_1(dev, HIFN_1_DMA_CSR,
+		HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
+		HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS);
+	hifn_write_0(dev, HIFN_0_PUIER, 0);
+	hifn_write_1(dev, HIFN_1_DMA_IER, 0);
+}
+
+static void hifn_reset_dma(struct hifn_device *dev, int full)
+{
+	hifn_stop_device(dev);
+
+	/*
+	 * Setting poll frequency and others to 0.
+	 */
+	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+	mdelay(1);
+
+	/*
+	 * Reset DMA.
+	 */
+	if (full) {
+		hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
+		mdelay(1);
+	} else {
+		hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE |
+				HIFN_DMACNFG_MSTRESET);
+		hifn_reset_puc(dev);
+	}
+
+	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+
+	hifn_reset_puc(dev);
+}
+
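+/*
+ * One step of the unlock-sequence generator: a linear feedback shift
+ * register that shifts in the parity of the bits selected by the
+ * 0x80080125 tap mask.
+ */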
+static u32 hifn_next_signature(u32 a, u_int cnt)
+{
+	int i;
+	u32 v;
+
+	for (i = 0; i < cnt; i++) {
+		/* get the parity */
+		v = a & 0x80080125;
+		v ^= v >> 16;
+		v ^= v >> 8;
+		v ^= v >> 4;
+		v ^= v >> 2;
+		v ^= v >> 1;
+
+		a = (v & 1) ^ (a << 1);
+	}
+
+	return a;
+}
+
+static struct pci2id {
+	u_short		pci_vendor;
+	u_short		pci_prod;
+	char		card_id[13];
+} pci2id[] = {
+	{
+		PCI_VENDOR_ID_HIFN,
+		PCI_DEVICE_ID_HIFN_7955,
+		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		  0x00, 0x00, 0x00, 0x00, 0x00 }
+	},
+	{
+		PCI_VENDOR_ID_HIFN,
+		PCI_DEVICE_ID_HIFN_7956,
+		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		  0x00, 0x00, 0x00, 0x00, 0x00 }
+	}
+};
+
+#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
+static int hifn_rng_data_present(struct hwrng *rng, int wait)
+{
+	struct hifn_device *dev = (struct hifn_device *)rng->priv;
+	s64 nsec;
+
+	nsec = ktime_to_ns(ktime_sub(ktime_get(), dev->rngtime));
+	nsec -= dev->rng_wait_time;
+	if (nsec <= 0)
+		return 1;
+	if (!wait)
+		return 0;
+	ndelay(nsec);
+	return 1;
+}
+
+static int hifn_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	struct hifn_device *dev = (struct hifn_device *)rng->priv;
+
+	*data = hifn_read_1(dev, HIFN_1_RNG_DATA);
+	dev->rngtime = ktime_get();
+	return 4;
+}
+
+static int hifn_register_rng(struct hifn_device *dev)
+{
+	/*
+	 * We must wait at least 256 Pk_clk cycles between two reads of the rng.
+	 */
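+	/*
+	 * With the default 66 MHz reference clock pk_clk_freq works out to
+	 * 134 MHz, so one Pk_clk cycle rounds up to 8 ns and the wait time
+	 * becomes 256 * 8 ns = 2048 ns.
+	 */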
+	dev->rng_wait_time	= DIV_ROUND_UP_ULL(NSEC_PER_SEC,
+						   dev->pk_clk_freq) * 256;
+
+	dev->rng.name		= dev->name;
+	dev->rng.data_present	= hifn_rng_data_present;
+	dev->rng.data_read	= hifn_rng_data_read;
+	dev->rng.priv		= (unsigned long)dev;
+
+	return hwrng_register(&dev->rng);
+}
+
+static void hifn_unregister_rng(struct hifn_device *dev)
+{
+	hwrng_unregister(&dev->rng);
+}
+#else
+#define hifn_register_rng(dev)		0
+#define hifn_unregister_rng(dev)
+#endif
+
+static int hifn_init_pubrng(struct hifn_device *dev)
+{
+	int i;
+
+	hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) |
+			HIFN_PUBRST_RESET);
+
+	for (i = 100; i > 0; --i) {
+		mdelay(1);
+
+		if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0)
+			break;
+	}
+
+	if (!i) {
+		dev_err(&dev->pdev->dev, "Failed to initialise public key engine.\n");
+	} else {
+		hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
+		dev->dmareg |= HIFN_DMAIER_PUBDONE;
+		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
+
+		dev_dbg(&dev->pdev->dev, "Public key engine has been successfully initialised.\n");
+	}
+
+	/* Enable RNG engine. */
+
+	hifn_write_1(dev, HIFN_1_RNG_CONFIG,
+			hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA);
+	dev_dbg(&dev->pdev->dev, "RNG engine has been successfully initialised.\n");
+
+#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
+	/* First value must be discarded */
+	hifn_read_1(dev, HIFN_1_RNG_DATA);
+	dev->rngtime = ktime_get();
+#endif
+	return 0;
+}
+
+static int hifn_enable_crypto(struct hifn_device *dev)
+{
+	u32 dmacfg, addr;
+	char *offtbl = NULL;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pci2id); i++) {
+		if (pci2id[i].pci_vendor == dev->pdev->vendor &&
+				pci2id[i].pci_prod == dev->pdev->device) {
+			offtbl = pci2id[i].card_id;
+			break;
+		}
+	}
+
+	if (!offtbl) {
+		dev_err(&dev->pdev->dev, "Unknown card!\n");
+		return -ENODEV;
+	}
+
+	dmacfg = hifn_read_1(dev, HIFN_1_DMA_CNFG);
+
+	hifn_write_1(dev, HIFN_1_DMA_CNFG,
+			HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET |
+			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+	mdelay(1);
+	addr = hifn_read_1(dev, HIFN_1_UNLOCK_SECRET1);
+	mdelay(1);
+	hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0);
+	mdelay(1);
+
+	for (i = 0; i < 12; ++i) {
+		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
+		hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr);
+
+		mdelay(1);
+	}
+	hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg);
+
+	dev_dbg(&dev->pdev->dev, "%s %s.\n", dev->name, pci_name(dev->pdev));
+
+	return 0;
+}
+
+static void hifn_init_dma(struct hifn_device *dev)
+{
+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+	u32 dptr = dev->desc_dma;
+	int i;
+
+	for (i = 0; i < HIFN_D_CMD_RSIZE; ++i)
+		dma->cmdr[i].p = __cpu_to_le32(dptr +
+				offsetof(struct hifn_dma, command_bufs[i][0]));
+	for (i = 0; i < HIFN_D_RES_RSIZE; ++i)
+		dma->resr[i].p = __cpu_to_le32(dptr +
+				offsetof(struct hifn_dma, result_bufs[i][0]));
+
+	/* Point each ring's extra (wrap) descriptor back at its first entry. */
+	dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr +
+			offsetof(struct hifn_dma, cmdr[0]));
+	dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr +
+			offsetof(struct hifn_dma, srcr[0]));
+	dma->dstr[HIFN_D_DST_RSIZE].p = __cpu_to_le32(dptr +
+			offsetof(struct hifn_dma, dstr[0]));
+	dma->resr[HIFN_D_RES_RSIZE].p = __cpu_to_le32(dptr +
+			offsetof(struct hifn_dma, resr[0]));
+
+	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
+	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
+	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
+}
+
+/*
+ * Initialize the PLL. We need to know the frequency of the reference clock
+ * to calculate the optimal multiplier. For PCI we assume 66MHz, since that
+ * allows us to operate without the risk of overclocking the chip. If it
+ * actually uses 33MHz, the chip will operate at half the speed; this can be
+ * overridden by specifying the frequency as a module parameter (pci33).
+ *
+ * Unfortunately the PCI clock is not very suitable since the HIFN needs a
+ * stable clock and the PCI clock frequency may vary, so the default is the
+ * external clock. There is no way to find out its frequency, so we default
+ * to 66MHz since, according to Mike Ham of HiFn, almost every board in
+ * existence has an external crystal populated at 66MHz.
+ */
+static void hifn_init_pll(struct hifn_device *dev)
+{
+	unsigned int freq, m;
+	u32 pllcfg;
+
+	pllcfg = HIFN_1_PLL | HIFN_PLL_RESERVED_1;
+
+	if (strncmp(hifn_pll_ref, "ext", 3) == 0)
+		pllcfg |= HIFN_PLL_REF_CLK_PLL;
+	else
+		pllcfg |= HIFN_PLL_REF_CLK_HBI;
+
+	if (hifn_pll_ref[3] != '\0')
+		freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
+	else {
+		freq = 66;
+		dev_info(&dev->pdev->dev, "assuming %uMHz clock speed, override with hifn_pll_ref=%.3s<frequency>\n",
+			 freq, hifn_pll_ref);
+	}
+
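+	/*
+	 * E.g. with the default freq = 66, m = 266 / 66 = 4, so the ND field
+	 * below selects a multiply-by-4 and the chip runs at 264 MHz.
+	 */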
+	m = HIFN_PLL_FCK_MAX / freq;
+
+	pllcfg |= (m / 2 - 1) << HIFN_PLL_ND_SHIFT;
+	if (m <= 8)
+		pllcfg |= HIFN_PLL_IS_1_8;
+	else
+		pllcfg |= HIFN_PLL_IS_9_12;
+
+	/* Select clock source and enable clock bypass */
+	hifn_write_1(dev, HIFN_1_PLL, pllcfg |
+		     HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI | HIFN_PLL_BP);
+
+	/* Let the chip lock to the input clock */
+	mdelay(10);
+
+	/* Disable clock bypass */
+	hifn_write_1(dev, HIFN_1_PLL, pllcfg |
+		     HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI);
+
+	/* Switch the engines to the PLL */
+	hifn_write_1(dev, HIFN_1_PLL, pllcfg |
+		     HIFN_PLL_PK_CLK_PLL | HIFN_PLL_PE_CLK_PLL);
+
+	/*
+	 * The Fpk_clk runs at half the total speed. Its frequency is needed to
+	 * calculate the minimum time between two reads of the rng. Since 33MHz
+	 * is actually 33.333... we overestimate the frequency here, resulting
+	 * in slightly larger intervals.
+	 */
+	dev->pk_clk_freq = 1000000 * (freq + 1) * m / 2;
+}
+
+static void hifn_init_registers(struct hifn_device *dev)
+{
+	u32 dptr = dev->desc_dma;
+
+	/* Initialization magic... */
+	hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
+	hifn_write_0(dev, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
+	hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
+
+	/* write all 4 ring address registers */
+	hifn_write_1(dev, HIFN_1_DMA_CRAR, dptr +
+				offsetof(struct hifn_dma, cmdr[0]));
+	hifn_write_1(dev, HIFN_1_DMA_SRAR, dptr +
+				offsetof(struct hifn_dma, srcr[0]));
+	hifn_write_1(dev, HIFN_1_DMA_DRAR, dptr +
+				offsetof(struct hifn_dma, dstr[0]));
+	hifn_write_1(dev, HIFN_1_DMA_RRAR, dptr +
+				offsetof(struct hifn_dma, resr[0]));
+
+	mdelay(2);
+#if 0
+	hifn_write_1(dev, HIFN_1_DMA_CSR,
+	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
+	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
+	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
+	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
+	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
+	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
+	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
+	    HIFN_DMACSR_S_WAIT |
+	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
+	    HIFN_DMACSR_C_WAIT |
+	    HIFN_DMACSR_ENGINE |
+	    HIFN_DMACSR_PUBDONE);
+#else
+	hifn_write_1(dev, HIFN_1_DMA_CSR,
+	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
+	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA |
+	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
+	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
+	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
+	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
+	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
+	    HIFN_DMACSR_S_WAIT |
+	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
+	    HIFN_DMACSR_C_WAIT |
+	    HIFN_DMACSR_ENGINE |
+	    HIFN_DMACSR_PUBDONE);
+#endif
+	hifn_read_1(dev, HIFN_1_DMA_CSR);
+
+	dev->dmareg |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
+	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
+	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
+	    HIFN_DMAIER_ENGINE;
+	dev->dmareg &= ~HIFN_DMAIER_C_WAIT;
+
+	hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
+	hifn_read_1(dev, HIFN_1_DMA_IER);
+#if 0
+	hifn_write_0(dev, HIFN_0_PUCNFG, HIFN_PUCNFG_ENCCNFG |
+		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
+		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
+		    HIFN_PUCNFG_DRAM);
+#else
+	hifn_write_0(dev, HIFN_0_PUCNFG, 0x10342);
+#endif
+	hifn_init_pll(dev);
+
+	hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
+	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
+	    ((HIFN_POLL_FREQUENCY << 16) & HIFN_DMACNFG_POLLFREQ) |
+	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
+}
+
+static int hifn_setup_base_command(struct hifn_device *dev, u8 *buf,
+		unsigned dlen, unsigned slen, u16 mask, u8 snum)
+{
+	struct hifn_base_command *base_cmd;
+	u8 *buf_pos = buf;
+
+	base_cmd = (struct hifn_base_command *)buf_pos;
+	base_cmd->masks = __cpu_to_le16(mask);
+	base_cmd->total_source_count =
+		__cpu_to_le16(slen & HIFN_BASE_CMD_LENMASK_LO);
+	base_cmd->total_dest_count =
+		__cpu_to_le16(dlen & HIFN_BASE_CMD_LENMASK_LO);
+
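+	/*
+	 * Bits 17:16 of the source and destination lengths are folded into
+	 * the session word through the SRCLEN/DSTLEN fields below.
+	 */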
+	dlen >>= 16;
+	slen >>= 16;
+	base_cmd->session_num = __cpu_to_le16(snum |
+	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
+	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
+
+	return sizeof(struct hifn_base_command);
+}
+
+static int hifn_setup_crypto_command(struct hifn_device *dev,
+		u8 *buf, unsigned dlen, unsigned slen,
+		u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
+{
+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+	struct hifn_crypt_command *cry_cmd;
+	u8 *buf_pos = buf;
+	u16 cmd_len;
+
+	cry_cmd = (struct hifn_crypt_command *)buf_pos;
+
+	cry_cmd->source_count = __cpu_to_le16(dlen & 0xffff);
+	dlen >>= 16;
+	cry_cmd->masks = __cpu_to_le16(mode |
+			((dlen << HIFN_CRYPT_CMD_SRCLEN_S) &
+			 HIFN_CRYPT_CMD_SRCLEN_M));
+	cry_cmd->header_skip = 0;
+	cry_cmd->reserved = 0;
+
+	buf_pos += sizeof(struct hifn_crypt_command);
+
+	dma->cmdu++;
+	if (dma->cmdu > 1) {
+		dev->dmareg |= HIFN_DMAIER_C_WAIT;
+		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
+	}
+
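+	/* A new key and/or IV, when supplied, directly follows the header. */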
+	if (keylen) {
+		memcpy(buf_pos, key, keylen);
+		buf_pos += keylen;
+	}
+	if (ivsize) {
+		memcpy(buf_pos, iv, ivsize);
+		buf_pos += ivsize;
+	}
+
+	cmd_len = buf_pos - buf;
+
+	return cmd_len;
+}
+
+static int hifn_setup_cmd_desc(struct hifn_device *dev,
+		struct hifn_context *ctx, struct hifn_request_context *rctx,
+		void *priv, unsigned int nbytes)
+{
+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+	int cmd_len, sa_idx;
+	u8 *buf, *buf_pos;
+	u16 mask;
+
+	sa_idx = dma->cmdi;
+	buf_pos = buf = dma->command_bufs[dma->cmdi];
+
+	mask = 0;
+	switch (rctx->op) {
+	case ACRYPTO_OP_DECRYPT:
+		mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
+		break;
+	case ACRYPTO_OP_ENCRYPT:
+		mask = HIFN_BASE_CMD_CRYPT;
+		break;
+	case ACRYPTO_OP_HMAC:
+		mask = HIFN_BASE_CMD_MAC;
+		break;
+	default:
+		goto err_out;
+	}
+
+	buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
+			nbytes, mask, dev->snum);
+
+	if (rctx->op == ACRYPTO_OP_ENCRYPT || rctx->op == ACRYPTO_OP_DECRYPT) {
+		u16 md = 0;
+
+		if (ctx->keysize)
+			md |= HIFN_CRYPT_CMD_NEW_KEY;
+		if (rctx->iv && rctx->mode != ACRYPTO_MODE_ECB)
+			md |= HIFN_CRYPT_CMD_NEW_IV;
+
+		switch (rctx->mode) {
+		case ACRYPTO_MODE_ECB:
+			md |= HIFN_CRYPT_CMD_MODE_ECB;
+			break;
+		case ACRYPTO_MODE_CBC:
+			md |= HIFN_CRYPT_CMD_MODE_CBC;
+			break;
+		case ACRYPTO_MODE_CFB:
+			md |= HIFN_CRYPT_CMD_MODE_CFB;
+			break;
+		case ACRYPTO_MODE_OFB:
+			md |= HIFN_CRYPT_CMD_MODE_OFB;
+			break;
+		default:
+			goto err_out;
+		}
+
+		switch (rctx->type) {
+		case ACRYPTO_TYPE_AES_128:
+			if (ctx->keysize != 16)
+				goto err_out;
+			md |= HIFN_CRYPT_CMD_KSZ_128 |
+				HIFN_CRYPT_CMD_ALG_AES;
+			break;
+		case ACRYPTO_TYPE_AES_192:
+			if (ctx->keysize != 24)
+				goto err_out;
+			md |= HIFN_CRYPT_CMD_KSZ_192 |
+				HIFN_CRYPT_CMD_ALG_AES;
+			break;
+		case ACRYPTO_TYPE_AES_256:
+			if (ctx->keysize != 32)
+				goto err_out;
+			md |= HIFN_CRYPT_CMD_KSZ_256 |
+				HIFN_CRYPT_CMD_ALG_AES;
+			break;
+		case ACRYPTO_TYPE_3DES:
+			if (ctx->keysize != 24)
+				goto err_out;
+			md |= HIFN_CRYPT_CMD_ALG_3DES;
+			break;
+		case ACRYPTO_TYPE_DES:
+			if (ctx->keysize != 8)
+				goto err_out;
+			md |= HIFN_CRYPT_CMD_ALG_DES;
+			break;
+		default:
+			goto err_out;
+		}
+
+		buf_pos += hifn_setup_crypto_command(dev, buf_pos,
+				nbytes, nbytes, ctx->key, ctx->keysize,
+				rctx->iv, rctx->ivsize, md);
+	}
+
+	dev->sa[sa_idx] = priv;
+	dev->started++;
+
+	cmd_len = buf_pos - buf;
+	dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID |
+			HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
+
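+	/* On ring wrap, arm the jump descriptor and continue at slot 0. */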
+	if (++dma->cmdi == HIFN_D_CMD_RSIZE) {
+		dma->cmdr[dma->cmdi].l = __cpu_to_le32(
+			HIFN_D_VALID | HIFN_D_LAST |
+			HIFN_D_MASKDONEIRQ | HIFN_D_JUMP);
+		dma->cmdi = 0;
+	} else {
+		dma->cmdr[dma->cmdi - 1].l |= __cpu_to_le32(HIFN_D_VALID);
+	}
+
+	if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) {
+		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
+		dev->flags |= HIFN_FLAG_CMD_BUSY;
+	}
+	return 0;
+
+err_out:
+	return -EINVAL;
+}
+
+static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
+		unsigned int offset, unsigned int size, int last)
+{
+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+	int idx;
+	dma_addr_t addr;
+
+	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
+
+	idx = dma->srci;
+
+	dma->srcr[idx].p = __cpu_to_le32(addr);
+	dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
+			HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0));
+
+	if (++idx == HIFN_D_SRC_RSIZE) {
+		dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
+				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
+				(last ? HIFN_D_LAST : 0));
+		idx = 0;
+	}
+
+	dma->srci = idx;
+	dma->srcu++;
+
+	if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
+		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
+		dev->flags |= HIFN_FLAG_SRC_BUSY;
+	}
+
+	return size;
+}
+
+static void hifn_setup_res_desc(struct hifn_device *dev)
+{
+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+
+	dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
+			HIFN_D_VALID | HIFN_D_LAST);
+	/*
+	 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
+	 *					HIFN_D_LAST);
+	 */
+
+	if (++dma->resi == HIFN_D_RES_RSIZE) {
+		dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
+				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
+		dma->resi = 0;
+	}
+
+	dma->resu++;
+
+	if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
+		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
+		dev->flags |= HIFN_FLAG_RES_BUSY;
+	}
+}
+
+static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
+		unsigned offset, unsigned size, int last)
+{
+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+	int idx;
+	dma_addr_t addr;
+
+	addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
+
+	idx = dma->dsti;
+	dma->dstr[idx].p = __cpu_to_le32(addr);
+	dma->dstr[idx].l = __cpu_to_le32(size |	HIFN_D_VALID |
+			HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0));
+
+	if (++idx == HIFN_D_DST_RSIZE) {
+		dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
+				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
+				(last ? HIFN_D_LAST : 0));
+		idx = 0;
+	}
+	dma->dsti = idx;
+	dma->dstu++;
+
+	if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
+		hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
+		dev->flags |= HIFN_FLAG_DST_BUSY;
+	}
+}
+
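+/*
+ * Queue one request: source pages go onto the source ring, destination
+ * pages (or bounce-cache pages for misaligned destinations) onto the
+ * destination ring, followed by one command and one result descriptor.
+ */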
+static int hifn_setup_dma(struct hifn_device *dev,
+		struct hifn_context *ctx, struct hifn_request_context *rctx,
+		struct scatterlist *src, struct scatterlist *dst,
+		unsigned int nbytes, void *priv)
+{
+	struct scatterlist *t;
+	struct page *spage, *dpage;
+	unsigned int soff, doff;
+	unsigned int n, len;
+
+	n = nbytes;
+	while (n) {
+		spage = sg_page(src);
+		soff = src->offset;
+		len = min(src->length, n);
+
+		hifn_setup_src_desc(dev, spage, soff, len, n - len == 0);
+
+		src++;
+		n -= len;
+	}
+
+	t = &rctx->walk.cache[0];
+	n = nbytes;
+	while (n) {
+		if (t->length && rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
+			BUG_ON(!sg_page(t));
+			dpage = sg_page(t);
+			doff = 0;
+			len = t->length;
+		} else {
+			BUG_ON(!sg_page(dst));
+			dpage = sg_page(dst);
+			doff = dst->offset;
+			len = dst->length;
+		}
+		len = min(len, n);
+
+		hifn_setup_dst_desc(dev, dpage, doff, len, n - len == 0);
+
+		dst++;
+		t++;
+		n -= len;
+	}
+
+	hifn_setup_cmd_desc(dev, ctx, rctx, priv, nbytes);
+	hifn_setup_res_desc(dev);
+	return 0;
+}
+
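+/*
+ * Allocate up to ASYNC_SCATTERLIST_CACHE bounce pages, used as aligned
+ * destination buffers when the caller's scatterlist does not satisfy
+ * HIFN_D_DST_DALIGN; returns the number of pages actually allocated.
+ */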
+static int hifn_cipher_walk_init(struct hifn_cipher_walk *w,
+		int num, gfp_t gfp_flags)
+{
+	int i;
+
+	num = min(ASYNC_SCATTERLIST_CACHE, num);
+	sg_init_table(w->cache, num);
+
+	w->num = 0;
+	for (i = 0; i < num; ++i) {
+		struct page *page = alloc_page(gfp_flags);
+		struct scatterlist *s;
+
+		if (!page)
+			break;
+
+		s = &w->cache[i];
+
+		sg_set_page(s, page, PAGE_SIZE, 0);
+		w->num++;
+	}
+
+	return i;
+}
+
+static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
+{
+	int i;
+
+	for (i = 0; i < w->num; ++i) {
+		struct scatterlist *s = &w->cache[i];
+
+		__free_page(sg_page(s));
+
+		s->length = 0;
+	}
+
+	w->num = 0;
+}
+
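+/*
+ * Account for 'size' bytes across the destination scatterlist, updating
+ * the remaining-space and remaining-bytes counters; returns the number
+ * of scatterlist entries walked.
+ */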
+static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
+		unsigned int size, unsigned int *nbytesp)
+{
+	unsigned int copy, drest = *drestp, nbytes = *nbytesp;
+	int idx = 0;
+
+	if (drest < size || size > nbytes)
+		return -EINVAL;
+
+	while (size) {
+		copy = min3(drest, size, dst->length);
+
+		size -= copy;
+		drest -= copy;
+		nbytes -= copy;
+
+		pr_debug("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n",
+			 __func__, copy, size, drest, nbytes);
+
+		dst++;
+		idx++;
+	}
+
+	*nbytesp = nbytes;
+	*drestp = drest;
+
+	return idx;
+}
+
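+/*
+ * Map misaligned destination entries onto the preallocated bounce pages,
+ * recording how much of each page will be used; returns the number of
+ * destination chunks prepared for DMA.
+ */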
+static int hifn_cipher_walk(struct ablkcipher_request *req,
+		struct hifn_cipher_walk *w)
+{
+	struct scatterlist *dst, *t;
+	unsigned int nbytes = req->nbytes, offset, copy, diff;
+	int idx, tidx, err;
+
+	tidx = idx = 0;
+	offset = 0;
+	while (nbytes) {
+		if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED))
+			return -EINVAL;
+
+		dst = &req->dst[idx];
+
+		pr_debug("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n",
+			 __func__, dst->length, dst->offset, offset, nbytes);
+
+		if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
+		    !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) ||
+		    offset) {
+			unsigned slen = min(dst->length - offset, nbytes);
+			unsigned dlen = PAGE_SIZE;
+
+			t = &w->cache[idx];
+
+			err = ablkcipher_add(&dlen, dst, slen, &nbytes);
+			if (err < 0)
+				return err;
+
+			idx += err;
+
+			copy = slen & ~(HIFN_D_DST_DALIGN - 1);
+			diff = slen & (HIFN_D_DST_DALIGN - 1);
+
+			if (dlen < nbytes) {
+				/*
+				 * The destination page does not have enough
+				 * space for an additional block-sized chunk,
+				 * so mark the page as containing only
+				 * block-aligned chunks:
+				 *	t->length = (slen & ~(HIFN_D_DST_DALIGN - 1));
+				 * and increase the number of bytes to be
+				 * processed in the next chunk:
+				 *	nbytes += diff;
+				 */
+				nbytes += diff;
+
+				/*
+				 * Temporary, of course...
+				 * Please report to the author if you ever
+				 * hit this path.
+				 */
+				pr_err("%s: dlen: %u, nbytes: %u, slen: %u, offset: %u.\n",
+				       __func__, dlen, nbytes, slen, offset);
+				pr_err("%s: please contact the author to fix "
+				       "this issue; this path should never be "
+				       "hit under normal use of the crypto "
+				       "code.\n", __func__);
+				BUG();
+			} else {
+				copy += diff + nbytes;
+
+				dst = &req->dst[idx];
+
+				err = ablkcipher_add(&dlen, dst, nbytes, &nbytes);
+				if (err < 0)
+					return err;
+
+				idx += err;
+			}
+
+			t->length = copy;
+			t->offset = offset;
+		} else {
+			nbytes -= min(dst->length, nbytes);
+			idx++;
+		}
+
+		tidx++;
+	}
+
+	return tidx;
+}
+
+static int hifn_setup_session(struct ablkcipher_request *req)
+{
+	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
+	struct hifn_device *dev = ctx->dev;
+	unsigned long dlen, flags;
+	unsigned int nbytes = req->nbytes, idx = 0;
+	int err = -EINVAL, sg_num;
+	struct scatterlist *dst;
+
+	if (rctx->iv && !rctx->ivsize && rctx->mode != ACRYPTO_MODE_ECB)
+		goto err_out_exit;
+
+	rctx->walk.flags = 0;
+
+	while (nbytes) {
+		dst = &req->dst[idx];
+		dlen = min(dst->length, nbytes);
+
+		if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
+		    !IS_ALIGNED(dlen, HIFN_D_DST_DALIGN))
+			rctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
+
+		nbytes -= dlen;
+		idx++;
+	}
+
+	if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
+		err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
+		if (err < 0)
+			return err;
+	}
+
+	sg_num = hifn_cipher_walk(req, &rctx->walk);
+	if (sg_num < 0) {
+		err = sg_num;
+		goto err_out_exit;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->started + sg_num > HIFN_QUEUE_LENGTH) {
+		err = -EAGAIN;
+		goto err_out;
+	}
+
+	err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->nbytes, req);
+	if (err)
+		goto err_out;
+
+	dev->snum++;
+
+	dev->active = HIFN_DEFAULT_ACTIVE_NUM;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+
+err_out:
+	spin_unlock_irqrestore(&dev->lock, flags);
+err_out_exit:
+	if (err) {
+		dev_info(&dev->pdev->dev, "iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
+			 "type: %u, err: %d.\n",
+			 rctx->iv, rctx->ivsize,
+			 ctx->key, ctx->keysize,
+			 rctx->mode, rctx->op, rctx->type, err);
+	}
+
+	return err;
+}
+
+static int hifn_start_device(struct hifn_device *dev)
+{
+	int err;
+
+	dev->started = dev->active = 0;
+	hifn_reset_dma(dev, 1);
+
+	err = hifn_enable_crypto(dev);
+	if (err)
+		return err;
+
+	hifn_reset_puc(dev);
+
+	hifn_init_dma(dev);
+
+	hifn_init_registers(dev);
+
+	hifn_init_pubrng(dev);
+
+	return 0;
+}
+
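+/*
+ * Copy 'size' bytes from a linear (bounce) buffer back into the
+ * destination scatterlist, spilling over into further entries as needed.
+ */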
+static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
+		struct scatterlist *dst, unsigned int size, unsigned int *nbytesp)
+{
+	unsigned int srest = *srestp, nbytes = *nbytesp, copy;
+	void *daddr;
+	int idx = 0;
+
+	if (srest < size || size > nbytes)
+		return -EINVAL;
+
+	while (size) {
+		copy = min3(srest, dst->length, size);
+
+		daddr = kmap_atomic(sg_page(dst));
+		memcpy(daddr + dst->offset + offset, saddr, copy);
+		kunmap_atomic(daddr);
+
+		nbytes -= copy;
+		size -= copy;
+		srest -= copy;
+		saddr += copy;
+		offset = 0;
+
+		pr_debug("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n",
+			 __func__, copy, size, srest, nbytes);
+
+		dst++;
+		idx++;
+	}
+
+	*nbytesp = nbytes;
+	*srestp = srest;
+
+	return idx;
+}
+
+static inline void hifn_complete_sa(struct hifn_device *dev, int i)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->sa[i] = NULL;
+	dev->started--;
+	if (dev->started < 0)
+		dev_info(&dev->pdev->dev, "%s: started: %d.\n", __func__,
+			 dev->started);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	BUG_ON(dev->started < 0);
+}
+
+static void hifn_process_ready(struct ablkcipher_request *req, int error)
+{
+	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
+
+	if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
+		unsigned int nbytes = req->nbytes;
+		int idx = 0, err;
+		struct scatterlist *dst, *t;
+		void *saddr;
+
+		while (nbytes) {
+			t = &rctx->walk.cache[idx];
+			dst = &req->dst[idx];
+
+			pr_debug("\n%s: sg_page(t): %p, t->length: %u, "
+				"sg_page(dst): %p, dst->length: %u, "
+				"nbytes: %u.\n",
+				__func__, sg_page(t), t->length,
+				sg_page(dst), dst->length, nbytes);
+
+			if (!t->length) {
+				nbytes -= min(dst->length, nbytes);
+				idx++;
+				continue;
+			}
+
+			saddr = kmap_atomic(sg_page(t));
+
+			err = ablkcipher_get(saddr, &t->length, t->offset,
+					dst, nbytes, &nbytes);
+			if (err < 0) {
+				kunmap_atomic(saddr);
+				break;
+			}
+
+			idx += err;
+			kunmap_atomic(saddr);
+		}
+
+		hifn_cipher_walk_exit(&rctx->walk);
+	}
+
+	req->base.complete(&req->base, error);
+}
+
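+/*
+ * Reap descriptors the hardware has completed (HIFN_D_VALID cleared) from
+ * all four rings and complete the corresponding requests.
+ */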
+static void hifn_clear_rings(struct hifn_device *dev, int error)
+{
+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+	int i, u;
+
+	dev_dbg(&dev->pdev->dev, "ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
+			"k: %d.%d.%d.%d.\n",
+			dma->cmdi, dma->srci, dma->dsti, dma->resi,
+			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
+			dma->cmdk, dma->srck, dma->dstk, dma->resk);
+
+	i = dma->resk; u = dma->resu;
+	while (u != 0) {
+		if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID))
+			break;
+
+		if (dev->sa[i]) {
+			dev->success++;
+			dev->reset = 0;
+			hifn_process_ready(dev->sa[i], error);
+			hifn_complete_sa(dev, i);
+		}
+
+		if (++i == HIFN_D_RES_RSIZE)
+			i = 0;
+		u--;
+	}
+	dma->resk = i; dma->resu = u;
+
+	i = dma->srck; u = dma->srcu;
+	while (u != 0) {
+		if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID))
+			break;
+		if (++i == HIFN_D_SRC_RSIZE)
+			i = 0;
+		u--;
+	}
+	dma->srck = i; dma->srcu = u;
+
+	i = dma->cmdk; u = dma->cmdu;
+	while (u != 0) {
+		if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID))
+			break;
+		if (++i == HIFN_D_CMD_RSIZE)
+			i = 0;
+		u--;
+	}
+	dma->cmdk = i; dma->cmdu = u;
+
+	i = dma->dstk; u = dma->dstu;
+	while (u != 0) {
+		if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID))
+			break;
+		if (++i == HIFN_D_DST_RSIZE)
+			i = 0;
+		u--;
+	}
+	dma->dstk = i; dma->dstu = u;
+
+	dev_dbg(&dev->pdev->dev, "ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
+			"k: %d.%d.%d.%d.\n",
+			dma->cmdi, dma->srci, dma->dsti, dma->resi,
+			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
+			dma->cmdk, dma->srck, dma->dstk, dma->resk);
+}
+
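+/*
+ * Watchdog, rescheduled every second: disable DMA on rings that have gone
+ * idle and, if nothing completed for several periods while requests are
+ * outstanding, reset and restart the device.
+ */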
+static void hifn_work(struct work_struct *work)
+{
+	struct delayed_work *dw = to_delayed_work(work);
+	struct hifn_device *dev = container_of(dw, struct hifn_device, work);
+	unsigned long flags;
+	int reset = 0;
+	u32 r = 0;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->active == 0) {
+		struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+
+		if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
+			dev->flags &= ~HIFN_FLAG_CMD_BUSY;
+			r |= HIFN_DMACSR_C_CTRL_DIS;
+		}
+		if (dma->srcu == 0 && (dev->flags & HIFN_FLAG_SRC_BUSY)) {
+			dev->flags &= ~HIFN_FLAG_SRC_BUSY;
+			r |= HIFN_DMACSR_S_CTRL_DIS;
+		}
+		if (dma->dstu == 0 && (dev->flags & HIFN_FLAG_DST_BUSY)) {
+			dev->flags &= ~HIFN_FLAG_DST_BUSY;
+			r |= HIFN_DMACSR_D_CTRL_DIS;
+		}
+		if (dma->resu == 0 && (dev->flags & HIFN_FLAG_RES_BUSY)) {
+			dev->flags &= ~HIFN_FLAG_RES_BUSY;
+			r |= HIFN_DMACSR_R_CTRL_DIS;
+		}
+		if (r)
+			hifn_write_1(dev, HIFN_1_DMA_CSR, r);
+	} else
+		dev->active--;
+
+	if ((dev->prev_success == dev->success) && dev->started)
+		reset = 1;
+	dev->prev_success = dev->success;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (reset) {
+		if (++dev->reset >= 5) {
+			int i;
+			struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+
+			dev_info(&dev->pdev->dev,
+				 "r: %08x, active: %d, started: %d, "
+				 "success: %lu: qlen: %u/%u, reset: %d.\n",
+				 r, dev->active, dev->started,
+				 dev->success, dev->queue.qlen, dev->queue.max_qlen,
+				 reset);
+
+			dev_info(&dev->pdev->dev, "%s: res: ", __func__);
+			for (i = 0; i < HIFN_D_RES_RSIZE; ++i) {
+				pr_info("%x.%p ", dma->resr[i].l, dev->sa[i]);
+				if (dev->sa[i]) {
+					hifn_process_ready(dev->sa[i], -ENODEV);
+					hifn_complete_sa(dev, i);
+				}
+			}
+			pr_info("\n");
+
+			hifn_reset_dma(dev, 1);
+			hifn_stop_device(dev);
+			hifn_start_device(dev);
+			dev->reset = 0;
+		}
+
+		tasklet_schedule(&dev->tasklet);
+	}
+
+	schedule_delayed_work(&dev->work, HZ);
+}
+
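+/*
+ * Acknowledge the asserted DMA status bits, recover from overflow and
+ * PCI-abort conditions, and let the tasklet reap completed descriptors.
+ */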
+static irqreturn_t hifn_interrupt(int irq, void *data)
+{
+	struct hifn_device *dev = (struct hifn_device *)data;
+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+	u32 dmacsr, restart;
+
+	dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
+
+	dev_dbg(&dev->pdev->dev, "1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
+			"i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
+		dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
+		dma->cmdi, dma->srci, dma->dsti, dma->resi,
+		dma->cmdu, dma->srcu, dma->dstu, dma->resu);
+
+	if ((dmacsr & dev->dmareg) == 0)
+		return IRQ_NONE;
+
+	hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & dev->dmareg);
+
+	if (dmacsr & HIFN_DMACSR_ENGINE)
+		hifn_write_0(dev, HIFN_0_PUISR, hifn_read_0(dev, HIFN_0_PUISR));
+	if (dmacsr & HIFN_DMACSR_PUBDONE)
+		hifn_write_1(dev, HIFN_1_PUB_STATUS,
+			hifn_read_1(dev, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
+
+	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
+	if (restart) {
+		u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);
+
+		dev_warn(&dev->pdev->dev, "overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
+			 !!(dmacsr & HIFN_DMACSR_R_OVER),
+			 !!(dmacsr & HIFN_DMACSR_D_OVER),
+			puisr, !!(puisr & HIFN_PUISR_DSTOVER));
+		if (!!(puisr & HIFN_PUISR_DSTOVER))
+			hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
+		hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER |
+					HIFN_DMACSR_D_OVER));
+	}
+
+	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
+			HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
+	if (restart) {
+		dev_warn(&dev->pdev->dev, "abort: c: %d, s: %d, d: %d, r: %d.\n",
+			 !!(dmacsr & HIFN_DMACSR_C_ABORT),
+			 !!(dmacsr & HIFN_DMACSR_S_ABORT),
+			 !!(dmacsr & HIFN_DMACSR_D_ABORT),
+			 !!(dmacsr & HIFN_DMACSR_R_ABORT));
+		hifn_reset_dma(dev, 1);
+		hifn_init_dma(dev);
+		hifn_init_registers(dev);
+	}
+
+	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
+		dev_dbg(&dev->pdev->dev, "wait on command.\n");
+		dev->dmareg &= ~(HIFN_DMAIER_C_WAIT);
+		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
+	}
+
+	tasklet_schedule(&dev->tasklet);
+
+	return IRQ_HANDLED;
+}
+
+static void hifn_flush(struct hifn_device *dev)
+{
+	unsigned long flags;
+	struct crypto_async_request *async_req;
+	struct ablkcipher_request *req;
+	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+	int i;
+
+	for (i = 0; i < HIFN_D_RES_RSIZE; ++i) {
+		struct hifn_desc *d = &dma->resr[i];
+
+		if (dev->sa[i]) {
+			hifn_process_ready(dev->sa[i],
+				(d->l & __cpu_to_le32(HIFN_D_VALID)) ? -ENODEV : 0);
+			hifn_complete_sa(dev, i);
+		}
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while ((async_req = crypto_dequeue_request(&dev->queue))) {
+		req = ablkcipher_request_cast(async_req);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		hifn_process_ready(req, -ENODEV);
+
+		spin_lock_irqsave(&dev->lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct hifn_context *ctx = crypto_tfm_ctx(tfm);
+	struct hifn_device *dev = ctx->dev;
+
+	if (len > HIFN_MAX_CRYPT_KEY_LENGTH) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if (len == HIFN_DES_KEY_LENGTH) {
+		u32 tmp[DES_EXPKEY_WORDS];
+		int ret = des_ekey(tmp, key);
+
+		if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+			return -EINVAL;
+		}
+	}
+
+	dev->flags &= ~HIFN_FLAG_OLD_KEY;
+
+	memcpy(ctx->key, key, len);
+	ctx->keysize = len;
+
+	return 0;
+}
+
+static int hifn_handle_req(struct ablkcipher_request *req)
+{
+	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct hifn_device *dev = ctx->dev;
+	int err = -EAGAIN;
+
+	if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
+		err = hifn_setup_session(req);
+
+	if (err == -EAGAIN) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		err = ablkcipher_enqueue_request(&dev->queue, req);
+		spin_unlock_irqrestore(&dev->lock, flags);
+	}
+
+	return err;
+}
+
+static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
+		u8 type, u8 mode)
+{
+	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
+	unsigned ivsize;
+
+	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
+
+	if (req->info && mode != ACRYPTO_MODE_ECB) {
+		if (type == ACRYPTO_TYPE_AES_128)
+			ivsize = HIFN_AES_IV_LENGTH;
+		else if (type == ACRYPTO_TYPE_DES)
+			ivsize = HIFN_DES_KEY_LENGTH;
+		else if (type == ACRYPTO_TYPE_3DES)
+			ivsize = HIFN_3DES_KEY_LENGTH;
+	}
+
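+	/*
+	 * AES requests come in as ACRYPTO_TYPE_AES_128; promote the type to
+	 * match the key length that was actually installed.
+	 */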
+	if (ctx->keysize != 16 && type == ACRYPTO_TYPE_AES_128) {
+		if (ctx->keysize == 24)
+			type = ACRYPTO_TYPE_AES_192;
+		else if (ctx->keysize == 32)
+			type = ACRYPTO_TYPE_AES_256;
+	}
+
+	rctx->op = op;
+	rctx->mode = mode;
+	rctx->type = type;
+	rctx->iv = req->info;
+	rctx->ivsize = ivsize;
+
+	/*
+	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
+	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
+	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
+	 */
+
+	return hifn_handle_req(req);
+}
+
+static int hifn_process_queue(struct hifn_device *dev)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct ablkcipher_request *req;
+	unsigned long flags;
+	int err = 0;
+
+	while (dev->started < HIFN_QUEUE_LENGTH) {
+		spin_lock_irqsave(&dev->lock, flags);
+		backlog = crypto_get_backlog(&dev->queue);
+		async_req = crypto_dequeue_request(&dev->queue);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		if (!async_req)
+			break;
+
+		if (backlog)
+			backlog->complete(backlog, -EINPROGRESS);
+
+		req = ablkcipher_request_cast(async_req);
+
+		err = hifn_handle_req(req);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
+		u8 type, u8 mode)
+{
+	int err;
+	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct hifn_device *dev = ctx->dev;
+
+	err = hifn_setup_crypto_req(req, op, type, mode);
+	if (err)
+		return err;
+
+	if (dev->started < HIFN_QUEUE_LENGTH &&	dev->queue.qlen)
+		hifn_process_queue(dev);
+
+	return -EINPROGRESS;
+}
+
+/*
+ * AES encryption functions.
+ */
+static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
+}
+static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
+}
+static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
+}
+static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
+}
+
+/*
+ * AES decryption functions.
+ */
+static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
+}
+static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
+}
+static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
+}
+static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
+			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
+}
+
+/*
+ * DES encryption functions.
+ */
+static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
+}
+static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
+}
+static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
+}
+static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
+}
+
+/*
+ * DES decryption functions.
+ */
+static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
+}
+static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
+}
+static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
+}
+static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
+			ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
+}
+
+/*
+ * 3DES encryption functions.
+ */
+static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
+}
+static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
+}
+static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
+}
+static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
+}
+
+/* 3DES decryption functions. */
+static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
+}
+static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
+}
+static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
+}
+static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
+{
+	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
+			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
+}
+
+struct hifn_alg_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char drv_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int bsize;
+	struct ablkcipher_alg ablkcipher;
+};
+
+static struct hifn_alg_template hifn_alg_templates[] = {
+	/*
+	 * 3DES ECB, CBC, CFB and OFB modes.
+	 */
+	{
+		.name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
+		.ablkcipher = {
+			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
+			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
+			.setkey		=	hifn_setkey,
+			.encrypt	=	hifn_encrypt_3des_cfb,
+			.decrypt	=	hifn_decrypt_3des_cfb,
+		},
+	},
+	{
+		.name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
+		.ablkcipher = {
+			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
+			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
+			.setkey		=	hifn_setkey,
+			.encrypt	=	hifn_encrypt_3des_ofb,
+			.decrypt	=	hifn_decrypt_3des_ofb,
+		},
+	},
+	{
+		.name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
+		.ablkcipher = {
+			.ivsize		=	HIFN_IV_LENGTH,
+			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
+			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
+			.setkey		=	hifn_setkey,
+			.encrypt	=	hifn_encrypt_3des_cbc,
+			.decrypt	=	hifn_decrypt_3des_cbc,
+		},
+	},
+	{
+		.name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8,
+		.ablkcipher = {
+			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
+			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
+			.setkey		=	hifn_setkey,
+			.encrypt	=	hifn_encrypt_3des_ecb,
+			.decrypt	=	hifn_decrypt_3des_ecb,
+		},
+	},
+
+	/*
+	 * DES ECB, CBC, CFB and OFB modes.
+	 */
+	{
+		.name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
+		.ablkcipher = {
+			.min_keysize	=	HIFN_DES_KEY_LENGTH,
+			.max_keysize	=	HIFN_DES_KEY_LENGTH,
+			.setkey		=	hifn_setkey,
+			.encrypt	=	hifn_encrypt_des_cfb,
+			.decrypt	=	hifn_decrypt_des_cfb,
+		},
+	},
+	{
+		.name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
+		.ablkcipher = {
+			.min_keysize	=	HIFN_DES_KEY_LENGTH,
+			.max_keysize	=	HIFN_DES_KEY_LENGTH,
+			.setkey		=	hifn_setkey,
+			.encrypt	=	hifn_encrypt_des_ofb,
+			.decrypt	=	hifn_decrypt_des_ofb,
+		},
+	},
+	{
+		.name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
+		.ablkcipher = {
+			.ivsize		=	HIFN_IV_LENGTH,
+			.min_keysize	=	HIFN_DES_KEY_LENGTH,
+			.max_keysize	=	HIFN_DES_KEY_LENGTH,
+			.setkey		=	hifn_setkey,
+			.encrypt	=	hifn_encrypt_des_cbc,
+			.decrypt	=	hifn_decrypt_des_cbc,
+		},
+	},
+	{
+		.name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8,
+		.ablkcipher = {
+			.min_keysize	=	HIFN_DES_KEY_LENGTH,
+			.max_keysize	=	HIFN_DES_KEY_LENGTH,
+			.setkey		=	hifn_setkey,
+			.encrypt	=	hifn_encrypt_des_ecb,
+			.decrypt	=	hifn_decrypt_des_ecb,
+		},
+	},
+
+	/*
+	 * AES ECB, CBC, CFB and OFB modes.
+	 */
+	{
+		.name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
+		.ablkcipher = {
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey		=	hifn_setkey,
+			.encrypt	=	hifn_encrypt_aes_ecb,
+			.decrypt	=	hifn_decrypt_aes_ecb,
+		},
+	},
+	{
+		.name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16,
+		.ablkcipher = {
+			.ivsize		=	HIFN_AES_IV_LENGTH,
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey		=	hifn_setkey,
+			.encrypt	=	hifn_encrypt_aes_cbc,
+			.decrypt	=	hifn_decrypt_aes_cbc,
+		},
+	},
+	{
+		.name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
+		.ablkcipher = {
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey		=	hifn_setkey,
+			.encrypt	=	hifn_encrypt_aes_cfb,
+			.decrypt	=	hifn_decrypt_aes_cfb,
+		},
+	},
+	{
+		.name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
+		.ablkcipher = {
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey		=	hifn_setkey,
+			.encrypt	=	hifn_encrypt_aes_ofb,
+			.decrypt	=	hifn_decrypt_aes_ofb,
+		},
+	},
+};
+
+static int hifn_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg);
+	struct hifn_context *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->dev = ha->dev;
+	tfm->crt_ablkcipher.reqsize = sizeof(struct hifn_request_context);
+	return 0;
+}
+
+static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
+{
+	struct hifn_crypto_alg *alg;
+	int err;
+
+	alg = kzalloc(sizeof(*alg), GFP_KERNEL);
+	if (!alg)
+		return -ENOMEM;
+
+	snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
+	snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
+		 t->drv_name, dev->name);
+
+	alg->alg.cra_priority = 300;
+	alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
+	alg->alg.cra_blocksize = t->bsize;
+	alg->alg.cra_ctxsize = sizeof(struct hifn_context);
+	alg->alg.cra_alignmask = 0;
+	alg->alg.cra_type = &crypto_ablkcipher_type;
+	alg->alg.cra_module = THIS_MODULE;
+	alg->alg.cra_u.ablkcipher = t->ablkcipher;
+	alg->alg.cra_init = hifn_cra_init;
+
+	alg->dev = dev;
+
+	list_add_tail(&alg->entry, &dev->alg_list);
+
+	err = crypto_register_alg(&alg->alg);
+	if (err) {
+		list_del(&alg->entry);
+		kfree(alg);
+	}
+
+	return err;
+}
+
+static void hifn_unregister_alg(struct hifn_device *dev)
+{
+	struct hifn_crypto_alg *a, *n;
+
+	list_for_each_entry_safe(a, n, &dev->alg_list, entry) {
+		list_del(&a->entry);
+		crypto_unregister_alg(&a->alg);
+		kfree(a);
+	}
+}
+
+static int hifn_register_alg(struct hifn_device *dev)
+{
+	int i, err;
+
+	for (i = 0; i < ARRAY_SIZE(hifn_alg_templates); ++i) {
+		err = hifn_alg_alloc(dev, &hifn_alg_templates[i]);
+		if (err)
+			goto err_out_exit;
+	}
+
+	return 0;
+
+err_out_exit:
+	hifn_unregister_alg(dev);
+	return err;
+}
+
+static void hifn_tasklet_callback(unsigned long data)
+{
+	struct hifn_device *dev = (struct hifn_device *)data;
+
+	/*
+	 * It is ok to call this without the lock being held,
+	 * although it modifies some parameters used in parallel
+	 * (like dev->success): they are only used in process
+	 * context, or the update is atomic (like setting dev->sa[i] to NULL).
+	 */
+	hifn_clear_rings(dev, 0);
+
+	if (dev->started < HIFN_QUEUE_LENGTH &&	dev->queue.qlen)
+		hifn_process_queue(dev);
+}
+
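+/*
+ * PCI probe path: enable the device, map the three BARs, allocate the
+ * descriptor ring, set up the tasklet and IRQ, then start the engine and
+ * register the RNG and the cipher algorithm templates.
+ */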
+static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int err, i;
+	struct hifn_device *dev;
+	char name[8];
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (err)
+		goto err_out_disable_pci_device;
+
+	snprintf(name, sizeof(name), "hifn%d",
+			atomic_inc_return(&hifn_dev_number) - 1);
+
+	err = pci_request_regions(pdev, name);
+	if (err)
+		goto err_out_disable_pci_device;
+
+	if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE ||
+	    pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE ||
+	    pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) {
+		dev_err(&pdev->dev, "Broken hardware - I/O regions are too small.\n");
+		err = -ENODEV;
+		goto err_out_free_regions;
+	}
+
+	dev = kzalloc(sizeof(struct hifn_device) + sizeof(struct crypto_alg),
+			GFP_KERNEL);
+	if (!dev) {
+		err = -ENOMEM;
+		goto err_out_free_regions;
+	}
+
+	INIT_LIST_HEAD(&dev->alg_list);
+
+	snprintf(dev->name, sizeof(dev->name), "%s", name);
+	spin_lock_init(&dev->lock);
+
+	for (i = 0; i < 3; ++i) {
+		unsigned long addr, size;
+
+		addr = pci_resource_start(pdev, i);
+		size = pci_resource_len(pdev, i);
+
+		dev->bar[i] = ioremap_nocache(addr, size);
+		if (!dev->bar[i]) {
+			err = -ENOMEM;
+			goto err_out_unmap_bars;
+		}
+	}
+
+	dev->desc_virt = pci_zalloc_consistent(pdev, sizeof(struct hifn_dma),
+					       &dev->desc_dma);
+	if (!dev->desc_virt) {
+		dev_err(&pdev->dev, "Failed to allocate descriptor rings.\n");
+		err = -ENOMEM;
+		goto err_out_unmap_bars;
+	}
+
+	dev->pdev = pdev;
+	dev->irq = pdev->irq;
+
+	for (i = 0; i < HIFN_D_RES_RSIZE; ++i)
+		dev->sa[i] = NULL;
+
+	pci_set_drvdata(pdev, dev);
+
+	tasklet_init(&dev->tasklet, hifn_tasklet_callback, (unsigned long)dev);
+
+	crypto_init_queue(&dev->queue, 1);
+
+	err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to request IRQ%d: err: %d.\n",
+			dev->irq, err);
+		dev->irq = 0;
+		goto err_out_free_desc;
+	}
+
+	err = hifn_start_device(dev);
+	if (err)
+		goto err_out_free_irq;
+
+	err = hifn_register_rng(dev);
+	if (err)
+		goto err_out_stop_device;
+
+	err = hifn_register_alg(dev);
+	if (err)
+		goto err_out_unregister_rng;
+
+	INIT_DELAYED_WORK(&dev->work, hifn_work);
+	schedule_delayed_work(&dev->work, HZ);
+
+	dev_dbg(&pdev->dev, "HIFN crypto accelerator card at %s has been "
+		"successfully registered as %s.\n",
+		pci_name(pdev), dev->name);
+
+	return 0;
+
+err_out_unregister_rng:
+	hifn_unregister_rng(dev);
+err_out_stop_device:
+	hifn_reset_dma(dev, 1);
+	hifn_stop_device(dev);
+err_out_free_irq:
+	free_irq(dev->irq, dev);
+	tasklet_kill(&dev->tasklet);
+err_out_free_desc:
+	pci_free_consistent(pdev, sizeof(struct hifn_dma),
+			dev->desc_virt, dev->desc_dma);
+
+err_out_unmap_bars:
+	for (i = 0; i < 3; ++i)
+		if (dev->bar[i])
+			iounmap(dev->bar[i]);
+	kfree(dev);
+
+err_out_free_regions:
+	pci_release_regions(pdev);
+
+err_out_disable_pci_device:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+static void hifn_remove(struct pci_dev *pdev)
+{
+	int i;
+	struct hifn_device *dev;
+
+	dev = pci_get_drvdata(pdev);
+
+	if (dev) {
+		cancel_delayed_work_sync(&dev->work);
+
+		hifn_unregister_rng(dev);
+		hifn_unregister_alg(dev);
+		hifn_reset_dma(dev, 1);
+		hifn_stop_device(dev);
+
+		free_irq(dev->irq, dev);
+		tasklet_kill(&dev->tasklet);
+
+		hifn_flush(dev);
+
+		pci_free_consistent(pdev, sizeof(struct hifn_dma),
+				dev->desc_virt, dev->desc_dma);
+		for (i = 0; i < 3; ++i)
+			if (dev->bar[i])
+				iounmap(dev->bar[i]);
+
+		kfree(dev);
+	}
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_device_id hifn_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7955) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7956) },
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
+
+static struct pci_driver hifn_pci_driver = {
+	.name     = "hifn795x",
+	.id_table = hifn_pci_tbl,
+	.probe    = hifn_probe,
+	.remove   = hifn_remove,
+};
+
+static int __init hifn_init(void)
+{
+	unsigned int freq;
+	int err;
+
+	/* HIFN supports only 32-bit addresses */
+	BUILD_BUG_ON(sizeof(dma_addr_t) != 4);
+
+	if (strncmp(hifn_pll_ref, "ext", 3) &&
+	    strncmp(hifn_pll_ref, "pci", 3)) {
+		pr_err("hifn795x: invalid hifn_pll_ref clock, must be pci or ext\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * For the 7955/7956 the reference clock frequency must be in the
+	 * range of 20MHz-100MHz. For the 7954 the upper bound is 66.67MHz,
+	 * but this chip is currently not supported.
+	 */
+	if (hifn_pll_ref[3] != '\0') {
+		freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
+		if (freq < 20 || freq > 100) {
+			pr_err("hifn795x: invalid hifn_pll_ref frequency, must "
+			       "be in the range of 20-100\n");
+			return -EINVAL;
+		}
+	}
+
+	err = pci_register_driver(&hifn_pci_driver);
+	if (err < 0) {
+		pr_err("Failed to register PCI driver for %s device.\n",
+		       hifn_pci_driver.name);
+		return -ENODEV;
+	}
+
+	pr_info("Driver for HIFN 795x crypto accelerator chip "
+		"has been successfully registered.\n");
+
+	return 0;
+}
+
+static void __exit hifn_fini(void)
+{
+	pci_unregister_driver(&hifn_pci_driver);
+
+	pr_info("Driver for HIFN 795x crypto accelerator chip "
+		"has been successfully unregistered.\n");
+}
+
+module_init(hifn_init);
+module_exit(hifn_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_DESCRIPTION("Driver for HIFN 795x crypto accelerator chip.");
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
new file mode 100644
index 0000000..8ca9c50
--- /dev/null
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config CRYPTO_DEV_HISI_SEC
+	tristate "Support for Hisilicon SEC crypto block cipher accelerator"
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_ALGAPI
+	select SG_SPLIT
+	depends on ARM64 || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  Support for Hisilicon SEC Engine in Hip06 and Hip07
+
+	  To compile this as a module, choose M here: the module
+	  will be called hisi_sec.
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
new file mode 100644
index 0000000..463f46a
--- /dev/null
+++ b/drivers/crypto/hisilicon/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
diff --git a/drivers/crypto/hisilicon/sec/Makefile b/drivers/crypto/hisilicon/sec/Makefile
new file mode 100644
index 0000000..a55b698
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += hisi_sec.o
+hisi_sec-y = sec_algs.o sec_drv.o
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
new file mode 100644
index 0000000..cdc4f9a
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -0,0 +1,1125 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include <crypto/skcipher.h>
+#include <crypto/xts.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sec_drv.h"
+
+#define SEC_MAX_CIPHER_KEY		64
+#define SEC_REQ_LIMIT SZ_32M
+
+struct sec_c_alg_cfg {
+	unsigned c_alg		: 3;
+	unsigned c_mode		: 3;
+	unsigned key_len	: 2;
+	unsigned c_width	: 2;
+};
+
+static const struct sec_c_alg_cfg sec_c_alg_cfgs[] =  {
+	[SEC_C_DES_ECB_64] = {
+		.c_alg = SEC_C_ALG_DES,
+		.c_mode = SEC_C_MODE_ECB,
+		.key_len = SEC_KEY_LEN_DES,
+	},
+	[SEC_C_DES_CBC_64] = {
+		.c_alg = SEC_C_ALG_DES,
+		.c_mode = SEC_C_MODE_CBC,
+		.key_len = SEC_KEY_LEN_DES,
+	},
+	[SEC_C_3DES_ECB_192_3KEY] = {
+		.c_alg = SEC_C_ALG_3DES,
+		.c_mode = SEC_C_MODE_ECB,
+		.key_len = SEC_KEY_LEN_3DES_3_KEY,
+	},
+	[SEC_C_3DES_ECB_192_2KEY] = {
+		.c_alg = SEC_C_ALG_3DES,
+		.c_mode = SEC_C_MODE_ECB,
+		.key_len = SEC_KEY_LEN_3DES_2_KEY,
+	},
+	[SEC_C_3DES_CBC_192_3KEY] = {
+		.c_alg = SEC_C_ALG_3DES,
+		.c_mode = SEC_C_MODE_CBC,
+		.key_len = SEC_KEY_LEN_3DES_3_KEY,
+	},
+	[SEC_C_3DES_CBC_192_2KEY] = {
+		.c_alg = SEC_C_ALG_3DES,
+		.c_mode = SEC_C_MODE_CBC,
+		.key_len = SEC_KEY_LEN_3DES_2_KEY,
+	},
+	[SEC_C_AES_ECB_128] = {
+		.c_alg = SEC_C_ALG_AES,
+		.c_mode = SEC_C_MODE_ECB,
+		.key_len = SEC_KEY_LEN_AES_128,
+	},
+	[SEC_C_AES_ECB_192] = {
+		.c_alg = SEC_C_ALG_AES,
+		.c_mode = SEC_C_MODE_ECB,
+		.key_len = SEC_KEY_LEN_AES_192,
+	},
+	[SEC_C_AES_ECB_256] = {
+		.c_alg = SEC_C_ALG_AES,
+		.c_mode = SEC_C_MODE_ECB,
+		.key_len = SEC_KEY_LEN_AES_256,
+	},
+	[SEC_C_AES_CBC_128] = {
+		.c_alg = SEC_C_ALG_AES,
+		.c_mode = SEC_C_MODE_CBC,
+		.key_len = SEC_KEY_LEN_AES_128,
+	},
+	[SEC_C_AES_CBC_192] = {
+		.c_alg = SEC_C_ALG_AES,
+		.c_mode = SEC_C_MODE_CBC,
+		.key_len = SEC_KEY_LEN_AES_192,
+	},
+	[SEC_C_AES_CBC_256] = {
+		.c_alg = SEC_C_ALG_AES,
+		.c_mode = SEC_C_MODE_CBC,
+		.key_len = SEC_KEY_LEN_AES_256,
+	},
+	[SEC_C_AES_CTR_128] = {
+		.c_alg = SEC_C_ALG_AES,
+		.c_mode = SEC_C_MODE_CTR,
+		.key_len = SEC_KEY_LEN_AES_128,
+	},
+	[SEC_C_AES_CTR_192] = {
+		.c_alg = SEC_C_ALG_AES,
+		.c_mode = SEC_C_MODE_CTR,
+		.key_len = SEC_KEY_LEN_AES_192,
+	},
+	[SEC_C_AES_CTR_256] = {
+		.c_alg = SEC_C_ALG_AES,
+		.c_mode = SEC_C_MODE_CTR,
+		.key_len = SEC_KEY_LEN_AES_256,
+	},
+	[SEC_C_AES_XTS_128] = {
+		.c_alg = SEC_C_ALG_AES,
+		.c_mode = SEC_C_MODE_XTS,
+		.key_len = SEC_KEY_LEN_AES_128,
+	},
+	[SEC_C_AES_XTS_256] = {
+		.c_alg = SEC_C_ALG_AES,
+		.c_mode = SEC_C_MODE_XTS,
+		.key_len = SEC_KEY_LEN_AES_256,
+	},
+	[SEC_C_NULL] = {
+	},
+};
+
+/*
+ * Mutex used to protect the reference count of registered
+ * algorithm providers.
+ */
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
+					   struct sec_bd_info *req,
+					   enum sec_cipher_alg alg)
+{
+	const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];
+
+	memset(req, 0, sizeof(*req));
+	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
+	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
+	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
+	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;
+
+	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
+	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
+}
+
+static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
+					  const u8 *key,
+					  unsigned int keylen,
+					  enum sec_cipher_alg alg)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
+	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->cipher_alg = alg;
+	memcpy(ctx->key, key, keylen);
+	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
+				       ctx->cipher_alg);
+}
+
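+/*
+ * Build the hardware scatter gather list for an already DMA mapped
+ * scatterlist: blocks of SEC_MAX_SGE_NUM entries are allocated from the
+ * hw_sgl DMA pool and chained together via their next_sgl/next fields.
+ */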
+static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
+				     dma_addr_t *psec_sgl,
+				     struct scatterlist *sgl,
+				     int count,
+				     struct sec_dev_info *info)
+{
+	struct sec_hw_sgl *sgl_current = NULL;
+	struct sec_hw_sgl *sgl_next;
+	dma_addr_t sgl_next_dma;
+	struct scatterlist *sg;
+	int ret, sge_index, i;
+
+	if (!count)
+		return -EINVAL;
+
+	for_each_sg(sgl, sg, count, i) {
+		sge_index = i % SEC_MAX_SGE_NUM;
+		if (sge_index == 0) {
+			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
+						   GFP_KERNEL, &sgl_next_dma);
+			if (!sgl_next) {
+				ret = -ENOMEM;
+				goto err_free_hw_sgls;
+			}
+
+			if (!sgl_current) { /* First one */
+				*psec_sgl = sgl_next_dma;
+				*sec_sgl = sgl_next;
+			} else { /* Chained */
+				sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
+				sgl_current->next_sgl = sgl_next_dma;
+				sgl_current->next = sgl_next;
+			}
+			sgl_current = sgl_next;
+		}
+		sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
+		sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
+		sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
+	}
+	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
+	sgl_current->next_sgl = 0;
+	(*sec_sgl)->entry_sum_in_chain = count;
+
+	return 0;
+
+err_free_hw_sgls:
+	sgl_current = *sec_sgl;
+	while (sgl_current) {
+		sgl_next = sgl_current->next;
+		dma_pool_free(info->hw_sgl_pool, sgl_current,
+			      sgl_current->next_sgl);
+		sgl_current = sgl_next;
+	}
+	*psec_sgl = 0;
+
+	return ret;
+}
+
+static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
+			    dma_addr_t psec_sgl, struct sec_dev_info *info)
+{
+	struct sec_hw_sgl *sgl_current, *sgl_next;
+
+	if (!hw_sgl)
+		return;
+	sgl_current = hw_sgl;
+	while (sgl_current->next) {
+		sgl_next = sgl_current->next;
+		dma_pool_free(info->hw_sgl_pool, sgl_current,
+			      sgl_current->next_sgl);
+		sgl_current = sgl_next;
+	}
+	dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
+}
+
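+/*
+ * Common setkey helper: allocate the DMA coherent key buffer on first use
+ * (or wipe it when rekeying) and rebuild the request template for the
+ * selected algorithm, mode and key length.
+ */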
+static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
+				   const u8 *key, unsigned int keylen,
+				   enum sec_cipher_alg alg)
+{
+	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct device *dev = ctx->queue->dev_info->dev;
+
+	mutex_lock(&ctx->lock);
+	if (ctx->key) {
+		/* rekeying */
+		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
+	} else {
+		/* new key */
+		ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
+					       &ctx->pkey, GFP_KERNEL);
+		if (!ctx->key) {
+			mutex_unlock(&ctx->lock);
+			return -ENOMEM;
+		}
+	}
+	mutex_unlock(&ctx->lock);
+	sec_alg_skcipher_init_context(tfm, key, keylen, alg);
+
+	return 0;
+}
+
+static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
+					   const u8 *key, unsigned int keylen)
+{
+	enum sec_cipher_alg alg;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		alg = SEC_C_AES_ECB_128;
+		break;
+	case AES_KEYSIZE_192:
+		alg = SEC_C_AES_ECB_192;
+		break;
+	case AES_KEYSIZE_256:
+		alg = SEC_C_AES_ECB_256;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
+					   const u8 *key, unsigned int keylen)
+{
+	enum sec_cipher_alg alg;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		alg = SEC_C_AES_CBC_128;
+		break;
+	case AES_KEYSIZE_192:
+		alg = SEC_C_AES_CBC_192;
+		break;
+	case AES_KEYSIZE_256:
+		alg = SEC_C_AES_CBC_256;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
+					   const u8 *key, unsigned int keylen)
+{
+	enum sec_cipher_alg alg;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		alg = SEC_C_AES_CTR_128;
+		break;
+	case AES_KEYSIZE_192:
+		alg = SEC_C_AES_CTR_192;
+		break;
+	case AES_KEYSIZE_256:
+		alg = SEC_C_AES_CTR_256;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
+					   const u8 *key, unsigned int keylen)
+{
+	enum sec_cipher_alg alg;
+	int ret;
+
+	ret = xts_verify_key(tfm, key, keylen);
+	if (ret)
+		return ret;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128 * 2:
+		alg = SEC_C_AES_XTS_128;
+		break;
+	case AES_KEYSIZE_256 * 2:
+		alg = SEC_C_AES_XTS_256;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
+					   const u8 *key, unsigned int keylen)
+{
+	if (keylen != DES_KEY_SIZE)
+		return -EINVAL;
+
+	return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
+}
+
+static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
+					   const u8 *key, unsigned int keylen)
+{
+	if (keylen != DES_KEY_SIZE)
+		return -EINVAL;
+
+	return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
+}
+
+static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
+					    const u8 *key, unsigned int keylen)
+{
+	if (keylen != DES_KEY_SIZE * 3)
+		return -EINVAL;
+
+	return sec_alg_skcipher_setkey(tfm, key, keylen,
+				       SEC_C_3DES_ECB_192_3KEY);
+}
+
+static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
+					    const u8 *key, unsigned int keylen)
+{
+	if (keylen != DES3_EDE_KEY_SIZE)
+		return -EINVAL;
+
+	return sec_alg_skcipher_setkey(tfm, key, keylen,
+				       SEC_C_3DES_CBC_192_3KEY);
+}
+
+static void sec_alg_free_el(struct sec_request_el *el,
+			    struct sec_dev_info *info)
+{
+	sec_free_hw_sgl(el->out, el->dma_out, info);
+	sec_free_hw_sgl(el->in, el->dma_in, info);
+	kfree(el->sgl_in);
+	kfree(el->sgl_out);
+	kfree(el);
+}
+
+/* queuelock must be held */
+static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
+{
+	struct sec_request_el *el, *temp;
+	int ret = 0;
+
+	mutex_lock(&sec_req->lock);
+	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
+		/*
+		 * Add to the hardware queue only under the following
+		 * circumstances:
+		 * 1) Software and hardware queues are empty, so there are no
+		 *    chaining dependencies.
+		 * 2) No dependencies as this is a new IV (check the software
+		 *    queue is empty to maintain ordering).
+		 * 3) No dependencies because the mode does no chaining.
+		 *
+		 * In all other cases first insert onto the software queue,
+		 * which is then drained as requests complete.
+		 */
+		if (!queue->havesoftqueue ||
+		    (kfifo_is_empty(&queue->softqueue) &&
+		     sec_queue_empty(queue))) {
+			ret = sec_queue_send(queue, &el->req, sec_req);
+			if (ret == -EAGAIN) {
+				/* Wait until we can send, then try again */
+				/* DEAD if here - should not happen */
+				ret = -EBUSY;
+				goto err_unlock;
+			}
+		} else {
+			kfifo_put(&queue->softqueue, el);
+		}
+	}
+err_unlock:
+	mutex_unlock(&sec_req->lock);
+
+	return ret;
+}
+
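+/*
+ * Per-element completion callback.  Copies the next IV back for CBC
+ * chaining (or bumps the counter for CTR), feeds the hardware queue from
+ * the software queue or the backlog, frees the finished element and
+ * completes the skcipher request once its last element is done.
+ */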
+static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
+				      struct crypto_async_request *req_base)
+{
+	struct skcipher_request *skreq = container_of(req_base,
+						      struct skcipher_request,
+						      base);
+	struct sec_request *sec_req = skcipher_request_ctx(skreq);
+	struct sec_request *backlog_req;
+	struct sec_request_el *sec_req_el, *nextrequest;
+	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
+	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
+	struct device *dev = ctx->queue->dev_info->dev;
+	int icv_or_skey_en, ret;
+	bool done;
+
+	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
+				      head);
+	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
+		SEC_BD_W0_ICV_OR_SKEY_EN_S;
+	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
+		dev_err(dev, "Got an invalid answer %lu %d\n",
+			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
+			icv_or_skey_en);
+		sec_req->err = -EINVAL;
+		/*
+		 * We need to muddle on to avoid getting stuck with elements
+		 * on the queue. The error will be reported to the requester,
+		 * which should be able to handle it appropriately.
+		 */
+	}
+
+	mutex_lock(&ctx->queue->queuelock);
+	/* Put the IV in place for chained cases */
+	switch (ctx->cipher_alg) {
+	case SEC_C_AES_CBC_128:
+	case SEC_C_AES_CBC_192:
+	case SEC_C_AES_CBC_256:
+		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
+			sg_pcopy_to_buffer(sec_req_el->sgl_out,
+					   sg_nents(sec_req_el->sgl_out),
+					   skreq->iv,
+					   crypto_skcipher_ivsize(atfm),
+					   sec_req_el->el_length -
+					   crypto_skcipher_ivsize(atfm));
+		else
+			sg_pcopy_to_buffer(sec_req_el->sgl_in,
+					   sg_nents(sec_req_el->sgl_in),
+					   skreq->iv,
+					   crypto_skcipher_ivsize(atfm),
+					   sec_req_el->el_length -
+					   crypto_skcipher_ivsize(atfm));
+		/* No need to sync to the device as coherent DMA */
+		break;
+	case SEC_C_AES_CTR_128:
+	case SEC_C_AES_CTR_192:
+	case SEC_C_AES_CTR_256:
+		crypto_inc(skreq->iv, 16);
+		break;
+	default:
+		/* Do not update */
+		break;
+	}
+
+	if (ctx->queue->havesoftqueue &&
+	    !kfifo_is_empty(&ctx->queue->softqueue) &&
+	    sec_queue_empty(ctx->queue)) {
+		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
+		if (ret <= 0)
+			dev_err(dev,
+				"Error getting next element from kfifo %d\n",
+				ret);
+		else
+			/* We know there is space so this cannot fail */
+			sec_queue_send(ctx->queue, &nextrequest->req,
+				       nextrequest->sec_req);
+	} else if (!list_empty(&ctx->backlog)) {
+		/* Need to verify there is room first */
+		backlog_req = list_first_entry(&ctx->backlog,
+					       typeof(*backlog_req),
+					       backlog_head);
+		if (sec_queue_can_enqueue(ctx->queue,
+		    backlog_req->num_elements) ||
+		    (ctx->queue->havesoftqueue &&
+		     kfifo_avail(&ctx->queue->softqueue) >
+		     backlog_req->num_elements)) {
+			sec_send_request(backlog_req, ctx->queue);
+			backlog_req->req_base->complete(backlog_req->req_base,
+							-EINPROGRESS);
+			list_del(&backlog_req->backlog_head);
+		}
+	}
+	mutex_unlock(&ctx->queue->queuelock);
+
+	mutex_lock(&sec_req->lock);
+	list_del(&sec_req_el->head);
+	mutex_unlock(&sec_req->lock);
+	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);
+
+	/*
+	 * Request is done.
+	 * The dance is needed as the lock is freed in the completion path.
+	 */
+	mutex_lock(&sec_req->lock);
+	done = list_empty(&sec_req->elements);
+	mutex_unlock(&sec_req->lock);
+	if (done) {
+		if (crypto_skcipher_ivsize(atfm)) {
+			dma_unmap_single(dev, sec_req->dma_iv,
+					 crypto_skcipher_ivsize(atfm),
+					 DMA_TO_DEVICE);
+		}
+		dma_unmap_sg(dev, skreq->src, sec_req->len_in,
+			     DMA_BIDIRECTIONAL);
+		if (skreq->src != skreq->dst)
+			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
+				     DMA_BIDIRECTIONAL);
+		skreq->base.complete(&skreq->base, sec_req->err);
+	}
+}
+
+void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
+{
+	struct sec_request *sec_req = shadow;
+
+	sec_req->cb(resp, sec_req->req_base);
+}
+
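+/*
+ * Work out how many SEC_REQ_LIMIT sized chunks are needed to cover the
+ * request and fill an array with the per-chunk sizes (the last entry
+ * holds the remainder).
+ */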
+static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
+					      int *steps)
+{
+	size_t *sizes;
+	int i;
+
+	/* Split into suitable sized blocks */
+	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
+	sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
+	if (!sizes)
+		return -ENOMEM;
+
+	for (i = 0; i < *steps - 1; i++)
+		sizes[i] = SEC_REQ_LIMIT;
+	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
+	*split_sizes = sizes;
+
+	return 0;
+}
+
+static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
+				int steps, struct scatterlist ***splits,
+				int **splits_nents,
+				int sgl_len_in,
+				struct device *dev)
+{
+	int ret, count;
+
+	count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+	if (!count)
+		return -EINVAL;
+
+	*splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
+	if (!*splits) {
+		ret = -ENOMEM;
+		goto err_unmap_sg;
+	}
+	*splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
+	if (!*splits_nents) {
+		ret = -ENOMEM;
+		goto err_free_splits;
+	}
+
+	/* Split the mapped scatterlist into one scatterlist per chunk */
+	ret = sg_split(sgl, count, 0, steps, split_sizes,
+		       *splits, *splits_nents, GFP_KERNEL);
+	if (ret) {
+		ret = -ENOMEM;
+		goto err_free_splits_nents;
+	}
+
+	return 0;
+
+err_free_splits_nents:
+	kfree(*splits_nents);
+err_free_splits:
+	kfree(*splits);
+err_unmap_sg:
+	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+
+	return ret;
+}
+
+/*
+ * Reverses the sec_map_and_split_sg call for messages not yet added to
+ * the queues.
+ */
+static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
+				struct scatterlist **splits, int *splits_nents,
+				int sgl_len_in, struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < steps; i++)
+		kfree(splits[i]);
+	kfree(splits_nents);
+	kfree(splits);
+
+	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+}
+
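+/*
+ * Build one request element: copy the BD template, encode the cipher
+ * direction and granule size fields, and attach hardware SGLs for the
+ * input and, when the destination differs, the output scatterlist.
+ */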
+static struct sec_request_el
+*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
+			   int el_size, bool different_dest,
+			   struct scatterlist *sgl_in, int n_ents_in,
+			   struct scatterlist *sgl_out, int n_ents_out,
+			   struct sec_dev_info *info)
+{
+	struct sec_request_el *el;
+	struct sec_bd_info *req;
+	int ret;
+
+	el = kzalloc(sizeof(*el), GFP_KERNEL);
+	if (!el)
+		return ERR_PTR(-ENOMEM);
+	el->el_length = el_size;
+	req = &el->req;
+	memcpy(req, template, sizeof(*req));
+
+	req->w0 &= ~SEC_BD_W0_CIPHER_M;
+	if (encrypt)
+		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
+	else
+		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;
+
+	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
+	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
+		SEC_BD_W0_C_GRAN_SIZE_19_16_M;
+
+	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
+	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
+		SEC_BD_W0_C_GRAN_SIZE_21_20_M;
+
+	/* Writing whole u32 so no need to take care of masking */
+	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
+		((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
+		 SEC_BD_W2_C_GRAN_SIZE_15_0_M);
+
+	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
+	req->w1 |= SEC_BD_W1_ADDR_TYPE;
+
+	el->sgl_in = sgl_in;
+
+	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
+					n_ents_in, info);
+	if (ret)
+		goto err_free_el;
+
+	req->data_addr_lo = lower_32_bits(el->dma_in);
+	req->data_addr_hi = upper_32_bits(el->dma_in);
+
+	if (different_dest) {
+		el->sgl_out = sgl_out;
+		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
+						el->sgl_out,
+						n_ents_out, info);
+		if (ret)
+			goto err_free_hw_sgl_in;
+
+		req->w0 |= SEC_BD_W0_DE;
+		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
+		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);
+
+	} else {
+		req->w0 &= ~SEC_BD_W0_DE;
+		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
+		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
+	}
+
+	return el;
+
+err_free_hw_sgl_in:
+	sec_free_hw_sgl(el->in, el->dma_in, info);
+err_free_el:
+	kfree(el);
+
+	return ERR_PTR(ret);
+}
+
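+/*
+ * Main encrypt/decrypt path: map and split the source (and, if different,
+ * destination) scatterlists into SEC_REQ_LIMIT sized chunks, build one BD
+ * element per chunk, then hand the whole set to the hardware queue,
+ * software queue or backlog in one atomic step.
+ */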
+static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
+				   bool encrypt)
+{
+	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
+	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct sec_queue *queue = ctx->queue;
+	struct sec_request *sec_req = skcipher_request_ctx(skreq);
+	struct sec_dev_info *info = queue->dev_info;
+	int i, ret, steps;
+	size_t *split_sizes;
+	struct scatterlist **splits_in;
+	struct scatterlist **splits_out = NULL;
+	int *splits_in_nents;
+	int *splits_out_nents = NULL;
+	struct sec_request_el *el, *temp;
+	bool split = skreq->src != skreq->dst;
+
+	mutex_init(&sec_req->lock);
+	sec_req->req_base = &skreq->base;
+	sec_req->err = 0;
+	/* SGL mapping out here to allow us to break it up as necessary */
+	sec_req->len_in = sg_nents(skreq->src);
+
+	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
+						 &steps);
+	if (ret)
+		return ret;
+	sec_req->num_elements = steps;
+	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
+				   &splits_in_nents, sec_req->len_in,
+				   info->dev);
+	if (ret)
+		goto err_free_split_sizes;
+
+	if (split) {
+		sec_req->len_out = sg_nents(skreq->dst);
+		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
+					   &splits_out, &splits_out_nents,
+					   sec_req->len_out, info->dev);
+		if (ret)
+			goto err_unmap_in_sg;
+	}
+	/* Shared info stored in sec_req - applies to all BDs */
+	sec_req->tfm_ctx = ctx;
+	sec_req->cb = sec_skcipher_alg_callback;
+	INIT_LIST_HEAD(&sec_req->elements);
+
+	/*
+	 * Future optimization.
+	 * In the chaining case we can't use a dma pool bounce buffer,
+	 * but in the case where we know there is no chaining we could.
+	 */
+	if (crypto_skcipher_ivsize(atfm)) {
+		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
+						 crypto_skcipher_ivsize(atfm),
+						 DMA_TO_DEVICE);
+		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
+			ret = -ENOMEM;
+			goto err_unmap_out_sg;
+		}
+	}
+
+	/* Set them all up then queue - cleaner error handling. */
+	for (i = 0; i < steps; i++) {
+		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
+					       encrypt ? 1 : 0,
+					       split_sizes[i],
+					       skreq->src != skreq->dst,
+					       splits_in[i], splits_in_nents[i],
+					       split ? splits_out[i] : NULL,
+					       split ? splits_out_nents[i] : 0,
+					       info);
+		if (IS_ERR(el)) {
+			ret = PTR_ERR(el);
+			goto err_free_elements;
+		}
+		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
+		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
+		el->sec_req = sec_req;
+		list_add_tail(&el->head, &sec_req->elements);
+	}
+
+	/*
+	 * Only attempt to queue if the whole lot can fit in the queue -
+	 * we can't successfully clean up after a partial queuing, so this
+	 * must succeed or fail atomically.
+	 *
+	 * Big hammer test of both software and hardware queues - could be
+	 * more refined but this is unlikely to happen so no need.
+	 */
+
+	/* Grab a big lock for a long time to avoid concurrency issues */
+	mutex_lock(&queue->queuelock);
+
+	/*
+	 * Can go on to queue if we have space in either:
+	 * 1) The hardware queue and no software queue
+	 * 2) The software queue
+	 * AND there is nothing in the backlog. If there is a backlog we
+	 * may only queue to the backlog list and must return busy.
+	 */
+	if ((!sec_queue_can_enqueue(queue, steps) &&
+	     (!queue->havesoftqueue ||
+	      kfifo_avail(&queue->softqueue) < steps)) ||
+	    !list_empty(&ctx->backlog)) {
+		ret = -EBUSY;
+		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
+			mutex_unlock(&queue->queuelock);
+			goto out;
+		}
+
+		mutex_unlock(&queue->queuelock);
+		goto err_free_elements;
+	}
+	ret = sec_send_request(sec_req, queue);
+	mutex_unlock(&queue->queuelock);
+	if (ret)
+		goto err_free_elements;
+
+	ret = -EINPROGRESS;
+out:
+	/* Cleanup - all elements in pointer arrays have been copied */
+	kfree(splits_in_nents);
+	kfree(splits_in);
+	kfree(splits_out_nents);
+	kfree(splits_out);
+	kfree(split_sizes);
+	return ret;
+
+err_free_elements:
+	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
+		list_del(&el->head);
+		sec_alg_free_el(el, info);
+	}
+	if (crypto_skcipher_ivsize(atfm))
+		dma_unmap_single(info->dev, sec_req->dma_iv,
+				 crypto_skcipher_ivsize(atfm),
+				 DMA_TO_DEVICE);
+err_unmap_out_sg:
+	if (split)
+		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
+				    splits_out_nents, sec_req->len_out,
+				    info->dev);
+err_unmap_in_sg:
+	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
+			    sec_req->len_in, info->dev);
+err_free_split_sizes:
+	kfree(split_sizes);
+
+	return ret;
+}
+
+static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
+{
+	return sec_alg_skcipher_crypto(req, true);
+}
+
+static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
+{
+	return sec_alg_skcipher_crypto(req, false);
+}
+
+static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
+{
+	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	mutex_init(&ctx->lock);
+	INIT_LIST_HEAD(&ctx->backlog);
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));
+
+	ctx->queue = sec_queue_alloc_start_safe();
+	if (IS_ERR(ctx->queue))
+		return PTR_ERR(ctx->queue);
+
+	mutex_init(&ctx->queue->queuelock);
+	ctx->queue->havesoftqueue = false;
+
+	return 0;
+}
+
+static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
+{
+	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct device *dev = ctx->queue->dev_info->dev;
+
+	if (ctx->key) {
+		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
+		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
+				  ctx->pkey);
+	}
+	sec_queue_stop_release(ctx->queue);
+}
+
+static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
+{
+	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int ret;
+
+	ret = sec_alg_skcipher_init(tfm);
+	if (ret)
+		return ret;
+
+	INIT_KFIFO(ctx->queue->softqueue);
+	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
+	if (ret) {
+		sec_alg_skcipher_exit(tfm);
+		return ret;
+	}
+	ctx->queue->havesoftqueue = true;
+
+	return 0;
+}
+
+static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
+{
+	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	kfifo_free(&ctx->queue->softqueue);
+	sec_alg_skcipher_exit(tfm);
+}
+
+static struct skcipher_alg sec_algs[] = {
+	{
+		.base = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "hisi_sec_aes_ecb",
+			.cra_priority = 4001,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+			.cra_alignmask = 0,
+			.cra_module = THIS_MODULE,
+		},
+		.init = sec_alg_skcipher_init,
+		.exit = sec_alg_skcipher_exit,
+		.setkey = sec_alg_skcipher_setkey_aes_ecb,
+		.decrypt = sec_alg_skcipher_decrypt,
+		.encrypt = sec_alg_skcipher_encrypt,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = 0,
+	}, {
+		.base = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "hisi_sec_aes_cbc",
+			.cra_priority = 4001,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+			.cra_alignmask = 0,
+			.cra_module = THIS_MODULE,
+		},
+		.init = sec_alg_skcipher_init_with_queue,
+		.exit = sec_alg_skcipher_exit_with_queue,
+		.setkey = sec_alg_skcipher_setkey_aes_cbc,
+		.decrypt = sec_alg_skcipher_decrypt,
+		.encrypt = sec_alg_skcipher_encrypt,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+	}, {
+		.base = {
+			.cra_name = "ctr(aes)",
+			.cra_driver_name = "hisi_sec_aes_ctr",
+			.cra_priority = 4001,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+			.cra_alignmask = 0,
+			.cra_module = THIS_MODULE,
+		},
+		.init = sec_alg_skcipher_init_with_queue,
+		.exit = sec_alg_skcipher_exit_with_queue,
+		.setkey = sec_alg_skcipher_setkey_aes_ctr,
+		.decrypt = sec_alg_skcipher_decrypt,
+		.encrypt = sec_alg_skcipher_encrypt,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+	}, {
+		.base = {
+			.cra_name = "xts(aes)",
+			.cra_driver_name = "hisi_sec_aes_xts",
+			.cra_priority = 4001,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+			.cra_alignmask = 0,
+			.cra_module = THIS_MODULE,
+		},
+		.init = sec_alg_skcipher_init,
+		.exit = sec_alg_skcipher_exit,
+		.setkey = sec_alg_skcipher_setkey_aes_xts,
+		.decrypt = sec_alg_skcipher_decrypt,
+		.encrypt = sec_alg_skcipher_encrypt,
+		.min_keysize = 2 * AES_MIN_KEY_SIZE,
+		.max_keysize = 2 * AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+	}, {
+	/* Unable to find any test vectors so untested */
+		.base = {
+			.cra_name = "ecb(des)",
+			.cra_driver_name = "hisi_sec_des_ecb",
+			.cra_priority = 4001,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+			.cra_alignmask = 0,
+			.cra_module = THIS_MODULE,
+		},
+		.init = sec_alg_skcipher_init,
+		.exit = sec_alg_skcipher_exit,
+		.setkey = sec_alg_skcipher_setkey_des_ecb,
+		.decrypt = sec_alg_skcipher_decrypt,
+		.encrypt = sec_alg_skcipher_encrypt,
+		.min_keysize = DES_KEY_SIZE,
+		.max_keysize = DES_KEY_SIZE,
+		.ivsize = 0,
+	}, {
+		.base = {
+			.cra_name = "cbc(des)",
+			.cra_driver_name = "hisi_sec_des_cbc",
+			.cra_priority = 4001,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+			.cra_alignmask = 0,
+			.cra_module = THIS_MODULE,
+		},
+		.init = sec_alg_skcipher_init_with_queue,
+		.exit = sec_alg_skcipher_exit_with_queue,
+		.setkey = sec_alg_skcipher_setkey_des_cbc,
+		.decrypt = sec_alg_skcipher_decrypt,
+		.encrypt = sec_alg_skcipher_encrypt,
+		.min_keysize = DES_KEY_SIZE,
+		.max_keysize = DES_KEY_SIZE,
+		.ivsize = DES_BLOCK_SIZE,
+	}, {
+		.base = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "hisi_sec_3des_cbc",
+			.cra_priority = 4001,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+			.cra_alignmask = 0,
+			.cra_module = THIS_MODULE,
+		},
+		.init = sec_alg_skcipher_init_with_queue,
+		.exit = sec_alg_skcipher_exit_with_queue,
+		.setkey = sec_alg_skcipher_setkey_3des_cbc,
+		.decrypt = sec_alg_skcipher_decrypt,
+		.encrypt = sec_alg_skcipher_encrypt,
+		.min_keysize = DES3_EDE_KEY_SIZE,
+		.max_keysize = DES3_EDE_KEY_SIZE,
+		.ivsize = DES3_EDE_BLOCK_SIZE,
+	}, {
+		.base = {
+			.cra_name = "ecb(des3_ede)",
+			.cra_driver_name = "hisi_sec_3des_ecb",
+			.cra_priority = 4001,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+			.cra_alignmask = 0,
+			.cra_module = THIS_MODULE,
+		},
+		.init = sec_alg_skcipher_init,
+		.exit = sec_alg_skcipher_exit,
+		.setkey = sec_alg_skcipher_setkey_3des_ecb,
+		.decrypt = sec_alg_skcipher_decrypt,
+		.encrypt = sec_alg_skcipher_encrypt,
+		.min_keysize = DES3_EDE_KEY_SIZE,
+		.max_keysize = DES3_EDE_KEY_SIZE,
+		.ivsize = 0,
+	}
+};
+
+int sec_algs_register(void)
+{
+	int ret = 0;
+
+	mutex_lock(&algs_lock);
+	if (++active_devs != 1)
+		goto unlock;
+
+	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+	if (ret)
+		--active_devs;
+unlock:
+	mutex_unlock(&algs_lock);
+
+	return ret;
+}
+
+void sec_algs_unregister(void)
+{
+	mutex_lock(&algs_lock);
+	if (--active_devs != 0)
+		goto unlock;
+	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+
+unlock:
+	mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
new file mode 100644
index 0000000..c1ee4e7
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -0,0 +1,1323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Hisilicon SEC units found on Hip06 and Hip07
+ *
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ */
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "sec_drv.h"
+
+#define SEC_QUEUE_AR_FROCE_ALLOC			0
+#define SEC_QUEUE_AR_FROCE_NOALLOC			1
+#define SEC_QUEUE_AR_FROCE_DIS				2
+
+#define SEC_QUEUE_AW_FROCE_ALLOC			0
+#define SEC_QUEUE_AW_FROCE_NOALLOC			1
+#define SEC_QUEUE_AW_FROCE_DIS				2
+
+/* SEC_ALGSUB registers */
+#define SEC_ALGSUB_CLK_EN_REG				0x03b8
+#define SEC_ALGSUB_CLK_DIS_REG				0x03bc
+#define SEC_ALGSUB_CLK_ST_REG				0x535c
+#define SEC_ALGSUB_RST_REQ_REG				0x0aa8
+#define SEC_ALGSUB_RST_DREQ_REG				0x0aac
+#define SEC_ALGSUB_RST_ST_REG				0x5a54
+#define   SEC_ALGSUB_RST_ST_IS_RST			BIT(0)
+
+#define SEC_ALGSUB_BUILD_RST_REQ_REG			0x0ab8
+#define SEC_ALGSUB_BUILD_RST_DREQ_REG			0x0abc
+#define SEC_ALGSUB_BUILD_RST_ST_REG			0x5a5c
+#define   SEC_ALGSUB_BUILD_RST_ST_IS_RST		BIT(0)
+
+#define SEC_SAA_BASE					0x00001000UL
+
+/* SEC_SAA registers */
+#define SEC_SAA_CTRL_REG(x)	((x) * SEC_SAA_ADDR_SIZE)
+#define   SEC_SAA_CTRL_GET_QM_EN			BIT(0)
+
+#define SEC_ST_INTMSK1_REG				0x0200
+#define SEC_ST_RINT1_REG				0x0400
+#define SEC_ST_INTSTS1_REG				0x0600
+#define SEC_BD_MNG_STAT_REG				0x0800
+#define SEC_PARSING_STAT_REG				0x0804
+#define SEC_LOAD_TIME_OUT_CNT_REG			0x0808
+#define SEC_CORE_WORK_TIME_OUT_CNT_REG			0x080c
+#define SEC_BACK_TIME_OUT_CNT_REG			0x0810
+#define SEC_BD1_PARSING_RD_TIME_OUT_CNT_REG		0x0814
+#define SEC_BD1_PARSING_WR_TIME_OUT_CNT_REG		0x0818
+#define SEC_BD2_PARSING_RD_TIME_OUT_CNT_REG		0x081c
+#define SEC_BD2_PARSING_WR_TIME_OUT_CNT_REG		0x0820
+#define SEC_SAA_ACC_REG					0x083c
+#define SEC_BD_NUM_CNT_IN_SEC_REG			0x0858
+#define SEC_LOAD_WORK_TIME_CNT_REG			0x0860
+#define SEC_CORE_WORK_WORK_TIME_CNT_REG			0x0864
+#define SEC_BACK_WORK_TIME_CNT_REG			0x0868
+#define SEC_SAA_IDLE_TIME_CNT_REG			0x086c
+#define SEC_SAA_CLK_CNT_REG				0x0870
+
+/* SEC_COMMON registers */
+#define SEC_CLK_EN_REG					0x0000
+#define SEC_CTRL_REG					0x0004
+
+#define SEC_COMMON_CNT_CLR_CE_REG			0x0008
+#define   SEC_COMMON_CNT_CLR_CE_CLEAR			BIT(0)
+#define   SEC_COMMON_CNT_CLR_CE_SNAP_EN			BIT(1)
+
+#define SEC_SECURE_CTRL_REG				0x000c
+#define SEC_AXI_CACHE_CFG_REG				0x0010
+#define SEC_AXI_QOS_CFG_REG				0x0014
+#define SEC_IPV4_MASK_TABLE_REG				0x0020
+#define SEC_IPV6_MASK_TABLE_X_REG(x)	(0x0024 + (x) * 4)
+#define SEC_FSM_MAX_CNT_REG				0x0064
+
+#define SEC_CTRL2_REG					0x0068
+#define   SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M		GENMASK(3, 0)
+#define   SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S		0
+#define   SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M		GENMASK(6, 4)
+#define   SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S		4
+#define   SEC_CTRL2_CLK_GATE_EN				BIT(7)
+#define   SEC_CTRL2_ENDIAN_BD				BIT(8)
+#define   SEC_CTRL2_ENDIAN_BD_TYPE			BIT(9)
+
+#define SEC_CNT_PRECISION_CFG_REG			0x006c
+#define SEC_DEBUG_BD_CFG_REG				0x0070
+#define   SEC_DEBUG_BD_CFG_WB_NORMAL			BIT(0)
+#define   SEC_DEBUG_BD_CFG_WB_EN			BIT(1)
+
+#define SEC_Q_SIGHT_SEL					0x0074
+#define SEC_Q_SIGHT_HIS_CLR				0x0078
+#define SEC_Q_VMID_CFG_REG(q)		(0x0100 + (q) * 4)
+#define SEC_Q_WEIGHT_CFG_REG(q)		(0x200 + (q) * 4)
+#define SEC_STAT_CLR_REG				0x0a00
+#define SEC_SAA_IDLE_CNT_CLR_REG			0x0a04
+#define SEC_QM_CPL_Q_IDBUF_DFX_CFG_REG			0x0b00
+#define SEC_QM_CPL_Q_IDBUF_DFX_RESULT_REG		0x0b04
+#define SEC_QM_BD_DFX_CFG_REG				0x0b08
+#define SEC_QM_BD_DFX_RESULT_REG			0x0b0c
+#define SEC_QM_BDID_DFX_RESULT_REG			0x0b10
+#define SEC_QM_BD_DFIFO_STATUS_REG			0x0b14
+#define SEC_QM_BD_DFX_CFG2_REG				0x0b1c
+#define SEC_QM_BD_DFX_RESULT2_REG			0x0b20
+#define SEC_QM_BD_IDFIFO_STATUS_REG			0x0b18
+#define SEC_QM_BD_DFIFO_STATUS2_REG			0x0b28
+#define SEC_QM_BD_IDFIFO_STATUS2_REG			0x0b2c
+
+#define SEC_HASH_IPV4_MASK				0xfff00000
+#define SEC_MAX_SAA_NUM					0xa
+#define SEC_SAA_ADDR_SIZE				0x1000
+
+#define SEC_Q_INIT_REG					0x0
+#define   SEC_Q_INIT_WO_STAT_CLEAR			0x2
+#define   SEC_Q_INIT_AND_STAT_CLEAR			0x3
+
+#define SEC_Q_CFG_REG					0x8
+#define   SEC_Q_CFG_REORDER				BIT(0)
+
+#define SEC_Q_PROC_NUM_CFG_REG				0x10
+#define SEC_QUEUE_ENB_REG				0x18
+
+#define SEC_Q_DEPTH_CFG_REG				0x50
+#define   SEC_Q_DEPTH_CFG_DEPTH_M			GENMASK(11, 0)
+#define   SEC_Q_DEPTH_CFG_DEPTH_S			0
+
+#define SEC_Q_BASE_HADDR_REG				0x54
+#define SEC_Q_BASE_LADDR_REG				0x58
+#define SEC_Q_WR_PTR_REG				0x5c
+#define SEC_Q_OUTORDER_BASE_HADDR_REG			0x60
+#define SEC_Q_OUTORDER_BASE_LADDR_REG			0x64
+#define SEC_Q_OUTORDER_RD_PTR_REG			0x68
+#define SEC_Q_OT_TH_REG					0x6c
+
+#define SEC_Q_ARUSER_CFG_REG				0x70
+#define   SEC_Q_ARUSER_CFG_FA				BIT(0)
+#define   SEC_Q_ARUSER_CFG_FNA				BIT(1)
+#define   SEC_Q_ARUSER_CFG_RINVLD			BIT(2)
+#define   SEC_Q_ARUSER_CFG_PKG				BIT(3)
+
+#define SEC_Q_AWUSER_CFG_REG				0x74
+#define   SEC_Q_AWUSER_CFG_FA				BIT(0)
+#define   SEC_Q_AWUSER_CFG_FNA				BIT(1)
+#define   SEC_Q_AWUSER_CFG_PKG				BIT(2)
+
+#define SEC_Q_ERR_BASE_HADDR_REG			0x7c
+#define SEC_Q_ERR_BASE_LADDR_REG			0x80
+#define SEC_Q_CFG_VF_NUM_REG				0x84
+#define SEC_Q_SOFT_PROC_PTR_REG				0x88
+#define SEC_Q_FAIL_INT_MSK_REG				0x300
+#define SEC_Q_FLOW_INT_MKS_REG				0x304
+#define SEC_Q_FAIL_RINT_REG				0x400
+#define SEC_Q_FLOW_RINT_REG				0x404
+#define SEC_Q_FAIL_INT_STATUS_REG			0x500
+#define SEC_Q_FLOW_INT_STATUS_REG			0x504
+#define SEC_Q_STATUS_REG				0x600
+#define SEC_Q_RD_PTR_REG				0x604
+#define SEC_Q_PRO_PTR_REG				0x608
+#define SEC_Q_OUTORDER_WR_PTR_REG			0x60c
+#define SEC_Q_OT_CNT_STATUS_REG				0x610
+#define SEC_Q_INORDER_BD_NUM_ST_REG			0x650
+#define SEC_Q_INORDER_GET_FLAG_ST_REG			0x654
+#define SEC_Q_INORDER_ADD_FLAG_ST_REG			0x658
+#define SEC_Q_INORDER_TASK_INT_NUM_LEFT_ST_REG		0x65c
+#define SEC_Q_RD_DONE_PTR_REG				0x660
+#define SEC_Q_CPL_Q_BD_NUM_ST_REG			0x700
+#define SEC_Q_CPL_Q_PTR_ST_REG				0x704
+#define SEC_Q_CPL_Q_H_ADDR_ST_REG			0x708
+#define SEC_Q_CPL_Q_L_ADDR_ST_REG			0x70c
+#define SEC_Q_CPL_TASK_INT_NUM_LEFT_ST_REG		0x710
+#define SEC_Q_WRR_ID_CHECK_REG				0x714
+#define SEC_Q_CPLQ_FULL_CHECK_REG			0x718
+#define SEC_Q_SUCCESS_BD_CNT_REG			0x800
+#define SEC_Q_FAIL_BD_CNT_REG				0x804
+#define SEC_Q_GET_BD_CNT_REG				0x808
+#define SEC_Q_IVLD_CNT_REG				0x80c
+#define SEC_Q_BD_PROC_GET_CNT_REG			0x810
+#define SEC_Q_BD_PROC_DONE_CNT_REG			0x814
+#define SEC_Q_LAT_CLR_REG				0x850
+#define SEC_Q_PKT_LAT_MAX_REG				0x854
+#define SEC_Q_PKT_LAT_AVG_REG				0x858
+#define SEC_Q_PKT_LAT_MIN_REG				0x85c
+#define SEC_Q_ID_CLR_CFG_REG				0x900
+#define SEC_Q_1ST_BD_ERR_ID_REG				0x904
+#define SEC_Q_1ST_AUTH_FAIL_ID_REG			0x908
+#define SEC_Q_1ST_RD_ERR_ID_REG				0x90c
+#define SEC_Q_1ST_ECC2_ERR_ID_REG			0x910
+#define SEC_Q_1ST_IVLD_ID_REG				0x914
+#define SEC_Q_1ST_BD_WR_ERR_ID_REG			0x918
+#define SEC_Q_1ST_ERR_BD_WR_ERR_ID_REG			0x91c
+#define SEC_Q_1ST_BD_MAC_WR_ERR_ID_REG			0x920
+
+struct sec_debug_bd_info {
+#define SEC_DEBUG_BD_INFO_SOFT_ERR_CHECK_M	GENMASK(22, 0)
+	u32 soft_err_check;
+#define SEC_DEBUG_BD_INFO_HARD_ERR_CHECK_M	GENMASK(9, 0)
+	u32 hard_err_check;
+	u32 icv_mac1st_word;
+#define SEC_DEBUG_BD_INFO_GET_ID_M		GENMASK(19, 0)
+	u32 sec_get_id;
+	/* W4---W15 */
+	u32 reserv_left[12];
+};
+
+struct sec_out_bd_info	{
+#define SEC_OUT_BD_INFO_Q_ID_M			GENMASK(11, 0)
+#define SEC_OUT_BD_INFO_ECC_2BIT_ERR		BIT(14)
+	u16 data;
+};
+
+#define SEC_MAX_DEVICES				8
+static struct sec_dev_info *sec_devices[SEC_MAX_DEVICES];
+static DEFINE_MUTEX(sec_id_lock);
+
+static int sec_queue_map_io(struct sec_queue *queue)
+{
+	struct device *dev = queue->dev_info->dev;
+	struct resource *res;
+
+	res = platform_get_resource(to_platform_device(dev),
+				    IORESOURCE_MEM,
+				    2 + queue->queue_id);
+	if (!res) {
+		dev_err(dev, "Failed to get queue %d memory resource\n",
+			queue->queue_id);
+		return -ENOMEM;
+	}
+	queue->regs = ioremap(res->start, resource_size(res));
+	if (!queue->regs)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void sec_queue_unmap_io(struct sec_queue *queue)
+{
+	iounmap(queue->regs);
+}
+
+static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)
+{
+	void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
+	u32 regval;
+
+	regval = readl_relaxed(addr);
+	if (ar_pkg)
+		regval |= SEC_Q_ARUSER_CFG_PKG;
+	else
+		regval &= ~SEC_Q_ARUSER_CFG_PKG;
+	writel_relaxed(regval, addr);
+
+	return 0;
+}
+
+static int sec_queue_aw_pkgattr(struct sec_queue *queue, u32 aw_pkg)
+{
+	void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
+	u32 regval;
+
+	regval = readl_relaxed(addr);
+	regval |= SEC_Q_AWUSER_CFG_PKG;
+	writel_relaxed(regval, addr);
+
+	return 0;
+}
+
+static int sec_clk_en(struct sec_dev_info *info)
+{
+	void __iomem *base = info->regs[SEC_COMMON];
+	u32 i = 0;
+
+	writel_relaxed(0x7, base + SEC_ALGSUB_CLK_EN_REG);
+	do {
+		usleep_range(1000, 10000);
+		if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0x7)
+			return 0;
+		i++;
+	} while (i < 10);
+	dev_err(info->dev, "sec clock enable failed!\n");
+
+	return -EIO;
+}
+
+static int sec_clk_dis(struct sec_dev_info *info)
+{
+	void __iomem *base = info->regs[SEC_COMMON];
+	u32 i = 0;
+
+	writel_relaxed(0x7, base + SEC_ALGSUB_CLK_DIS_REG);
+	do {
+		usleep_range(1000, 10000);
+		if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0)
+			return 0;
+		i++;
+	} while (i < 10);
+	dev_err(info->dev, "sec clock disable failed!\n");
+
+	return -EIO;
+}
+
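+/*
+ * Assert the ALGSUB and BUILD reset requests, poll until both status bits
+ * report reset, then de-assert the requests and poll until the bits clear.
+ */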
+static int sec_reset_whole_module(struct sec_dev_info *info)
+{
+	void __iomem *base = info->regs[SEC_COMMON];
+	bool is_reset, b_is_reset;
+	u32 i = 0;
+
+	writel_relaxed(1, base + SEC_ALGSUB_RST_REQ_REG);
+	writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_REQ_REG);
+	while (1) {
+		usleep_range(1000, 10000);
+		is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
+			SEC_ALGSUB_RST_ST_IS_RST;
+		b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
+			SEC_ALGSUB_BUILD_RST_ST_IS_RST;
+		if (is_reset && b_is_reset)
+			break;
+		i++;
+		if (i > 10) {
+			dev_err(info->dev, "Reset req failed\n");
+			return -EIO;
+		}
+	}
+
+	i = 0;
+	writel_relaxed(1, base + SEC_ALGSUB_RST_DREQ_REG);
+	writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_DREQ_REG);
+	while (1) {
+		usleep_range(1000, 10000);
+		is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
+			SEC_ALGSUB_RST_ST_IS_RST;
+		b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
+			SEC_ALGSUB_BUILD_RST_ST_IS_RST;
+		if (!is_reset && !b_is_reset)
+			break;
+
+		i++;
+		if (i > 10) {
+			dev_err(info->dev, "Reset dreq failed\n");
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static void sec_bd_endian_little(struct sec_dev_info *info)
+{
+	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+	u32 regval;
+
+	regval = readl_relaxed(addr);
+	regval &= ~(SEC_CTRL2_ENDIAN_BD | SEC_CTRL2_ENDIAN_BD_TYPE);
+	writel_relaxed(regval, addr);
+}
+
+/*
+ * sec_cache_config - configure optimum cache placement
+ */
+static void sec_cache_config(struct sec_dev_info *info)
+{
+	struct iommu_domain *domain;
+	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL_REG;
+
+	domain = iommu_get_domain_for_dev(info->dev);
+
+	/* Check that translation is occurring */
+	if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
+		writel_relaxed(0x44cf9e, addr);
+	else
+		writel_relaxed(0x4cfd9, addr);
+}
+
+static void sec_data_axiwr_otsd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+	u32 regval;
+
+	regval = readl_relaxed(addr);
+	regval &= ~SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
+	regval |= (cfg << SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S) &
+		SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
+	writel_relaxed(regval, addr);
+}
+
+static void sec_data_axird_otsd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+	u32 regval;
+
+	regval = readl_relaxed(addr);
+	regval &= ~SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
+	regval |= (cfg << SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S) &
+		SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
+	writel_relaxed(regval, addr);
+}
+
+static void sec_clk_gate_en(struct sec_dev_info *info, bool clkgate)
+{
+	void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+	u32 regval;
+
+	regval = readl_relaxed(addr);
+	if (clkgate)
+		regval |= SEC_CTRL2_CLK_GATE_EN;
+	else
+		regval &= ~SEC_CTRL2_CLK_GATE_EN;
+	writel_relaxed(regval, addr);
+}
+
+static void sec_comm_cnt_cfg(struct sec_dev_info *info, bool clr_ce)
+{
+	void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
+	u32 regval;
+
+	regval = readl_relaxed(addr);
+	if (clr_ce)
+		regval |= SEC_COMMON_CNT_CLR_CE_CLEAR;
+	else
+		regval &= ~SEC_COMMON_CNT_CLR_CE_CLEAR;
+	writel_relaxed(regval, addr);
+}
+
+static void sec_commsnap_en(struct sec_dev_info *info, bool snap_en)
+{
+	void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
+	u32 regval;
+
+	regval = readl_relaxed(addr);
+	if (snap_en)
+		regval |= SEC_COMMON_CNT_CLR_CE_SNAP_EN;
+	else
+		regval &= ~SEC_COMMON_CNT_CLR_CE_SNAP_EN;
+	writel_relaxed(regval, addr);
+}
+
+static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[])
+{
+	void __iomem *base = info->regs[SEC_SAA];
+	int i;
+
+	for (i = 0; i < 10; i++)
+		writel_relaxed(hash_mask[0],
+			       base + SEC_IPV6_MASK_TABLE_X_REG(i));
+}
+
+static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask)
+{
+	if (hash_mask & SEC_HASH_IPV4_MASK) {
+		dev_err(info->dev, "SEC IPv4 hash mask input error!\n");
+		return -EINVAL;
+	}
+
+	writel_relaxed(hash_mask,
+		       info->regs[SEC_SAA] + SEC_IPV4_MASK_TABLE_REG);
+
+	return 0;
+}
+
+static void sec_set_dbg_bd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+	void __iomem *addr = info->regs[SEC_SAA] + SEC_DEBUG_BD_CFG_REG;
+	u32 regval;
+
+	regval = readl_relaxed(addr);
+	/* Always disable write back of normal bd */
+	regval &= ~SEC_DEBUG_BD_CFG_WB_NORMAL;
+
+	if (cfg)
+		regval &= ~SEC_DEBUG_BD_CFG_WB_EN;
+	else
+		regval |= SEC_DEBUG_BD_CFG_WB_EN;
+
+	writel_relaxed(regval, addr);
+}
+
+static void sec_saa_getqm_en(struct sec_dev_info *info, u32 saa_indx, u32 en)
+{
+	void __iomem *addr = info->regs[SEC_SAA] + SEC_SAA_BASE +
+		SEC_SAA_CTRL_REG(saa_indx);
+	u32 regval;
+
+	regval = readl_relaxed(addr);
+	if (en)
+		regval |= SEC_SAA_CTRL_GET_QM_EN;
+	else
+		regval &= ~SEC_SAA_CTRL_GET_QM_EN;
+	writel_relaxed(regval, addr);
+}
+
+static void sec_saa_int_mask(struct sec_dev_info *info, u32 saa_indx,
+			     u32 saa_int_mask)
+{
+	writel_relaxed(saa_int_mask,
+		       info->regs[SEC_SAA] + SEC_SAA_BASE + SEC_ST_INTMSK1_REG +
+		       saa_indx * SEC_SAA_ADDR_SIZE);
+}
+
+static void sec_streamid(struct sec_dev_info *info, int i)
+{
+	#define SEC_SID 0x600
+	#define SEC_VMID 0
+
+	writel_relaxed((SEC_VMID | ((SEC_SID & 0xffff) << 8)),
+		       info->regs[SEC_SAA] + SEC_Q_VMID_CFG_REG(i));
+}
+
+static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc)
+{
+	void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
+	u32 regval;
+
+	regval = readl_relaxed(addr);
+	if (alloc == SEC_QUEUE_AR_FROCE_ALLOC) {
+		regval |= SEC_Q_ARUSER_CFG_FA;
+		regval &= ~SEC_Q_ARUSER_CFG_FNA;
+	} else {
+		regval &= ~SEC_Q_ARUSER_CFG_FA;
+		regval |= SEC_Q_ARUSER_CFG_FNA;
+	}
+
+	writel_relaxed(regval, addr);
+}
+
+static void sec_queue_aw_alloc(struct sec_queue *queue, u32 alloc)
+{
+	void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
+	u32 regval;
+
+	regval = readl_relaxed(addr);
+	if (alloc == SEC_QUEUE_AW_FROCE_ALLOC) {
+		regval |= SEC_Q_AWUSER_CFG_FA;
+		regval &= ~SEC_Q_AWUSER_CFG_FNA;
+	} else {
+		regval &= ~SEC_Q_AWUSER_CFG_FA;
+		regval |= SEC_Q_AWUSER_CFG_FNA;
+	}
+
+	writel_relaxed(regval, addr);
+}
+
+static void sec_queue_reorder(struct sec_queue *queue, bool reorder)
+{
+	void __iomem *base = queue->regs;
+	u32 regval;
+
+	regval = readl_relaxed(base + SEC_Q_CFG_REG);
+	if (reorder)
+		regval |= SEC_Q_CFG_REORDER;
+	else
+		regval &= ~SEC_Q_CFG_REORDER;
+	writel_relaxed(regval, base + SEC_Q_CFG_REG);
+}
+
+static void sec_queue_depth(struct sec_queue *queue, u32 depth)
+{
+	void __iomem *addr = queue->regs + SEC_Q_DEPTH_CFG_REG;
+	u32 regval;
+
+	regval = readl_relaxed(addr);
+	regval &= ~SEC_Q_DEPTH_CFG_DEPTH_M;
+	regval |= (depth << SEC_Q_DEPTH_CFG_DEPTH_S) & SEC_Q_DEPTH_CFG_DEPTH_M;
+
+	writel_relaxed(regval, addr);
+}
+
+static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr)
+{
+	writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG);
+	writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG);
+}
+
+static void sec_queue_outorder_addr(struct sec_queue *queue, u64 addr)
+{
+	writel_relaxed(upper_32_bits(addr),
+		       queue->regs + SEC_Q_OUTORDER_BASE_HADDR_REG);
+	writel_relaxed(lower_32_bits(addr),
+		       queue->regs + SEC_Q_OUTORDER_BASE_LADDR_REG);
+}
+
+static void sec_queue_errbase_addr(struct sec_queue *queue, u64 addr)
+{
+	writel_relaxed(upper_32_bits(addr),
+		       queue->regs + SEC_Q_ERR_BASE_HADDR_REG);
+	writel_relaxed(lower_32_bits(addr),
+		       queue->regs + SEC_Q_ERR_BASE_LADDR_REG);
+}
+
+static void sec_queue_irq_disable(struct sec_queue *queue)
+{
+	writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
+}
+
+static void sec_queue_irq_enable(struct sec_queue *queue)
+{
+	writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
+}
+
+static void sec_queue_abn_irq_disable(struct sec_queue *queue)
+{
+	writel_relaxed((u32)~0, queue->regs + SEC_Q_FAIL_INT_MSK_REG);
+}
+
+static void sec_queue_stop(struct sec_queue *queue)
+{
+	disable_irq(queue->task_irq);
+	sec_queue_irq_disable(queue);
+	writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG);
+}
+
+static void sec_queue_start(struct sec_queue *queue)
+{
+	sec_queue_irq_enable(queue);
+	enable_irq(queue->task_irq);
+	queue->expected = 0;
+	writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
+	writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG);
+}
+
+static struct sec_queue *sec_alloc_queue(struct sec_dev_info *info)
+{
+	int i;
+
+	mutex_lock(&info->dev_lock);
+
+	/* Get the first idle queue in SEC device */
+	for (i = 0; i < SEC_Q_NUM; i++)
+		if (!info->queues[i].in_use) {
+			info->queues[i].in_use = true;
+			info->queues_in_use++;
+			mutex_unlock(&info->dev_lock);
+
+			return &info->queues[i];
+		}
+	mutex_unlock(&info->dev_lock);
+
+	return ERR_PTR(-ENODEV);
+}
+
+static int sec_queue_free(struct sec_queue *queue)
+{
+	struct sec_dev_info *info = queue->dev_info;
+
+	if (queue->queue_id >= SEC_Q_NUM) {
+		dev_err(info->dev, "No queue %d\n", queue->queue_id);
+		return -ENODEV;
+	}
+
+	if (!queue->in_use) {
+		dev_err(info->dev, "Queue %d is idle\n", queue->queue_id);
+		return -ENODEV;
+	}
+
+	mutex_lock(&info->dev_lock);
+	queue->in_use = false;
+	info->queues_in_use--;
+	mutex_unlock(&info->dev_lock);
+
+	return 0;
+}
+
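+/*
+ * Hard IRQ handler: mask further completion interrupts for this queue and
+ * defer the actual completion processing to the threaded handler.
+ */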
+static irqreturn_t sec_isr_handle_th(int irq, void *q)
+{
+	sec_queue_irq_disable(q);
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t sec_isr_handle(int irq, void *q)
+{
+	struct sec_queue *queue = q;
+	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+	struct sec_queue_ring_cq *cq_ring = &queue->ring_cq;
+	struct sec_out_bd_info *outorder_msg;
+	struct sec_bd_info *msg;
+	u32 ooo_read, ooo_write;
+	void __iomem *base = queue->regs;
+	int q_id;
+
+	ooo_read = readl(base + SEC_Q_OUTORDER_RD_PTR_REG);
+	ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
+	outorder_msg = cq_ring->vaddr + ooo_read;
+	q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
+	msg = msg_ring->vaddr + q_id;
+
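+	/*
+	 * Walk the out of order completion ring, marking each finished
+	 * element and running the completion callbacks in submission order
+	 * once the next expected element is done.
+	 */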
+	while ((ooo_write != ooo_read) && msg->w0 & SEC_BD_W0_DONE) {
+		/*
+		 * Must be before callback otherwise blocks adding other chained
+		 * elements
+		 */
+		set_bit(q_id, queue->unprocessed);
+		if (q_id == queue->expected)
+			while (test_bit(queue->expected, queue->unprocessed)) {
+				clear_bit(queue->expected, queue->unprocessed);
+				msg = msg_ring->vaddr + queue->expected;
+				msg->w0 &= ~SEC_BD_W0_DONE;
+				msg_ring->callback(msg,
+						queue->shadow[queue->expected]);
+				queue->shadow[queue->expected] = NULL;
+				queue->expected = (queue->expected + 1) %
+					SEC_QUEUE_LEN;
+				atomic_dec(&msg_ring->used);
+			}
+
+		ooo_read = (ooo_read + 1) % SEC_QUEUE_LEN;
+		writel(ooo_read, base + SEC_Q_OUTORDER_RD_PTR_REG);
+		ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
+		outorder_msg = cq_ring->vaddr + ooo_read;
+		q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
+		msg = msg_ring->vaddr + q_id;
+	}
+
+	sec_queue_irq_enable(queue);
+
+	return IRQ_HANDLED;
+}
+
+static int sec_queue_irq_init(struct sec_queue *queue)
+{
+	struct sec_dev_info *info = queue->dev_info;
+	int irq = queue->task_irq;
+	int ret;
+
+	ret = request_threaded_irq(irq, sec_isr_handle_th, sec_isr_handle,
+				   IRQF_TRIGGER_RISING, queue->name, queue);
+	if (ret) {
+		dev_err(info->dev, "request irq(%d) failed %d\n", irq, ret);
+		return ret;
+	}
+	disable_irq(irq);
+
+	return 0;
+}
+
+static int sec_queue_irq_uninit(struct sec_queue *queue)
+{
+	free_irq(queue->task_irq, queue);
+
+	return 0;
+}
+
+static struct sec_dev_info *sec_device_get(void)
+{
+	struct sec_dev_info *sec_dev = NULL;
+	struct sec_dev_info *this_sec_dev;
+	int least_busy_n = SEC_Q_NUM + 1;
+	int i;
+
+	/* Find which one is least busy and use that first */
+	for (i = 0; i < SEC_MAX_DEVICES; i++) {
+		this_sec_dev = sec_devices[i];
+		if (this_sec_dev &&
+		    this_sec_dev->queues_in_use < least_busy_n) {
+			least_busy_n = this_sec_dev->queues_in_use;
+			sec_dev = this_sec_dev;
+		}
+	}
+
+	return sec_dev;
+}
+
+static struct sec_queue *sec_queue_alloc_start(struct sec_dev_info *info)
+{
+	struct sec_queue *queue;
+
+	queue = sec_alloc_queue(info);
+	if (IS_ERR(queue)) {
+		dev_err(info->dev, "alloc sec queue failed! %ld\n",
+			PTR_ERR(queue));
+		return queue;
+	}
+
+	sec_queue_start(queue);
+
+	return queue;
+}
+
+/**
+ * sec_queue_alloc_start_safe() - get a hw queue from an appropriate instance
+ *
+ * This function does extremely simplistic load balancing. It does not take into
+ * account NUMA locality of the accelerator, or which cpu has requested the
+ * queue.  Future work may focus on optimizing this in order to improve full
+ * machine throughput.
+ */
+struct sec_queue *sec_queue_alloc_start_safe(void)
+{
+	struct sec_dev_info *info;
+	struct sec_queue *queue = ERR_PTR(-ENODEV);
+
+	mutex_lock(&sec_id_lock);
+	info = sec_device_get();
+	if (!info)
+		goto unlock;
+
+	queue = sec_queue_alloc_start(info);
+
+unlock:
+	mutex_unlock(&sec_id_lock);
+
+	return queue;
+}
+
+/**
+ * sec_queue_stop_release() - free up a hw queue for reuse
+ * @queue: The queue we are done with.
+ *
+ * This will stop the current queue, terminating any transactions
+ * that are in flight and return it to the pool of available hw queues.
+ */
+int sec_queue_stop_release(struct sec_queue *queue)
+{
+	struct device *dev = queue->dev_info->dev;
+	int ret;
+
+	sec_queue_stop(queue);
+
+	ret = sec_queue_free(queue);
+	if (ret)
+		dev_err(dev, "Releasing queue failed %d\n", ret);
+
+	return ret;
+}
+
+/**
+ * sec_queue_empty() - Is this hardware queue currently empty.
+ * @queue: The queue to check.
+ *
+ * We need to know if we have an empty queue for some of the chaining modes,
+ * as if it is not empty we may need to hold the message in a software queue
+ * until the hw queue is drained.
+ */
+bool sec_queue_empty(struct sec_queue *queue)
+{
+	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+
+	return !atomic_read(&msg_ring->used);
+}
+
+/**
+ * sec_queue_send() - queue up a single operation in the hw queue
+ * @queue: The queue in which to put the message
+ * @msg: The message
+ * @ctx: Context to be put in the shadow array and passed back to cb on result.
+ *
+ * This function will return -EAGAIN if the queue is currently full.
+ */
+int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx)
+{
+	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+	void __iomem *base = queue->regs;
+	u32 write, read;
+
+	mutex_lock(&msg_ring->lock);
+	read = readl(base + SEC_Q_RD_PTR_REG);
+	write = readl(base + SEC_Q_WR_PTR_REG);
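+	/*
+	 * Equal read and write pointers can mean either empty or full;
+	 * the local 'used' count disambiguates the full case.
+	 */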
+	if (write == read && atomic_read(&msg_ring->used) == SEC_QUEUE_LEN) {
+		mutex_unlock(&msg_ring->lock);
+		return -EAGAIN;
+	}
+	memcpy(msg_ring->vaddr + write, msg, sizeof(*msg));
+	queue->shadow[write] = ctx;
+	write = (write + 1) % SEC_QUEUE_LEN;
+
+	/* Ensure content updated before queue advance */
+	wmb();
+	writel(write, base + SEC_Q_WR_PTR_REG);
+
+	atomic_inc(&msg_ring->used);
+	mutex_unlock(&msg_ring->lock);
+
+	return 0;
+}
+
+bool sec_queue_can_enqueue(struct sec_queue *queue, int num)
+{
+	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+
+	return SEC_QUEUE_LEN - atomic_read(&msg_ring->used) >= num;
+}
+
+static void sec_queue_hw_init(struct sec_queue *queue)
+{
+	sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
+	sec_queue_aw_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
+	sec_queue_ar_pkgattr(queue, 1);
+	sec_queue_aw_pkgattr(queue, 1);
+
+	/* Enable out of order queue */
+	sec_queue_reorder(queue, true);
+
+	/* Interrupt after a single complete element */
+	writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG);
+
+	sec_queue_depth(queue, SEC_QUEUE_LEN - 1);
+
+	sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr);
+
+	sec_queue_outorder_addr(queue, queue->ring_cq.paddr);
+
+	sec_queue_errbase_addr(queue, queue->ring_db.paddr);
+
+	writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG);
+
+	sec_queue_abn_irq_disable(queue);
+	sec_queue_irq_disable(queue);
+	writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
+}
+
+static int sec_hw_init(struct sec_dev_info *info)
+{
+	struct iommu_domain *domain;
+	u32 sec_ipv4_mask = 0;
+	u32 sec_ipv6_mask[10] = {};
+	u32 i, ret;
+
+	domain = iommu_get_domain_for_dev(info->dev);
+
+	/*
+	 * Enable all available processing unit clocks.
+	 * Only the first cluster is usable with translations.
+	 */
+	if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
+		info->num_saas = 5;
+	else
+		info->num_saas = 10;
+
+	writel_relaxed(GENMASK(info->num_saas - 1, 0),
+		       info->regs[SEC_SAA] + SEC_CLK_EN_REG);
+
+	/* 32 bit little endian */
+	sec_bd_endian_little(info);
+
+	sec_cache_config(info);
+
+	/* Data axi port write and read outstanding config as per datasheet */
+	sec_data_axiwr_otsd_cfg(info, 0x7);
+	sec_data_axird_otsd_cfg(info, 0x7);
+
+	/* Enable clock gating */
+	sec_clk_gate_en(info, true);
+
+	/* Set CNT_CYC register not read clear */
+	sec_comm_cnt_cfg(info, false);
+
+	/* Enable CNT_CYC */
+	sec_commsnap_en(info, false);
+
+	writel_relaxed((u32)~0, info->regs[SEC_SAA] + SEC_FSM_MAX_CNT_REG);
+
+	ret = sec_ipv4_hashmask(info, sec_ipv4_mask);
+	if (ret) {
+		dev_err(info->dev, "Failed to set ipv4 hashmask %d\n", ret);
+		return -EIO;
+	}
+
+	sec_ipv6_hashmask(info, sec_ipv6_mask);
+
+	/* Do not use debug bd */
+	sec_set_dbg_bd_cfg(info, 0);
+
+	if (domain && (domain->type & __IOMMU_DOMAIN_PAGING)) {
+		for (i = 0; i < SEC_Q_NUM; i++) {
+			sec_streamid(info, i);
+			/* Same QoS for all queues */
+			writel_relaxed(0x3f,
+				       info->regs[SEC_SAA] +
+				       SEC_Q_WEIGHT_CFG_REG(i));
+		}
+	}
+
+	for (i = 0; i < info->num_saas; i++) {
+		sec_saa_getqm_en(info, i, 1);
+		sec_saa_int_mask(info, i, 0);
+	}
+
+	return 0;
+}
+
+static void sec_hw_exit(struct sec_dev_info *info)
+{
+	int i;
+
+	for (i = 0; i < SEC_MAX_SAA_NUM; i++) {
+		sec_saa_int_mask(info, i, (u32)~0);
+		sec_saa_getqm_en(info, i, 0);
+	}
+}
+
+static void sec_queue_base_init(struct sec_dev_info *info,
+				struct sec_queue *queue, int queue_id)
+{
+	queue->dev_info = info;
+	queue->queue_id = queue_id;
+	snprintf(queue->name, sizeof(queue->name),
+		 "%s_%d", dev_name(info->dev), queue->queue_id);
+}
+
+static int sec_map_io(struct sec_dev_info *info, struct platform_device *pdev)
+{
+	struct resource *res;
+	int i;
+
+	for (i = 0; i < SEC_NUM_ADDR_REGIONS; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+
+		if (!res) {
+			dev_err(info->dev, "Memory resource %d not found\n", i);
+			return -EINVAL;
+		}
+
+		info->regs[i] = devm_ioremap(info->dev, res->start,
+					     resource_size(res));
+		if (!info->regs[i]) {
+			dev_err(info->dev,
+				"Memory resource %d could not be remapped\n",
+				i);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int sec_base_init(struct sec_dev_info *info,
+			 struct platform_device *pdev)
+{
+	int ret;
+
+	ret = sec_map_io(info, pdev);
+	if (ret)
+		return ret;
+
+	ret = sec_clk_en(info);
+	if (ret)
+		return ret;
+
+	ret = sec_reset_whole_module(info);
+	if (ret)
+		goto sec_clk_disable;
+
+	ret = sec_hw_init(info);
+	if (ret)
+		goto sec_clk_disable;
+
+	return 0;
+
+sec_clk_disable:
+	sec_clk_dis(info);
+
+	return ret;
+}
+
+static void sec_base_exit(struct sec_dev_info *info)
+{
+	sec_hw_exit(info);
+	sec_clk_dis(info);
+}
+
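+/* Per queue ring allocations, each rounded up to a whole number of pages */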
+#define SEC_Q_CMD_SIZE \
+	round_up(SEC_QUEUE_LEN * sizeof(struct sec_bd_info), PAGE_SIZE)
+#define SEC_Q_CQ_SIZE \
+	round_up(SEC_QUEUE_LEN * sizeof(struct sec_out_bd_info), PAGE_SIZE)
+#define SEC_Q_DB_SIZE \
+	round_up(SEC_QUEUE_LEN * sizeof(struct sec_debug_bd_info), PAGE_SIZE)
+
+static int sec_queue_res_cfg(struct sec_queue *queue)
+{
+	struct device *dev = queue->dev_info->dev;
+	struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd;
+	struct sec_queue_ring_cq *ring_cq = &queue->ring_cq;
+	struct sec_queue_ring_db *ring_db = &queue->ring_db;
+	int ret;
+
+	ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE,
+					      &ring_cmd->paddr,
+					      GFP_KERNEL);
+	if (!ring_cmd->vaddr)
+		return -ENOMEM;
+
+	atomic_set(&ring_cmd->used, 0);
+	mutex_init(&ring_cmd->lock);
+	ring_cmd->callback = sec_alg_callback;
+
+	ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE,
+					     &ring_cq->paddr,
+					     GFP_KERNEL);
+	if (!ring_cq->vaddr) {
+		ret = -ENOMEM;
+		goto err_free_ring_cmd;
+	}
+
+	ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE,
+					     &ring_db->paddr,
+					     GFP_KERNEL);
+	if (!ring_db->vaddr) {
+		ret = -ENOMEM;
+		goto err_free_ring_cq;
+	}
+	queue->task_irq = platform_get_irq(to_platform_device(dev),
+					   queue->queue_id * 2 + 1);
+	if (queue->task_irq <= 0) {
+		ret = -EINVAL;
+		goto err_free_ring_db;
+	}
+
+	return 0;
+
+err_free_ring_db:
+	dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
+			  queue->ring_db.paddr);
+err_free_ring_cq:
+	dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
+			  queue->ring_cq.paddr);
+err_free_ring_cmd:
+	dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
+			  queue->ring_cmd.paddr);
+
+	return ret;
+}
+
+static void sec_queue_free_ring_pages(struct sec_queue *queue)
+{
+	struct device *dev = queue->dev_info->dev;
+
+	dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
+			  queue->ring_db.paddr);
+	dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
+			  queue->ring_cq.paddr);
+	dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
+			  queue->ring_cmd.paddr);
+}
+
+static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue,
+			    int queue_id)
+{
+	int ret;
+
+	sec_queue_base_init(info, queue, queue_id);
+
+	ret = sec_queue_res_cfg(queue);
+	if (ret)
+		return ret;
+
+	ret = sec_queue_map_io(queue);
+	if (ret) {
+		dev_err(info->dev, "Queue map failed %d\n", ret);
+		sec_queue_free_ring_pages(queue);
+		return ret;
+	}
+
+	sec_queue_hw_init(queue);
+
+	return 0;
+}
+
+static void sec_queue_unconfig(struct sec_dev_info *info,
+			       struct sec_queue *queue)
+{
+	sec_queue_unmap_io(queue);
+	sec_queue_free_ring_pages(queue);
+}
+
+static int sec_id_alloc(struct sec_dev_info *info)
+{
+	int ret = 0;
+	int i;
+
+	mutex_lock(&sec_id_lock);
+
+	for (i = 0; i < SEC_MAX_DEVICES; i++)
+		if (!sec_devices[i])
+			break;
+	if (i == SEC_MAX_DEVICES) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+	info->sec_id = i;
+	sec_devices[info->sec_id] = info;
+
+unlock:
+	mutex_unlock(&sec_id_lock);
+
+	return ret;
+}
+
+static void sec_id_free(struct sec_dev_info *info)
+{
+	mutex_lock(&sec_id_lock);
+	sec_devices[info->sec_id] = NULL;
+	mutex_unlock(&sec_id_lock);
+}
+
+static int sec_probe(struct platform_device *pdev)
+{
+	struct sec_dev_info *info;
+	struct device *dev = &pdev->dev;
+	int i, j;
+	int ret;
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (ret) {
+		dev_err(dev, "Failed to set 64 bit dma mask %d\n", ret);
+		return -ENODEV;
+	}
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->dev = dev;
+	mutex_init(&info->dev_lock);
+
+	info->hw_sgl_pool = dmam_pool_create("sgl", dev,
+					     sizeof(struct sec_hw_sgl), 64, 0);
+	if (!info->hw_sgl_pool) {
+		dev_err(dev, "Failed to create sec sgl dma pool\n");
+		return -ENOMEM;
+	}
+
+	ret = sec_base_init(info, pdev);
+	if (ret) {
+		dev_err(dev, "Base initialization failed %d\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < SEC_Q_NUM; i++) {
+		ret = sec_queue_config(info, &info->queues[i], i);
+		if (ret)
+			goto queues_unconfig;
+
+		ret = sec_queue_irq_init(&info->queues[i]);
+		if (ret) {
+			sec_queue_unconfig(info, &info->queues[i]);
+			goto queues_unconfig;
+		}
+	}
+
+	ret = sec_algs_register();
+	if (ret) {
+		dev_err(dev, "Failed to register algorithms with crypto %d\n",
+			ret);
+		goto queues_unconfig;
+	}
+
+	platform_set_drvdata(pdev, info);
+
+	ret = sec_id_alloc(info);
+	if (ret)
+		goto algs_unregister;
+
+	return 0;
+
+algs_unregister:
+	sec_algs_unregister();
+queues_unconfig:
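+	/* Unwind only the queues that were successfully configured */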
+	for (j = i - 1; j >= 0; j--) {
+		sec_queue_irq_uninit(&info->queues[j]);
+		sec_queue_unconfig(info, &info->queues[j]);
+	}
+	sec_base_exit(info);
+
+	return ret;
+}
+
+static int sec_remove(struct platform_device *pdev)
+{
+	struct sec_dev_info *info = platform_get_drvdata(pdev);
+	int i;
+
+	/* Unexpose as soon as possible, reuse during remove is fine */
+	sec_id_free(info);
+
+	sec_algs_unregister();
+
+	for (i = 0; i < SEC_Q_NUM; i++) {
+		sec_queue_irq_uninit(&info->queues[i]);
+		sec_queue_unconfig(info, &info->queues[i]);
+	}
+
+	sec_base_exit(info);
+
+	return 0;
+}
+
+static const __maybe_unused struct of_device_id sec_match[] = {
+	{ .compatible = "hisilicon,hip06-sec" },
+	{ .compatible = "hisilicon,hip07-sec" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, sec_match);
+
+static const __maybe_unused struct acpi_device_id sec_acpi_match[] = {
+	{ "HISI02C1", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, sec_acpi_match);
+
+static struct platform_driver sec_driver = {
+	.probe = sec_probe,
+	.remove = sec_remove,
+	.driver = {
+		.name = "hisi_sec_platform_driver",
+		.of_match_table = sec_match,
+		.acpi_match_table = ACPI_PTR(sec_acpi_match),
+	},
+};
+module_platform_driver(sec_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon Security Accelerators");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com");
+MODULE_AUTHOR("Jonathan Cameron <jonathan.cameron@huawei.com>");
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
new file mode 100644
index 0000000..2d2f186
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef _SEC_DRV_H_
+#define _SEC_DRV_H_
+
+#include <crypto/algapi.h>
+#include <linux/kfifo.h>
+
+#define SEC_MAX_SGE_NUM			64
+#define SEC_HW_RING_NUM			3
+
+#define SEC_CMD_RING			0
+#define SEC_OUTORDER_RING		1
+#define SEC_DBG_RING			2
+
+/* A reasonable length to balance memory use against flexibility */
+#define SEC_QUEUE_LEN			512
+
+struct sec_bd_info {
+#define SEC_BD_W0_T_LEN_M			GENMASK(4, 0)
+#define SEC_BD_W0_T_LEN_S			0
+
+#define SEC_BD_W0_C_WIDTH_M			GENMASK(6, 5)
+#define SEC_BD_W0_C_WIDTH_S			5
+#define   SEC_C_WIDTH_AES_128BIT		0
+#define   SEC_C_WIDTH_AES_8BIT		1
+#define   SEC_C_WIDTH_AES_1BIT		2
+#define   SEC_C_WIDTH_DES_64BIT		0
+#define   SEC_C_WIDTH_DES_8BIT		1
+#define   SEC_C_WIDTH_DES_1BIT		2
+
+#define SEC_BD_W0_C_MODE_M			GENMASK(9, 7)
+#define SEC_BD_W0_C_MODE_S			7
+#define   SEC_C_MODE_ECB			0
+#define   SEC_C_MODE_CBC			1
+#define   SEC_C_MODE_CTR			4
+#define   SEC_C_MODE_CCM			5
+#define   SEC_C_MODE_GCM			6
+#define   SEC_C_MODE_XTS			7
+
+#define SEC_BD_W0_SEQ				BIT(10)
+#define SEC_BD_W0_DE				BIT(11)
+#define SEC_BD_W0_DAT_SKIP_M			GENMASK(13, 12)
+#define SEC_BD_W0_DAT_SKIP_S			12
+#define SEC_BD_W0_C_GRAN_SIZE_19_16_M		GENMASK(17, 14)
+#define SEC_BD_W0_C_GRAN_SIZE_19_16_S		14
+
+#define SEC_BD_W0_CIPHER_M			GENMASK(19, 18)
+#define SEC_BD_W0_CIPHER_S			18
+#define   SEC_CIPHER_NULL			0
+#define   SEC_CIPHER_ENCRYPT			1
+#define   SEC_CIPHER_DECRYPT			2
+
+#define SEC_BD_W0_AUTH_M			GENMASK(21, 20)
+#define SEC_BD_W0_AUTH_S			20
+#define   SEC_AUTH_NULL				0
+#define   SEC_AUTH_MAC				1
+#define   SEC_AUTH_VERIF			2
+
+#define SEC_BD_W0_AI_GEN			BIT(22)
+#define SEC_BD_W0_CI_GEN			BIT(23)
+#define SEC_BD_W0_NO_HPAD			BIT(24)
+#define SEC_BD_W0_HM_M				GENMASK(26, 25)
+#define SEC_BD_W0_HM_S				25
+#define SEC_BD_W0_ICV_OR_SKEY_EN_M		GENMASK(28, 27)
+#define SEC_BD_W0_ICV_OR_SKEY_EN_S		27
+
+/* Multi purpose field - gran size bits for send, flag for recv */
+#define SEC_BD_W0_FLAG_M			GENMASK(30, 29)
+#define SEC_BD_W0_C_GRAN_SIZE_21_20_M		GENMASK(30, 29)
+#define SEC_BD_W0_FLAG_S			29
+#define SEC_BD_W0_C_GRAN_SIZE_21_20_S		29
+
+#define SEC_BD_W0_DONE				BIT(31)
+	u32 w0;
+
+#define SEC_BD_W1_AUTH_GRAN_SIZE_M		GENMASK(21, 0)
+#define SEC_BD_W1_AUTH_GRAN_SIZE_S		0
+#define SEC_BD_W1_M_KEY_EN			BIT(22)
+#define SEC_BD_W1_BD_INVALID			BIT(23)
+#define SEC_BD_W1_ADDR_TYPE			BIT(24)
+
+#define SEC_BD_W1_A_ALG_M			GENMASK(28, 25)
+#define SEC_BD_W1_A_ALG_S			25
+#define   SEC_A_ALG_SHA1			0
+#define   SEC_A_ALG_SHA256			1
+#define   SEC_A_ALG_MD5				2
+#define   SEC_A_ALG_SHA224			3
+#define   SEC_A_ALG_HMAC_SHA1			8
+#define   SEC_A_ALG_HMAC_SHA224			10
+#define   SEC_A_ALG_HMAC_SHA256			11
+#define   SEC_A_ALG_HMAC_MD5			12
+#define   SEC_A_ALG_AES_XCBC			13
+#define   SEC_A_ALG_AES_CMAC			14
+
+#define SEC_BD_W1_C_ALG_M			GENMASK(31, 29)
+#define SEC_BD_W1_C_ALG_S			29
+#define   SEC_C_ALG_DES				0
+#define   SEC_C_ALG_3DES			1
+#define   SEC_C_ALG_AES				2
+
+	u32 w1;
+
+#define SEC_BD_W2_C_GRAN_SIZE_15_0_M		GENMASK(15, 0)
+#define SEC_BD_W2_C_GRAN_SIZE_15_0_S		0
+#define SEC_BD_W2_GRAN_NUM_M			GENMASK(31, 16)
+#define SEC_BD_W2_GRAN_NUM_S			16
+	u32 w2;
+
+#define SEC_BD_W3_AUTH_LEN_OFFSET_M		GENMASK(9, 0)
+#define SEC_BD_W3_AUTH_LEN_OFFSET_S		0
+#define SEC_BD_W3_CIPHER_LEN_OFFSET_M		GENMASK(19, 10)
+#define SEC_BD_W3_CIPHER_LEN_OFFSET_S		10
+#define SEC_BD_W3_MAC_LEN_M			GENMASK(24, 20)
+#define SEC_BD_W3_MAC_LEN_S			20
+#define SEC_BD_W3_A_KEY_LEN_M			GENMASK(29, 25)
+#define SEC_BD_W3_A_KEY_LEN_S			25
+#define SEC_BD_W3_C_KEY_LEN_M			GENMASK(31, 30)
+#define SEC_BD_W3_C_KEY_LEN_S			30
+#define   SEC_KEY_LEN_AES_128			0
+#define   SEC_KEY_LEN_AES_192			1
+#define   SEC_KEY_LEN_AES_256			2
+#define   SEC_KEY_LEN_DES			1
+#define   SEC_KEY_LEN_3DES_3_KEY		1
+#define   SEC_KEY_LEN_3DES_2_KEY		3
+	u32 w3;
+
+	/* W4,5 */
+	union {
+		u32 authkey_addr_lo;
+		u32 authiv_addr_lo;
+	};
+	union {
+		u32 authkey_addr_hi;
+		u32 authiv_addr_hi;
+	};
+
+	/* W6,7 */
+	u32 cipher_key_addr_lo;
+	u32 cipher_key_addr_hi;
+
+	/* W8,9 */
+	u32 cipher_iv_addr_lo;
+	u32 cipher_iv_addr_hi;
+
+	/* W10,11 */
+	u32 data_addr_lo;
+	u32 data_addr_hi;
+
+	/* W12,13 */
+	u32 mac_addr_lo;
+	u32 mac_addr_hi;
+
+	/* W14,15 */
+	u32 cipher_destin_addr_lo;
+	u32 cipher_destin_addr_hi;
+};
+
+enum sec_mem_region {
+	SEC_COMMON = 0,
+	SEC_SAA,
+	SEC_NUM_ADDR_REGIONS
+};
+
+#define SEC_NAME_SIZE				64
+#define SEC_Q_NUM				16
+
+/**
+ * struct sec_queue_ring_cmd - store information about a SEC HW cmd ring
+ * @used: Local counter used to cheaply establish if the ring is empty.
+ * @lock: Protect against simultaneous adjusting of the read and write pointers.
+ * @vaddr: Virtual address for the ram pages used for the ring.
+ * @paddr: Physical address of the dma mapped region of ram used for the ring.
+ * @callback: Callback function called on a ring element completing.
+ */
+struct sec_queue_ring_cmd {
+	atomic_t used;
+	struct mutex lock;
+	struct sec_bd_info *vaddr;
+	dma_addr_t paddr;
+	void (*callback)(struct sec_bd_info *resp, void *ctx);
+};
+
+struct sec_debug_bd_info;
+struct sec_queue_ring_db {
+	struct sec_debug_bd_info *vaddr;
+	dma_addr_t paddr;
+};
+
+struct sec_out_bd_info;
+struct sec_queue_ring_cq {
+	struct sec_out_bd_info *vaddr;
+	dma_addr_t paddr;
+};
+
+struct sec_dev_info;
+
+enum sec_cipher_alg {
+	SEC_C_DES_ECB_64,
+	SEC_C_DES_CBC_64,
+
+	SEC_C_3DES_ECB_192_3KEY,
+	SEC_C_3DES_ECB_192_2KEY,
+
+	SEC_C_3DES_CBC_192_3KEY,
+	SEC_C_3DES_CBC_192_2KEY,
+
+	SEC_C_AES_ECB_128,
+	SEC_C_AES_ECB_192,
+	SEC_C_AES_ECB_256,
+
+	SEC_C_AES_CBC_128,
+	SEC_C_AES_CBC_192,
+	SEC_C_AES_CBC_256,
+
+	SEC_C_AES_CTR_128,
+	SEC_C_AES_CTR_192,
+	SEC_C_AES_CTR_256,
+
+	SEC_C_AES_XTS_128,
+	SEC_C_AES_XTS_256,
+
+	SEC_C_NULL,
+};
+
+/**
+ * struct sec_alg_tfm_ctx - hardware specific transformation context
+ * @cipher_alg: Cipher algorithm in use, including the encryption mode.
+ * @key: Key storage if required.
+ * @pkey: DMA address for the key storage.
+ * @req_template: Request template to save time on setup.
+ * @queue: The hardware queue associated with this tfm context.
+ * @lock: Protect key and pkey to ensure they are consistent
+ * @auth_buf: Current context buffer for auth operations.
+ * @backlog: The backlog queue used for cases where our buffers aren't
+ * large enough.
+ */
+struct sec_alg_tfm_ctx {
+	enum sec_cipher_alg cipher_alg;
+	u8 *key;
+	dma_addr_t pkey;
+	struct sec_bd_info req_template;
+	struct sec_queue *queue;
+	struct mutex lock;
+	u8 *auth_buf;
+	struct list_head backlog;
+};
+
+/**
+ * struct sec_request - data associated with a single crypto request
+ * @elements: List of subparts of this request (hardware size restriction)
+ * @num_elements: The number of subparts (used as an optimization)
+ * @lock: Protect elements of this structure against concurrent change.
+ * @tfm_ctx: hardware specific context.
+ * @len_in: length of in sgl from upper layers
+ * @len_out: length of out sgl from upper layers
+ * @dma_iv: initialization vector - physical address
+ * @err: store used to track errors across subelements of this request.
+ * @req_base: pointer to base element of associated crypto context.
+ * This is needed to allow shared handling of skcipher, ahash etc.
+ * @cb: completion callback.
+ * @backlog_head: list head to allow backlog maintenance.
+ *
+ * The hardware is limited in the maximum size of data that it can
+ * process from a single BD.  Typically this is fairly large (32MB)
+ * but still requires the complexity of splitting the incoming
+ * skreq up into a number of elements complete with appropriate
+ * iv chaining.
+ */
+struct sec_request {
+	struct list_head elements;
+	int num_elements;
+	struct mutex lock;
+	struct sec_alg_tfm_ctx *tfm_ctx;
+	int len_in;
+	int len_out;
+	dma_addr_t dma_iv;
+	int err;
+	struct crypto_async_request *req_base;
+	void (*cb)(struct sec_bd_info *resp, struct crypto_async_request *req);
+	struct list_head backlog_head;
+};
+
+/**
+ * struct sec_request_el - A subpart of a request.
+ * @head: allows us to attach this to the list in the sec_request
+ * @req: hardware block descriptor corresponding to this request subpart
+ * @in: hardware sgl for input - virtual address
+ * @dma_in: hardware sgl for input - physical address
+ * @sgl_in: scatterlist for this request subpart
+ * @out: hardware sgl for output - virtual address
+ * @dma_out: hardware sgl for output - physical address
+ * @sgl_out: scatterlist for this request subpart
+ * @sec_req: The request which this subpart forms a part of
+ * @el_length: Number of bytes in this subpart. Needed to locate
+ * last ivsize chunk for iv chaining.
+ */
+struct sec_request_el {
+	struct list_head head;
+	struct sec_bd_info req;
+	struct sec_hw_sgl *in;
+	dma_addr_t dma_in;
+	struct scatterlist *sgl_in;
+	struct sec_hw_sgl *out;
+	dma_addr_t dma_out;
+	struct scatterlist *sgl_out;
+	struct sec_request *sec_req;
+	size_t el_length;
+};
+
+/**
+ * struct sec_queue - All the information about a HW queue
+ * @dev_info: The parent SEC device to which this queue belongs.
+ * @task_irq: Completion interrupt for the queue.
+ * @name: Human readable queue description, also used as the irq name.
+ * @ring_cmd: The command ring where block descriptors are submitted.
+ * @ring_cq: The out of order completion ring.
+ * @ring_db: The debug block descriptor ring.
+ * @regs: The iomapped device registers
+ * @queue_id: Index of the queue used for naming and resource selection.
+ * @in_use: Flag to say if the queue is in use.
+ * @expected: The next expected element to finish assuming we were in order.
+ * @unprocessed: A bitmap to track which OoO elements are done but not handled.
+ * @softqueue: A software queue used when chaining requirements prevent direct
+ *   use of the hardware queues.
+ * @havesoftqueue: A flag to say we have a software queue - as we may need one
+ *   for the current mode.
+ * @queuelock: Protect the soft queue from concurrent changes to avoid
+ *   potential data loss races.
+ * @shadow: Pointers back to the shadow copy of the hardware ring element,
+ *   needed because we can't store any context reference in the bd element.
+ */
+struct sec_queue {
+	struct sec_dev_info *dev_info;
+	int task_irq;
+	char name[SEC_NAME_SIZE];
+	struct sec_queue_ring_cmd ring_cmd;
+	struct sec_queue_ring_cq ring_cq;
+	struct sec_queue_ring_db ring_db;
+	void __iomem *regs;
+	u32 queue_id;
+	bool in_use;
+	int expected;
+
+	DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN);
+	DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *));
+	bool havesoftqueue;
+	struct mutex queuelock;
+	void *shadow[SEC_QUEUE_LEN];
+};
+
+/**
+ * struct sec_hw_sge - Track each of the 64 element SEC HW SGL entries
+ * @buf: The IOV dma address for this entry.
+ * @len: Length of this IOV.
+ * @pad: Reserved space.
+ */
+struct sec_hw_sge {
+	dma_addr_t buf;
+	unsigned int len;
+	unsigned int pad;
+};
+
+/**
+ * struct sec_hw_sgl - One hardware SGL entry.
+ * @next_sgl: The next entry if we need to chain dma address. Null if last.
+ * @entry_sum_in_chain: The full count of SGEs - only matters for first SGL.
+ * @entry_sum_in_sgl: The number of SGEs in this SGL element.
+ * @flag: Unused in skciphers.
+ * @serial_num: Unused in skciphers.
+ * @cpuid: Currently unused.
+ * @data_bytes_in_sgl: Count of bytes from all SGEs in this SGL.
+ * @next: Virtual address used to stash the next sgl - useful in completion.
+ * @reserved: A reserved field not currently used.
+ * @sge_entries: The (up to) 64 Scatter Gather Entries, representing IOVs.
+ * @node: Currently unused.
+ */
+struct sec_hw_sgl {
+	dma_addr_t next_sgl;
+	u16 entry_sum_in_chain;
+	u16 entry_sum_in_sgl;
+	u32 flag;
+	u64 serial_num;
+	u32 cpuid;
+	u32 data_bytes_in_sgl;
+	struct sec_hw_sgl *next;
+	u64 reserved;
+	struct sec_hw_sge  sge_entries[SEC_MAX_SGE_NUM];
+	u8 node[16];
+};
+
+struct dma_pool;
+
+/**
+ * struct sec_dev_info - The full SEC unit comprising queues and processors.
+ * @sec_id: Index used to track which SEC this is when more than one is present.
+ * @num_saas: The number of SAA processing units enabled.
+ * @regs: iomapped register regions shared by whole SEC unit.
+ * @dev_lock: Protects concurrent queue allocation / freeing for the SEC.
+ * @queues_in_use: The number of queues currently allocated from this device.
+ * @queues: The 16 queues that this SEC instance provides.
+ * @dev: Device pointer.
+ * @hw_sgl_pool: DMA pool used to minimise mapping for the scatter gather lists.
+ */
+struct sec_dev_info {
+	int sec_id;
+	int num_saas;
+	void __iomem *regs[SEC_NUM_ADDR_REGIONS];
+	struct mutex dev_lock;
+	int queues_in_use;
+	struct sec_queue queues[SEC_Q_NUM];
+	struct device *dev;
+	struct dma_pool *hw_sgl_pool;
+};
+
+int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx);
+bool sec_queue_can_enqueue(struct sec_queue *queue, int num);
+int sec_queue_stop_release(struct sec_queue *queue);
+struct sec_queue *sec_queue_alloc_start_safe(void);
+bool sec_queue_empty(struct sec_queue *queue);
+
+/* Algorithm specific elements from sec_algs.c */
+void sec_alg_callback(struct sec_bd_info *resp, void *ctx);
+int sec_algs_register(void);
+void sec_algs_unregister(void);
+
+#endif /* _SEC_DRV_H_ */
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
new file mode 100644
index 0000000..b87000a
--- /dev/null
+++ b/drivers/crypto/img-hash.c
@@ -0,0 +1,1125 @@
+/*
+ * Copyright (c) 2014 Imagination Technologies
+ * Authors:  Will Thomas, James Hartley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ *	Interface structure taken from omap-sham driver
+ */
+
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+
+#define CR_RESET			0
+#define CR_RESET_SET			1
+#define CR_RESET_UNSET			0
+
+#define CR_MESSAGE_LENGTH_H		0x4
+#define CR_MESSAGE_LENGTH_L		0x8
+
+#define CR_CONTROL			0xc
+#define CR_CONTROL_BYTE_ORDER_3210	0
+#define CR_CONTROL_BYTE_ORDER_0123	1
+#define CR_CONTROL_BYTE_ORDER_2310	2
+#define CR_CONTROL_BYTE_ORDER_1032	3
+#define CR_CONTROL_BYTE_ORDER_SHIFT	8
+#define CR_CONTROL_ALGO_MD5	0
+#define CR_CONTROL_ALGO_SHA1	1
+#define CR_CONTROL_ALGO_SHA224	2
+#define CR_CONTROL_ALGO_SHA256	3
+
+#define CR_INTSTAT			0x10
+#define CR_INTENAB			0x14
+#define CR_INTCLEAR			0x18
+#define CR_INT_RESULTS_AVAILABLE	BIT(0)
+#define CR_INT_NEW_RESULTS_SET		BIT(1)
+#define CR_INT_RESULT_READ_ERR		BIT(2)
+#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
+#define CR_INT_STATUS			BIT(8)
+
+#define CR_RESULT_QUEUE		0x1c
+#define CR_RSD0				0x40
+#define CR_CORE_REV			0x50
+#define CR_CORE_DES1		0x60
+#define CR_CORE_DES2		0x70
+
+#define DRIVER_FLAGS_BUSY		BIT(0)
+#define DRIVER_FLAGS_FINAL		BIT(1)
+#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
+#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
+#define DRIVER_FLAGS_INIT		BIT(4)
+#define DRIVER_FLAGS_CPU		BIT(5)
+#define DRIVER_FLAGS_DMA_READY		BIT(6)
+#define DRIVER_FLAGS_ERROR		BIT(7)
+#define DRIVER_FLAGS_SG			BIT(8)
+#define DRIVER_FLAGS_SHA1		BIT(18)
+#define DRIVER_FLAGS_SHA224		BIT(19)
+#define DRIVER_FLAGS_SHA256		BIT(20)
+#define DRIVER_FLAGS_MD5		BIT(21)
+
+#define IMG_HASH_QUEUE_LENGTH		20
+#define IMG_HASH_DMA_BURST		4
+#define IMG_HASH_DMA_THRESHOLD		64
+
+#ifdef __LITTLE_ENDIAN
+#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
+#else
+#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
+#endif
+
+struct img_hash_dev;
+
+struct img_hash_request_ctx {
+	struct img_hash_dev	*hdev;
+	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+	unsigned long		flags;
+	size_t			digsize;
+
+	dma_addr_t		dma_addr;
+	size_t			dma_ct;
+
+	/* sg root */
+	struct scatterlist	*sgfirst;
+	/* walk state */
+	struct scatterlist	*sg;
+	size_t			nents;
+	size_t			offset;
+	unsigned int		total;
+	size_t			sent;
+
+	unsigned long		op;
+
+	size_t			bufcnt;
+	struct ahash_request	fallback_req;
+
+	/* Zero length buffer must remain last member of struct */
+	u8 buffer[0] __aligned(sizeof(u32));
+};
+
+struct img_hash_ctx {
+	struct img_hash_dev	*hdev;
+	unsigned long		flags;
+	struct crypto_ahash	*fallback;
+};
+
+struct img_hash_dev {
+	struct list_head	list;
+	struct device		*dev;
+	struct clk		*hash_clk;
+	struct clk		*sys_clk;
+	void __iomem		*io_base;
+
+	phys_addr_t		bus_addr;
+	void __iomem		*cpu_addr;
+
+	spinlock_t		lock;
+	int			err;
+	struct tasklet_struct	done_task;
+	struct tasklet_struct	dma_task;
+
+	unsigned long		flags;
+	struct crypto_queue	queue;
+	struct ahash_request	*req;
+
+	struct dma_chan		*dma_lch;
+};
+
+struct img_hash_drv {
+	struct list_head dev_list;
+	spinlock_t lock;
+};
+
+static struct img_hash_drv img_hash = {
+	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
+};
+
+static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
+{
+	return readl_relaxed(hdev->io_base + offset);
+}
+
+static inline void img_hash_write(struct img_hash_dev *hdev,
+				  u32 offset, u32 value)
+{
+	writel_relaxed(value, hdev->io_base + offset);
+}
+
+static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
+{
+	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
+}
+
+static void img_hash_start(struct img_hash_dev *hdev, bool dma)
+{
+	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;
+
+	if (ctx->flags & DRIVER_FLAGS_MD5)
+		cr |= CR_CONTROL_ALGO_MD5;
+	else if (ctx->flags & DRIVER_FLAGS_SHA1)
+		cr |= CR_CONTROL_ALGO_SHA1;
+	else if (ctx->flags & DRIVER_FLAGS_SHA224)
+		cr |= CR_CONTROL_ALGO_SHA224;
+	else if (ctx->flags & DRIVER_FLAGS_SHA256)
+		cr |= CR_CONTROL_ALGO_SHA256;
+	dev_dbg(hdev->dev, "Starting hash process\n");
+	img_hash_write(hdev, CR_CONTROL, cr);
+
+	/*
+	 * The hardware block requires two cycles between writing the control
+	 * register and writing the first word of data in non DMA mode. To
+	 * ensure the first data write is not grouped in a burst with the
+	 * control register write, a read is issued to 'flush' the bus.
+	 */
+	if (!dma)
+		img_hash_read(hdev, CR_CONTROL);
+}
+
+static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
+			     size_t length, int final)
+{
+	u32 count, len32;
+	const u32 *buffer = (const u32 *)buf;
+
+	dev_dbg(hdev->dev, "xmit_cpu:  length: %zu bytes\n", length);
+
+	if (final)
+		hdev->flags |= DRIVER_FLAGS_FINAL;
+
+	len32 = DIV_ROUND_UP(length, sizeof(u32));
+
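+	/*
+	 * Push whole 32 bit words to the data port. Any padding in the final
+	 * word does not matter as the block was programmed with the exact
+	 * message length up front.
+	 */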
+	for (count = 0; count < len32; count++)
+		writel_relaxed(buffer[count], hdev->cpu_addr);
+
+	return -EINPROGRESS;
+}
+
+static void img_hash_dma_callback(void *data)
+{
+	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
+	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+
+	if (ctx->bufcnt) {
+		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
+		ctx->bufcnt = 0;
+	}
+	if (ctx->sg)
+		tasklet_schedule(&hdev->dma_task);
+}
+
+static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
+{
+	struct dma_async_tx_descriptor *desc;
+	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+
+	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
+	if (ctx->dma_ct == 0) {
+		dev_err(hdev->dev, "Invalid DMA sg\n");
+		hdev->err = -EINVAL;
+		return -EINVAL;
+	}
+
+	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
+				       sg,
+				       ctx->dma_ct,
+				       DMA_MEM_TO_DEV,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc) {
+		dev_err(hdev->dev, "Null DMA descriptor\n");
+		hdev->err = -EINVAL;
+		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
+		return -EINVAL;
+	}
+	desc->callback = img_hash_dma_callback;
+	desc->callback_param = hdev;
+	dmaengine_submit(desc);
+	dma_async_issue_pending(hdev->dma_lch);
+
+	return 0;
+}
+
+static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
+{
+	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+
+	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
+					ctx->buffer, hdev->req->nbytes);
+
+	ctx->total = hdev->req->nbytes;
+	ctx->bufcnt = 0;
+
+	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);
+
+	img_hash_start(hdev, false);
+
+	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
+}
+
+static int img_hash_finish(struct ahash_request *req)
+{
+	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
+
+	if (!req->result)
+		return -EINVAL;
+
+	memcpy(req->result, ctx->digest, ctx->digsize);
+
+	return 0;
+}
+
+static void img_hash_copy_hash(struct ahash_request *req)
+{
+	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
+	u32 *hash = (u32 *)ctx->digest;
+	int i;
+
+	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
+		hash[i] = img_hash_read_result_queue(ctx->hdev);
+}
+
+static void img_hash_finish_req(struct ahash_request *req, int err)
+{
+	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
+	struct img_hash_dev *hdev =  ctx->hdev;
+
+	if (!err) {
+		img_hash_copy_hash(req);
+		if (DRIVER_FLAGS_FINAL & hdev->flags)
+			err = img_hash_finish(req);
+	} else {
+		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
+		ctx->flags |= DRIVER_FLAGS_ERROR;
+	}
+
+	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
+		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);
+
+	if (req->base.complete)
+		req->base.complete(&req->base, err);
+}
+
+static int img_hash_write_via_dma(struct img_hash_dev *hdev)
+{
+	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+
+	img_hash_start(hdev, true);
+
+	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);
+
+	if (!ctx->total)
+		hdev->flags |= DRIVER_FLAGS_FINAL;
+
+	hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;
+
+	tasklet_schedule(&hdev->dma_task);
+
+	return -EINPROGRESS;
+}
+
+static int img_hash_dma_init(struct img_hash_dev *hdev)
+{
+	struct dma_slave_config dma_conf;
+	int err = -EINVAL;
+
+	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
+	if (!hdev->dma_lch) {
+		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
+		return -EBUSY;
+	}
+	dma_conf.direction = DMA_MEM_TO_DEV;
+	dma_conf.dst_addr = hdev->bus_addr;
+	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
+	dma_conf.device_fc = false;
+
+	err = dmaengine_slave_config(hdev->dma_lch,  &dma_conf);
+	if (err) {
+		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
+		dma_release_channel(hdev->dma_lch);
+		return err;
+	}
+
+	return 0;
+}
+
+static void img_hash_dma_task(unsigned long d)
+{
+	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
+	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+	u8 *addr;
+	size_t nbytes, bleft, wsend, len, tbc;
+	struct scatterlist tsg;
+
+	if (!hdev->req || !ctx->sg)
+		return;
+
+	addr = sg_virt(ctx->sg);
+	nbytes = ctx->sg->length - ctx->offset;
+
+	/*
+	 * The hash accelerator does not support a data valid mask. This means
+	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
+	 * padding bytes in the last word written by that dma would erroneously
+	 * be included in the hash. To avoid this we round down the transfer,
+	 * and add the excess to the start of the next dma. It does not matter
+	 * that the final dma may not be a multiple of 4 bytes as the hashing
+	 * block is programmed to accept the correct number of bytes.
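+	 * For example, a 4099 byte scatterlist entry results in a 4096 byte
+	 * DMA, with the remaining 3 bytes carried over to be sent ahead of
+	 * the following data.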
+	 */
+
+	bleft = nbytes % 4;
+	wsend = (nbytes / 4);
+
+	if (wsend) {
+		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
+		if (img_hash_xmit_dma(hdev, &tsg)) {
+			dev_err(hdev->dev, "DMA failed, falling back to CPU\n");
+			ctx->flags |= DRIVER_FLAGS_CPU;
+			hdev->err = 0;
+			img_hash_xmit_cpu(hdev, addr + ctx->offset,
+					  wsend * 4, 0);
+			ctx->sent += wsend * 4;
+			wsend = 0;
+		} else {
+			ctx->sent += wsend * 4;
+		}
+	}
+
+	if (bleft) {
+		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
+						 ctx->buffer, bleft, ctx->sent);
+		tbc = 0;
+		ctx->sg = sg_next(ctx->sg);
+		while (ctx->sg && (ctx->bufcnt < 4)) {
+			len = ctx->sg->length;
+			if (likely(len > (4 - ctx->bufcnt)))
+				len = 4 - ctx->bufcnt;
+			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
+						 ctx->buffer + ctx->bufcnt, len,
+					ctx->sent + ctx->bufcnt);
+			ctx->bufcnt += tbc;
+			if (tbc >= ctx->sg->length) {
+				ctx->sg = sg_next(ctx->sg);
+				tbc = 0;
+			}
+		}
+
+		ctx->sent += ctx->bufcnt;
+		ctx->offset = tbc;
+
+		if (!wsend)
+			img_hash_dma_callback(hdev);
+	} else {
+		ctx->offset = 0;
+		ctx->sg = sg_next(ctx->sg);
+	}
+}
+
+static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
+{
+	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
+
+	if (ctx->flags & DRIVER_FLAGS_SG)
+		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
+
+	return 0;
+}
+
+static int img_hash_process_data(struct img_hash_dev *hdev)
+{
+	struct ahash_request *req = hdev->req;
+	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
+	int err = 0;
+
+	ctx->bufcnt = 0;
+
+	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
+		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
+			req->nbytes);
+		err = img_hash_write_via_dma(hdev);
+	} else {
+		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
+			req->nbytes);
+		err = img_hash_write_via_cpu(hdev);
+	}
+	return err;
+}
+
+static int img_hash_hw_init(struct img_hash_dev *hdev)
+{
+	unsigned long long nbits;
+	u32 u, l;
+
+	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
+	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
+	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);
+
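+	/*
+	 * The hardware takes the total message length in bits, split across
+	 * the high and low message length registers.
+	 */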
+	nbits = (u64)hdev->req->nbytes << 3;
+	u = nbits >> 32;
+	l = nbits;
+	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
+	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);
+
+	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
+		hdev->flags |= DRIVER_FLAGS_INIT;
+		hdev->err = 0;
+	}
+	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
+	return 0;
+}
+
+static int img_hash_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	rctx->fallback_req.base.flags =	req->base.flags
+		& CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int img_hash_handle_queue(struct img_hash_dev *hdev,
+				 struct ahash_request *req)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct img_hash_request_ctx *ctx;
+	unsigned long flags;
+	int err = 0, res = 0;
+
+	spin_lock_irqsave(&hdev->lock, flags);
+
+	if (req)
+		res = ahash_enqueue_request(&hdev->queue, req);
+
+	if (DRIVER_FLAGS_BUSY & hdev->flags) {
+		spin_unlock_irqrestore(&hdev->lock, flags);
+		return res;
+	}
+
+	backlog = crypto_get_backlog(&hdev->queue);
+	async_req = crypto_dequeue_request(&hdev->queue);
+	if (async_req)
+		hdev->flags |= DRIVER_FLAGS_BUSY;
+
+	spin_unlock_irqrestore(&hdev->lock, flags);
+
+	if (!async_req)
+		return res;
+
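+	/* Tell any backlogged request that it is now queued for processing */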
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	req = ahash_request_cast(async_req);
+	hdev->req = req;
+
+	ctx = ahash_request_ctx(req);
+
+	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
+		 ctx->op, req->nbytes);
+
+	err = img_hash_hw_init(hdev);
+
+	if (!err)
+		err = img_hash_process_data(hdev);
+
+	if (err != -EINPROGRESS) {
+		/* done_task will not finish so do it here */
+		img_hash_finish_req(req, err);
+	}
+	return res;
+}
+
+static int img_hash_update(struct ahash_request *req)
+{
+	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	rctx->fallback_req.base.flags = req->base.flags
+		& CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
+
+	return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int img_hash_final(struct ahash_request *req)
+{
+	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	rctx->fallback_req.base.flags = req->base.flags
+		& CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.result = req->result;
+
+	return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int img_hash_finup(struct ahash_request *req)
+{
+	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	rctx->fallback_req.base.flags = req->base.flags
+		& CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
+	rctx->fallback_req.result = req->result;
+
+	return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int img_hash_import(struct ahash_request *req, const void *in)
+{
+	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	rctx->fallback_req.base.flags = req->base.flags
+		& CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+static int img_hash_export(struct ahash_request *req, void *out)
+{
+	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	rctx->fallback_req.base.flags = req->base.flags
+		& CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
+static int img_hash_digest(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
+	struct img_hash_dev *hdev = NULL;
+	struct img_hash_dev *tmp;
+	int err;
+
+	spin_lock(&img_hash.lock);
+	if (!tctx->hdev) {
+		list_for_each_entry(tmp, &img_hash.dev_list, list) {
+			hdev = tmp;
+			break;
+		}
+		tctx->hdev = hdev;
+
+	} else {
+		hdev = tctx->hdev;
+	}
+
+	spin_unlock(&img_hash.lock);
+	ctx->hdev = hdev;
+	ctx->flags = 0;
+	ctx->digsize = crypto_ahash_digestsize(tfm);
+
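+	/* Select the hardware algorithm from the requested digest size */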
+	switch (ctx->digsize) {
+	case SHA1_DIGEST_SIZE:
+		ctx->flags |= DRIVER_FLAGS_SHA1;
+		break;
+	case SHA256_DIGEST_SIZE:
+		ctx->flags |= DRIVER_FLAGS_SHA256;
+		break;
+	case SHA224_DIGEST_SIZE:
+		ctx->flags |= DRIVER_FLAGS_SHA224;
+		break;
+	case MD5_DIGEST_SIZE:
+		ctx->flags |= DRIVER_FLAGS_MD5;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ctx->bufcnt = 0;
+	ctx->offset = 0;
+	ctx->sent = 0;
+	ctx->total = req->nbytes;
+	ctx->sg = req->src;
+	ctx->sgfirst = req->src;
+	ctx->nents = sg_nents(ctx->sg);
+
+	err = img_hash_handle_queue(tctx->hdev, req);
+
+	return err;
+}
+
+static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
+{
+	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	int err = -ENOMEM;
+
+	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
+					   CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->fallback)) {
+		pr_err("img_hash: Could not load fallback driver.\n");
+		err = PTR_ERR(ctx->fallback);
+		goto err;
+	}
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct img_hash_request_ctx) +
+				 crypto_ahash_reqsize(ctx->fallback) +
+				 IMG_HASH_DMA_THRESHOLD);
+
+	return 0;
+
+err:
+	return err;
+}
+
+static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
+{
+	return img_hash_cra_init(tfm, "md5-generic");
+}
+
+static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
+{
+	return img_hash_cra_init(tfm, "sha1-generic");
+}
+
+static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
+{
+	return img_hash_cra_init(tfm, "sha224-generic");
+}
+
+static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
+{
+	return img_hash_cra_init(tfm, "sha256-generic");
+}
+
+static void img_hash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_ahash(tctx->fallback);
+}
+
+static irqreturn_t img_irq_handler(int irq, void *dev_id)
+{
+	struct img_hash_dev *hdev = dev_id;
+	u32 reg;
+
+	reg = img_hash_read(hdev, CR_INTSTAT);
+	img_hash_write(hdev, CR_INTCLEAR, reg);
+
+	if (reg & CR_INT_NEW_RESULTS_SET) {
+		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
+		if (DRIVER_FLAGS_BUSY & hdev->flags) {
+			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
+			if (!(DRIVER_FLAGS_CPU & hdev->flags))
+				hdev->flags |= DRIVER_FLAGS_DMA_READY;
+			tasklet_schedule(&hdev->done_task);
+		} else {
+			dev_warn(hdev->dev,
+				 "HASH interrupt when no active requests.\n");
+		}
+	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
+		dev_warn(hdev->dev,
+			 "IRQ triggered before the hash had completed\n");
+	} else if (reg & CR_INT_RESULT_READ_ERR) {
+		dev_warn(hdev->dev,
+			 "Attempt to read from an empty result queue\n");
+	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
+		dev_warn(hdev->dev,
+			 "Data written before the hardware was configured\n");
+	}
+	return IRQ_HANDLED;
+}
+
+static struct ahash_alg img_algs[] = {
+	{
+		.init = img_hash_init,
+		.update = img_hash_update,
+		.final = img_hash_final,
+		.finup = img_hash_finup,
+		.export = img_hash_export,
+		.import = img_hash_import,
+		.digest = img_hash_digest,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct md5_state),
+			.base = {
+				.cra_name = "md5",
+				.cra_driver_name = "img-md5",
+				.cra_priority = 300,
+				.cra_flags =
+				CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct img_hash_ctx),
+				.cra_init = img_hash_cra_md5_init,
+				.cra_exit = img_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+	{
+		.init = img_hash_init,
+		.update = img_hash_update,
+		.final = img_hash_final,
+		.finup = img_hash_finup,
+		.export = img_hash_export,
+		.import = img_hash_import,
+		.digest = img_hash_digest,
+		.halg = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize = sizeof(struct sha1_state),
+			.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "img-sha1",
+				.cra_priority = 300,
+				.cra_flags =
+				CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct img_hash_ctx),
+				.cra_init = img_hash_cra_sha1_init,
+				.cra_exit = img_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+	{
+		.init = img_hash_init,
+		.update = img_hash_update,
+		.final = img_hash_final,
+		.finup = img_hash_finup,
+		.export = img_hash_export,
+		.import = img_hash_import,
+		.digest = img_hash_digest,
+		.halg = {
+			.digestsize = SHA224_DIGEST_SIZE,
+			.statesize = sizeof(struct sha256_state),
+			.base = {
+				.cra_name = "sha224",
+				.cra_driver_name = "img-sha224",
+				.cra_priority = 300,
+				.cra_flags =
+				CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct img_hash_ctx),
+				.cra_init = img_hash_cra_sha224_init,
+				.cra_exit = img_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+	{
+		.init = img_hash_init,
+		.update = img_hash_update,
+		.final = img_hash_final,
+		.finup = img_hash_finup,
+		.export = img_hash_export,
+		.import = img_hash_import,
+		.digest = img_hash_digest,
+		.halg = {
+			.digestsize = SHA256_DIGEST_SIZE,
+			.statesize = sizeof(struct sha256_state),
+			.base = {
+				.cra_name = "sha256",
+				.cra_driver_name = "img-sha256",
+				.cra_priority = 300,
+				.cra_flags =
+				CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct img_hash_ctx),
+				.cra_init = img_hash_cra_sha256_init,
+				.cra_exit = img_hash_cra_exit,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	}
+};
+
+static int img_register_algs(struct img_hash_dev *hdev)
+{
+	int i, err;
+
+	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
+		err = crypto_register_ahash(&img_algs[i]);
+		if (err)
+			goto err_reg;
+	}
+	return 0;
+
+err_reg:
+	for (; i--; )
+		crypto_unregister_ahash(&img_algs[i]);
+
+	return err;
+}
+
+static int img_unregister_algs(struct img_hash_dev *hdev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
+		crypto_unregister_ahash(&img_algs[i]);
+	return 0;
+}
+
+static void img_hash_done_task(unsigned long data)
+{
+	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
+	int err = 0;
+
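+	/*
+	 * Completion bottom half: use the driver flags to work out whether
+	 * the CPU or DMA path has finished and complete the request.
+	 */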
+	if (hdev->err == -EINVAL) {
+		err = hdev->err;
+		goto finish;
+	}
+
+	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
+		img_hash_handle_queue(hdev, NULL);
+		return;
+	}
+
+	if (DRIVER_FLAGS_CPU & hdev->flags) {
+		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
+			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
+			goto finish;
+		}
+	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
+		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
+			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
+			img_hash_write_via_dma_stop(hdev);
+			if (hdev->err) {
+				err = hdev->err;
+				goto finish;
+			}
+		}
+		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
+			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
+					DRIVER_FLAGS_OUTPUT_READY);
+			goto finish;
+		}
+	}
+	return;
+
+finish:
+	img_hash_finish_req(hdev->req, err);
+}
+
+static const struct of_device_id img_hash_match[] = {
+	{ .compatible = "img,hash-accelerator" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, img_hash_match);
+
+static int img_hash_probe(struct platform_device *pdev)
+{
+	struct img_hash_dev *hdev;
+	struct device *dev = &pdev->dev;
+	struct resource *hash_res;
+	int	irq;
+	int err;
+
+	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
+	if (hdev == NULL)
+		return -ENOMEM;
+
+	spin_lock_init(&hdev->lock);
+
+	hdev->dev = dev;
+
+	platform_set_drvdata(pdev, hdev);
+
+	INIT_LIST_HEAD(&hdev->list);
+
+	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
+	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);
+
+	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);
+
+	/* Register bank */
+	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	hdev->io_base = devm_ioremap_resource(dev, hash_res);
+	if (IS_ERR(hdev->io_base)) {
+		err = PTR_ERR(hdev->io_base);
+		dev_err(dev, "can't ioremap, returned %d\n", err);
+
+		goto res_err;
+	}
+
+	/* Write port (DMA or CPU) */
+	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
+	if (IS_ERR(hdev->cpu_addr)) {
+		dev_err(dev, "can't ioremap write port\n");
+		err = PTR_ERR(hdev->cpu_addr);
+		goto res_err;
+	}
+	hdev->bus_addr = hash_res->start;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "no IRQ resource info\n");
+		err = irq;
+		goto res_err;
+	}
+
+	err = devm_request_irq(dev, irq, img_irq_handler, 0,
+			       dev_name(dev), hdev);
+	if (err) {
+		dev_err(dev, "unable to request irq\n");
+		goto res_err;
+	}
+	dev_dbg(dev, "using IRQ channel %d\n", irq);
+
+	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
+	if (IS_ERR(hdev->hash_clk)) {
+		dev_err(dev, "clock initialization failed.\n");
+		err = PTR_ERR(hdev->hash_clk);
+		goto res_err;
+	}
+
+	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
+	if (IS_ERR(hdev->sys_clk)) {
+		dev_err(dev, "clock initialization failed.\n");
+		err = PTR_ERR(hdev->sys_clk);
+		goto res_err;
+	}
+
+	err = clk_prepare_enable(hdev->hash_clk);
+	if (err)
+		goto res_err;
+
+	err = clk_prepare_enable(hdev->sys_clk);
+	if (err)
+		goto clk_err;
+
+	err = img_hash_dma_init(hdev);
+	if (err)
+		goto dma_err;
+
+	dev_dbg(dev, "using %s for DMA transfers\n",
+		dma_chan_name(hdev->dma_lch));
+
+	spin_lock(&img_hash.lock);
+	list_add_tail(&hdev->list, &img_hash.dev_list);
+	spin_unlock(&img_hash.lock);
+
+	err = img_register_algs(hdev);
+	if (err)
+		goto err_algs;
+	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");
+
+	return 0;
+
+err_algs:
+	spin_lock(&img_hash.lock);
+	list_del(&hdev->list);
+	spin_unlock(&img_hash.lock);
+	dma_release_channel(hdev->dma_lch);
+dma_err:
+	clk_disable_unprepare(hdev->sys_clk);
+clk_err:
+	clk_disable_unprepare(hdev->hash_clk);
+res_err:
+	tasklet_kill(&hdev->done_task);
+	tasklet_kill(&hdev->dma_task);
+
+	return err;
+}
+
+static int img_hash_remove(struct platform_device *pdev)
+{
+	struct img_hash_dev *hdev;
+
+	hdev = platform_get_drvdata(pdev);
+	spin_lock(&img_hash.lock);
+	list_del(&hdev->list);
+	spin_unlock(&img_hash.lock);
+
+	img_unregister_algs(hdev);
+
+	tasklet_kill(&hdev->done_task);
+	tasklet_kill(&hdev->dma_task);
+
+	dma_release_channel(hdev->dma_lch);
+
+	clk_disable_unprepare(hdev->hash_clk);
+	clk_disable_unprepare(hdev->sys_clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int img_hash_suspend(struct device *dev)
+{
+	struct img_hash_dev *hdev = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(hdev->hash_clk);
+	clk_disable_unprepare(hdev->sys_clk);
+
+	return 0;
+}
+
+static int img_hash_resume(struct device *dev)
+{
+	struct img_hash_dev *hdev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(hdev->hash_clk);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(hdev->sys_clk);
+	if (ret) {
+		clk_disable_unprepare(hdev->hash_clk);
+		return ret;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops img_hash_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
+};
+
+static struct platform_driver img_hash_driver = {
+	.probe		= img_hash_probe,
+	.remove		= img_hash_remove,
+	.driver		= {
+		.name	= "img-hash-accelerator",
+		.pm	= &img_hash_pm_ops,
+		.of_match_table	= of_match_ptr(img_hash_match),
+	}
+};
+module_platform_driver(img_hash_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
+MODULE_AUTHOR("Will Thomas.");
+MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");
diff --git a/drivers/crypto/inside-secure/Makefile b/drivers/crypto/inside-secure/Makefile
new file mode 100644
index 0000000..302f07d
--- /dev/null
+++ b/drivers/crypto/inside-secure/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += crypto_safexcel.o
+crypto_safexcel-objs := safexcel.o safexcel_ring.o safexcel_cipher.o safexcel_hash.o
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
new file mode 100644
index 0000000..86c699c
--- /dev/null
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -0,0 +1,1215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#include "safexcel.h"
+
+static u32 max_rings = EIP197_MAX_RINGS;
+module_param(max_rings, uint, 0644);
+MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
+
+static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
+{
+	u32 val, htable_offset;
+	int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
+
+	if (priv->version == EIP197B) {
+		cs_rc_max = EIP197B_CS_RC_MAX;
+		cs_ht_wc = EIP197B_CS_HT_WC;
+		cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
+		cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
+	} else {
+		cs_rc_max = EIP197D_CS_RC_MAX;
+		cs_ht_wc = EIP197D_CS_HT_WC;
+		cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
+		cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
+	}
+
+	/* Enable the record cache memory access */
+	val = readl(priv->base + EIP197_CS_RAM_CTRL);
+	val &= ~EIP197_TRC_ENABLE_MASK;
+	val |= EIP197_TRC_ENABLE_0;
+	writel(val, priv->base + EIP197_CS_RAM_CTRL);
+
+	/* Clear all ECC errors */
+	writel(0, priv->base + EIP197_TRC_ECCCTRL);
+
+	/*
+	 * Make sure the cache memory is accessible by taking the record
+	 * cache into reset.
+	 */
+	val = readl(priv->base + EIP197_TRC_PARAMS);
+	val |= EIP197_TRC_PARAMS_SW_RESET;
+	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
+	writel(val, priv->base + EIP197_TRC_PARAMS);
+
+	/* Clear all records */
+	for (i = 0; i < cs_rc_max; i++) {
+		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
+
+		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
+		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
+		       priv->base + offset);
+
+		val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
+		if (i == 0)
+			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
+		else if (i == cs_rc_max - 1)
+			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
+		writel(val, priv->base + offset + sizeof(u32));
+	}
+
+	/* Clear the hash table entries */
+	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
+	for (i = 0; i < cs_ht_wc; i++)
+		writel(GENMASK(29, 0),
+		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));
+
+	/* Disable the record cache memory access */
+	val = readl(priv->base + EIP197_CS_RAM_CTRL);
+	val &= ~EIP197_TRC_ENABLE_MASK;
+	writel(val, priv->base + EIP197_CS_RAM_CTRL);
+
+	/* Write head and tail pointers of the record free chain */
+	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
+	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
+	writel(val, priv->base + EIP197_TRC_FREECHAIN);
+
+	/* Configure the record cache #1 */
+	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
+	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
+	writel(val, priv->base + EIP197_TRC_PARAMS2);
+
+	/* Configure the record cache #2 */
+	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
+	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
+	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
+	writel(val, priv->base + EIP197_TRC_PARAMS);
+}
+
+static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
+				  const struct firmware *fw, int pe, u32 ctrl,
+				  u32 prog_en)
+{
+	const u32 *data = (const u32 *)fw->data;
+	u32 val;
+	int i;
+
+	/* Reset the engine to make its program memory accessible */
+	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
+	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
+	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
+	       EIP197_PE(priv) + ctrl);
+
+	/* Enable access to the program memory */
+	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
+
+	/* Write the firmware */
+	for (i = 0; i < fw->size / sizeof(u32); i++)
+		writel(be32_to_cpu(data[i]),
+		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
+
+	/* Disable access to the program memory */
+	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
+
+	/* Release engine from reset */
+	val = readl(EIP197_PE(priv) + ctrl);
+	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
+	writel(val, EIP197_PE(priv) + ctrl);
+}
+
+static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
+{
+	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
+	const struct firmware *fw[FW_NB];
+	char fw_path[31], *dir = NULL;
+	int i, j, ret = 0, pe;
+	u32 val;
+
+	switch (priv->version) {
+	case EIP197B:
+		dir = "eip197b";
+		break;
+	case EIP197D:
+		dir = "eip197d";
+		break;
+	default:
+		/* No firmware is required */
+		return 0;
+	}
+
+	for (i = 0; i < FW_NB; i++) {
+		snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
+		ret = request_firmware(&fw[i], fw_path, priv->dev);
+		if (ret) {
+			if (priv->version != EIP197B)
+				goto release_fw;
+
+			/* Fall back to the old firmware location for the
+			 * EIP197b.
+			 */
+			ret = request_firmware(&fw[i], fw_name[i], priv->dev);
+			if (ret) {
+				dev_err(priv->dev,
+					"Failed to request firmware %s (%d)\n",
+					fw_name[i], ret);
+				goto release_fw;
+			}
+		}
+	}
+
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		/* Clear the scratchpad memory */
+		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
+		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
+		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
+		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
+		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+
+		memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
+			  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
+
+		eip197_write_firmware(priv, fw[FW_IFPP], pe,
+				      EIP197_PE_ICE_FPP_CTRL(pe),
+				      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+
+		eip197_write_firmware(priv, fw[FW_IPUE], pe,
+				      EIP197_PE_ICE_PUE_CTRL(pe),
+				      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
+	}
+
+release_fw:
+	for (j = 0; j < i; j++)
+		release_firmware(fw[j]);
+
+	return ret;
+}
+
+static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
+{
+	u32 hdw, cd_size_rnd, val;
+	int i;
+
+	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+	hdw &= GENMASK(27, 25);
+	hdw >>= 25;
+
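+	/*
+	 * hdw comes from HIA_OPTIONS bits 27:25; cd_size_rnd is the command
+	 * descriptor size (in 32-bit words) rounded up and expressed in units
+	 * of 2^hdw words, matching the fetch size programmed below.
+	 */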
+	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;
+
+	for (i = 0; i < priv->config.rings; i++) {
+		/* ring base address */
+		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
+		       priv->config.cd_size,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
+		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
+		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+		/* Configure DMA tx control */
+		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
+		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
+		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
+
+		/* clear any pending interrupt */
+		writel(GENMASK(5, 0),
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
+	}
+
+	return 0;
+}
+
+static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
+{
+	u32 hdw, rd_size_rnd, val;
+	int i;
+
+	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+	hdw &= GENMASK(27, 25);
+	hdw >>= 25;
+
+	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;
+
+	for (i = 0; i < priv->config.rings; i++) {
+		/* ring base address */
+		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
+		       priv->config.rd_size,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
+
+		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
+		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+		/* Configure DMA tx control */
+		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
+		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
+		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
+		writel(val,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
+
+		/* clear any pending interrupt */
+		writel(GENMASK(7, 0),
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
+
+		/* enable ring interrupt */
+		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+		val |= EIP197_RDR_IRQ(i);
+		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+	}
+
+	return 0;
+}
+
+static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
+{
+	u32 version, val;
+	int i, ret, pe;
+
+	/* Determine endianness and configure byte swap */
+	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
+	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
+
+	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
+		val |= EIP197_MST_CTRL_BYTE_SWAP;
+	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
+		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
+
+	/* For EIP197 set maximum number of TX commands to 2^5 = 32 */
+	if (priv->version == EIP197B || priv->version == EIP197D)
+		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
+
+	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
+
+	/* Configure wr/rd cache values */
+	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
+	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
+	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
+
+	/* Interrupts reset */
+
+	/* Disable all global interrupts */
+	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
+
+	/* Clear any pending interrupt */
+	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
+
+	/* Processing Engine configuration */
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		/* Data Fetch Engine configuration */
+
+		/* Reset all DFE threads */
+		writel(EIP197_DxE_THR_CTRL_RESET_PE,
+		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+		if (priv->version == EIP197B || priv->version == EIP197D) {
+			/* Reset HIA input interface arbiter */
+			writel(EIP197_HIA_RA_PE_CTRL_RESET,
+			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+		}
+
+		/* DMA transfer size to use */
+		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
+		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
+		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
+		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
+		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
+		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
+		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
+		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
+
+		/* Take the DFE threads out of the reset state */
+		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+		/* Configure the processing engine thresholds */
+		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+		       EIP197_PE_IN_xBUF_THRES_MAX(9),
+		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
+		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+		       EIP197_PE_IN_xBUF_THRES_MAX(7),
+		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
+
+		if (priv->version == EIP197B || priv->version == EIP197D) {
+			/* enable HIA input interface arbiter and rings */
+			writel(EIP197_HIA_RA_PE_CTRL_EN |
+			       GENMASK(priv->config.rings - 1, 0),
+			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+		}
+
+		/* Data Store Engine configuration */
+
+		/* Reset all DSE threads */
+		writel(EIP197_DxE_THR_CTRL_RESET_PE,
+		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+		/* Wait for all DSE threads to complete */
+		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
+			GENMASK(15, 12)) != GENMASK(15, 12))
+			;
+
+		/* DMA transfer size to use */
+		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
+		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
+		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
+		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
+		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
+		/* FIXME: instability issues can occur for the EIP97, but
+		 * disabling this bit impacts performance.
+		 */
+		if (priv->version == EIP197B || priv->version == EIP197D)
+			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
+
+		/* Take the DSE threads out of the reset state */
+		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+		/* Configure the processing engine thresholds */
+		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
+		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
+		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
+
+		/* Processing Engine configuration */
+
+		/* H/W capabilities selection */
+		val = EIP197_FUNCTION_RSVD;
+		val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
+		val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
+		val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
+		val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
+		val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
+		val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
+		val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
+		val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
+		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
+	}
+
+	/* Command Descriptor Rings prepare */
+	for (i = 0; i < priv->config.rings; i++) {
+		/* Clear interrupts for this ring */
+		writel(GENMASK(31, 0),
+		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
+
+		/* Disable external triggering */
+		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+		/* Clear the pending prepared counter */
+		writel(EIP197_xDR_PREP_CLR_COUNT,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
+
+		/* Clear the pending processed counter */
+		writel(EIP197_xDR_PROC_CLR_COUNT,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
+
+		writel(0,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
+		writel(0,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
+
+		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
+	}
+
+	/* Result Descriptor Ring prepare */
+	for (i = 0; i < priv->config.rings; i++) {
+		/* Disable external triggering */
+		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+		/* Clear the pending prepared counter */
+		writel(EIP197_xDR_PREP_CLR_COUNT,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
+
+		/* Clear the pending processed counter */
+		writel(EIP197_xDR_PROC_CLR_COUNT,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
+
+		writel(0,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
+		writel(0,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
+
+		/* Ring size */
+		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
+	}
+
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		/* Enable command descriptor rings */
+		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+		/* Enable result descriptor rings */
+		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+	}
+
+	/* Clear any HIA interrupt */
+	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
+
+	if (priv->version == EIP197B || priv->version == EIP197D) {
+		eip197_trc_cache_init(priv);
+
+		ret = eip197_load_firmwares(priv);
+		if (ret)
+			return ret;
+	}
+
+	safexcel_hw_setup_cdesc_rings(priv);
+	safexcel_hw_setup_rdesc_rings(priv);
+
+	return 0;
+}
+
+/* Called with ring's lock taken */
+static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
+				       int ring)
+{
+	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
+
+	if (!coal)
+		return;
+
+	/* Configure when we want an interrupt */
+	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
+	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
+}
+
+void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
+{
+	struct crypto_async_request *req, *backlog;
+	struct safexcel_context *ctx;
+	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
+
+	/* If a request wasn't properly dequeued because of a lack of
+	 * resources, process it first.
+	 */
+	req = priv->ring[ring].req;
+	backlog = priv->ring[ring].backlog;
+	if (req)
+		goto handle_req;
+
+	while (true) {
+		spin_lock_bh(&priv->ring[ring].queue_lock);
+		backlog = crypto_get_backlog(&priv->ring[ring].queue);
+		req = crypto_dequeue_request(&priv->ring[ring].queue);
+		spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+		if (!req) {
+			priv->ring[ring].req = NULL;
+			priv->ring[ring].backlog = NULL;
+			goto finalize;
+		}
+
+handle_req:
+		ctx = crypto_tfm_ctx(req->tfm);
+		ret = ctx->send(req, ring, &commands, &results);
+		if (ret)
+			goto request_failed;
+
+		if (backlog)
+			backlog->complete(backlog, -EINPROGRESS);
+
+		/* In case the send() helper did not issue any command to push
+		 * to the engine because the input data was cached, continue to
+		 * dequeue other requests as this is valid and not an error.
+		 */
+		if (!commands && !results)
+			continue;
+
+		cdesc += commands;
+		rdesc += results;
+		nreq++;
+	}
+
+request_failed:
+	/* Not enough resources to handle all the requests. Bail out and save
+	 * the request and the backlog for the next dequeue call (per-ring).
+	 */
+	priv->ring[ring].req = req;
+	priv->ring[ring].backlog = backlog;
+
+finalize:
+	if (!nreq)
+		return;
+
+	spin_lock_bh(&priv->ring[ring].lock);
+
+	priv->ring[ring].requests += nreq;
+
+	if (!priv->ring[ring].busy) {
+		safexcel_try_push_requests(priv, ring);
+		priv->ring[ring].busy = true;
+	}
+
+	spin_unlock_bh(&priv->ring[ring].lock);
+
+	/* let the RDR know we have pending descriptors */
+	writel((rdesc * priv->config.rd_offset) << 2,
+	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
+
+	/* let the CDR know we have pending descriptors */
+	writel((cdesc * priv->config.cd_offset) << 2,
+	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
+}
+
+inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
+				       struct safexcel_result_desc *rdesc)
+{
+	if (likely(!rdesc->result_data.error_code))
+		return 0;
+
+	if (rdesc->result_data.error_code & 0x407f) {
+		/* Fatal error (bits 0-6, 14) */
+		dev_err(priv->dev,
+			"cipher: result: result descriptor error (%d)\n",
+			rdesc->result_data.error_code);
+		return -EIO;
+	} else if (rdesc->result_data.error_code == BIT(9)) {
+		/* Authentication failed */
+		return -EBADMSG;
+	}
+
+	/* All other non-fatal errors */
+	return -EINVAL;
+}
+
+inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+				 int ring,
+				 struct safexcel_result_desc *rdesc,
+				 struct crypto_async_request *req)
+{
+	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
+
+	priv->ring[ring].rdr_req[i] = req;
+}
+
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
+{
+	int i = safexcel_ring_first_rdr_index(priv, ring);
+
+	return priv->ring[ring].rdr_req[i];
+}
+
+void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
+{
+	struct safexcel_command_desc *cdesc;
+
+	/* Acknowledge the command descriptors */
+	do {
+		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
+		if (IS_ERR(cdesc)) {
+			dev_err(priv->dev,
+				"Could not retrieve the command descriptor\n");
+			return;
+		}
+	} while (!cdesc->last_seg);
+}
+
+void safexcel_inv_complete(struct crypto_async_request *req, int error)
+{
+	struct safexcel_inv_result *result = req->data;
+
+	if (error == -EINPROGRESS)
+		return;
+
+	result->error = error;
+	complete(&result->completion);
+}
+
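+/*
+ * Queue a command/result descriptor pair asking the engine to invalidate the
+ * transform record at ctxr_dma (CONTEXT_CONTROL_INV_TR); the result is then
+ * collected through the normal result ring path via the request set here.
+ */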
+int safexcel_invalidate_cache(struct crypto_async_request *async,
+			      struct safexcel_crypto_priv *priv,
+			      dma_addr_t ctxr_dma, int ring)
+{
+	struct safexcel_command_desc *cdesc;
+	struct safexcel_result_desc *rdesc;
+	int ret = 0;
+
+	/* Prepare command descriptor */
+	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
+	if (IS_ERR(cdesc))
+		return PTR_ERR(cdesc);
+
+	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
+	cdesc->control_data.options = 0;
+	cdesc->control_data.refresh = 0;
+	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
+
+	/* Prepare result descriptor */
+	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
+
+	if (IS_ERR(rdesc)) {
+		ret = PTR_ERR(rdesc);
+		goto cdesc_rollback;
+	}
+
+	safexcel_rdr_req_set(priv, ring, rdesc, async);
+
+	return ret;
+
+cdesc_rollback:
+	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
+
+	return ret;
+}
+
+static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
+						     int ring)
+{
+	struct crypto_async_request *req;
+	struct safexcel_context *ctx;
+	int ret, i, nreq, ndesc, tot_descs, handled = 0;
+	bool should_complete;
+
+handle_results:
+	tot_descs = 0;
+
+	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
+	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
+	if (!nreq)
+		goto requests_left;
+
+	for (i = 0; i < nreq; i++) {
+		req = safexcel_rdr_req_get(priv, ring);
+
+		ctx = crypto_tfm_ctx(req->tfm);
+		ndesc = ctx->handle_result(priv, ring, req,
+					   &should_complete, &ret);
+		if (ndesc < 0) {
+			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
+			goto acknowledge;
+		}
+
+		if (should_complete) {
+			local_bh_disable();
+			req->complete(req, ret);
+			local_bh_enable();
+		}
+
+		tot_descs += ndesc;
+		handled++;
+	}
+
+acknowledge:
+	if (i) {
+		writel(EIP197_xDR_PROC_xD_PKT(i) |
+		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
+		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+	}
+
+	/* If the number of requests overflowed the counter, try to process more
+	 * requests.
+	 */
+	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
+		goto handle_results;
+
+requests_left:
+	spin_lock_bh(&priv->ring[ring].lock);
+
+	priv->ring[ring].requests -= handled;
+	safexcel_try_push_requests(priv, ring);
+
+	if (!priv->ring[ring].requests)
+		priv->ring[ring].busy = false;
+
+	spin_unlock_bh(&priv->ring[ring].lock);
+}
+
+static void safexcel_dequeue_work(struct work_struct *work)
+{
+	struct safexcel_work_data *data =
+			container_of(work, struct safexcel_work_data, work);
+
+	safexcel_dequeue(data->priv, data->ring);
+}
+
+struct safexcel_ring_irq_data {
+	struct safexcel_crypto_priv *priv;
+	int ring;
+};
+
+static irqreturn_t safexcel_irq_ring(int irq, void *data)
+{
+	struct safexcel_ring_irq_data *irq_data = data;
+	struct safexcel_crypto_priv *priv = irq_data->priv;
+	int ring = irq_data->ring, rc = IRQ_NONE;
+	u32 status, stat;
+
+	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
+	if (!status)
+		return rc;
+
+	/* RDR interrupts */
+	if (status & EIP197_RDR_IRQ(ring)) {
+		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
+
+		if (unlikely(stat & EIP197_xDR_ERR)) {
+			/*
+			 * Fatal error, the RDR is unusable and must be
+			 * reinitialized. This should not happen under
+			 * normal circumstances.
+			 */
+			dev_err(priv->dev, "RDR: fatal error.");
+		} else if (likely(stat & EIP197_xDR_THRESH)) {
+			rc = IRQ_WAKE_THREAD;
+		}
+
+		/* ACK the interrupts */
+		writel(stat & 0xff,
+		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
+	}
+
+	/* ACK the interrupts */
+	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
+
+	return rc;
+}
+
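+/*
+ * Threaded half of the ring interrupt: drain the result descriptor ring,
+ * then kick the per-ring workqueue so safexcel_dequeue() can push any
+ * queued requests into the slots that were just freed.
+ */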
+static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
+{
+	struct safexcel_ring_irq_data *irq_data = data;
+	struct safexcel_crypto_priv *priv = irq_data->priv;
+	int ring = irq_data->ring;
+
+	safexcel_handle_result_descriptor(priv, ring);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	return IRQ_HANDLED;
+}
+
+static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
+				     irq_handler_t handler,
+				     irq_handler_t threaded_handler,
+				     struct safexcel_ring_irq_data *ring_irq_priv)
+{
+	int ret, irq = platform_get_irq_byname(pdev, name);
+
+	if (irq < 0) {
+		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
+		return irq;
+	}
+
+	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
+					threaded_handler, IRQF_ONESHOT,
+					dev_name(&pdev->dev), ring_irq_priv);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
+		return ret;
+	}
+
+	return irq;
+}
+
+static struct safexcel_alg_template *safexcel_algs[] = {
+	&safexcel_alg_ecb_des,
+	&safexcel_alg_cbc_des,
+	&safexcel_alg_ecb_des3_ede,
+	&safexcel_alg_cbc_des3_ede,
+	&safexcel_alg_ecb_aes,
+	&safexcel_alg_cbc_aes,
+	&safexcel_alg_md5,
+	&safexcel_alg_sha1,
+	&safexcel_alg_sha224,
+	&safexcel_alg_sha256,
+	&safexcel_alg_sha384,
+	&safexcel_alg_sha512,
+	&safexcel_alg_hmac_md5,
+	&safexcel_alg_hmac_sha1,
+	&safexcel_alg_hmac_sha224,
+	&safexcel_alg_hmac_sha256,
+	&safexcel_alg_hmac_sha384,
+	&safexcel_alg_hmac_sha512,
+	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
+};
+
+static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
+{
+	int i, j, ret = 0;
+
+	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
+		safexcel_algs[i]->priv = priv;
+
+		if (!(safexcel_algs[i]->engines & priv->version))
+			continue;
+
+		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
+			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
+		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
+			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
+		else
+			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);
+
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	for (j = 0; j < i; j++) {
+		if (!(safexcel_algs[j]->engines & priv->version))
+			continue;
+
+		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
+			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
+		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
+			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
+		else
+			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
+	}
+
+	return ret;
+}
+
+static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
+		if (!(safexcel_algs[i]->engines & priv->version))
+			continue;
+
+		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
+			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
+		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
+			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
+		else
+			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
+	}
+}
+
+static void safexcel_configure(struct safexcel_crypto_priv *priv)
+{
+	u32 val, mask = 0;
+
+	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+
+	/* Read number of PEs from the engine */
+	switch (priv->version) {
+	case EIP197B:
+	case EIP197D:
+		mask = EIP197_N_PES_MASK;
+		break;
+	default:
+		mask = EIP97_N_PES_MASK;
+	}
+	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
+
+	val = (val & GENMASK(27, 25)) >> 25;
+	mask = BIT(val) - 1;
+
+	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
+
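+	/*
+	 * Descriptor sizes below are in 32-bit words; the offsets round them
+	 * up to the 2^N alignment encoded in HIA_OPTIONS bits 27:25 so that
+	 * consecutive descriptors in a ring start on an aligned boundary.
+	 */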
+	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
+	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
+
+	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
+	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
+}
+
+static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
+{
+	struct safexcel_register_offsets *offsets = &priv->offsets;
+
+	switch (priv->version) {
+	case EIP197B:
+	case EIP197D:
+		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
+		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
+		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
+		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
+		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
+		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
+		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
+		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
+		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
+		offsets->pe		= EIP197_PE_BASE;
+		break;
+	case EIP97IES:
+		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
+		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
+		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
+		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
+		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
+		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
+		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
+		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
+		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
+		offsets->pe		= EIP97_PE_BASE;
+		break;
+	}
+}
+
+static int safexcel_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct safexcel_crypto_priv *priv;
+	int i, ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+
+	if (priv->version == EIP197B || priv->version == EIP197D)
+		priv->flags |= EIP197_TRC_CACHE;
+
+	safexcel_init_register_offsets(priv);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->base)) {
+		dev_err(dev, "failed to get resource\n");
+		return PTR_ERR(priv->base);
+	}
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	ret = PTR_ERR_OR_ZERO(priv->clk);
+	/* The clock isn't mandatory */
+	if (ret != -ENOENT) {
+		if (ret)
+			return ret;
+
+		ret = clk_prepare_enable(priv->clk);
+		if (ret) {
+			dev_err(dev, "unable to enable clk (%d)\n", ret);
+			return ret;
+		}
+	}
+
+	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
+	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
+	/* The clock isn't mandatory */
+	if (ret != -ENOENT) {
+		if (ret)
+			goto err_core_clk;
+
+		ret = clk_prepare_enable(priv->reg_clk);
+		if (ret) {
+			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
+			goto err_core_clk;
+		}
+	}
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (ret)
+		goto err_reg_clk;
+
+	priv->context_pool = dmam_pool_create("safexcel-context", dev,
+					      sizeof(struct safexcel_context_record),
+					      1, 0);
+	if (!priv->context_pool) {
+		ret = -ENOMEM;
+		goto err_reg_clk;
+	}
+
+	safexcel_configure(priv);
+
+	priv->ring = devm_kcalloc(dev, priv->config.rings,
+				  sizeof(*priv->ring),
+				  GFP_KERNEL);
+	if (!priv->ring) {
+		ret = -ENOMEM;
+		goto err_reg_clk;
+	}
+
+	for (i = 0; i < priv->config.rings; i++) {
+		char irq_name[6] = {0}; /* "ringX\0" */
+		char wq_name[9] = {0}; /* "wq_ringX\0" */
+		int irq;
+		struct safexcel_ring_irq_data *ring_irq;
+
+		ret = safexcel_init_ring_descriptors(priv,
+						     &priv->ring[i].cdr,
+						     &priv->ring[i].rdr);
+		if (ret)
+			goto err_reg_clk;
+
+		priv->ring[i].rdr_req = devm_kcalloc(dev,
+			EIP197_DEFAULT_RING_SIZE,
+			sizeof(priv->ring[i].rdr_req),
+			GFP_KERNEL);
+		if (!priv->ring[i].rdr_req) {
+			ret = -ENOMEM;
+			goto err_reg_clk;
+		}
+
+		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
+		if (!ring_irq) {
+			ret = -ENOMEM;
+			goto err_reg_clk;
+		}
+
+		ring_irq->priv = priv;
+		ring_irq->ring = i;
+
+		snprintf(irq_name, 6, "ring%d", i);
+		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
+						safexcel_irq_ring_thread,
+						ring_irq);
+		if (irq < 0) {
+			ret = irq;
+			goto err_reg_clk;
+		}
+
+		priv->ring[i].work_data.priv = priv;
+		priv->ring[i].work_data.ring = i;
+		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
+
+		snprintf(wq_name, 9, "wq_ring%d", i);
+		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
+		if (!priv->ring[i].workqueue) {
+			ret = -ENOMEM;
+			goto err_reg_clk;
+		}
+
+		priv->ring[i].requests = 0;
+		priv->ring[i].busy = false;
+
+		crypto_init_queue(&priv->ring[i].queue,
+				  EIP197_DEFAULT_RING_SIZE);
+
+		spin_lock_init(&priv->ring[i].lock);
+		spin_lock_init(&priv->ring[i].queue_lock);
+	}
+
+	platform_set_drvdata(pdev, priv);
+	atomic_set(&priv->ring_used, 0);
+
+	ret = safexcel_hw_init(priv);
+	if (ret) {
+		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
+		goto err_reg_clk;
+	}
+
+	ret = safexcel_register_algorithms(priv);
+	if (ret) {
+		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
+		goto err_reg_clk;
+	}
+
+	return 0;
+
+err_reg_clk:
+	clk_disable_unprepare(priv->reg_clk);
+err_core_clk:
+	clk_disable_unprepare(priv->clk);
+	return ret;
+}
+
+static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->config.rings; i++) {
+		/* clear any pending interrupt */
+		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
+		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
+
+		/* Reset the CDR base address */
+		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+		/* Reset the RDR base address */
+		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+	}
+}
+
+static int safexcel_remove(struct platform_device *pdev)
+{
+	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
+	int i;
+
+	safexcel_unregister_algorithms(priv);
+	safexcel_hw_reset_rings(priv);
+
+	clk_disable_unprepare(priv->clk);
+
+	for (i = 0; i < priv->config.rings; i++)
+		destroy_workqueue(priv->ring[i].workqueue);
+
+	return 0;
+}
+
+static const struct of_device_id safexcel_of_match_table[] = {
+	{
+		.compatible = "inside-secure,safexcel-eip97ies",
+		.data = (void *)EIP97IES,
+	},
+	{
+		.compatible = "inside-secure,safexcel-eip197b",
+		.data = (void *)EIP197B,
+	},
+	{
+		.compatible = "inside-secure,safexcel-eip197d",
+		.data = (void *)EIP197D,
+	},
+	{
+		/* Deprecated. Kept for backward compatibility. */
+		.compatible = "inside-secure,safexcel-eip97",
+		.data = (void *)EIP97IES,
+	},
+	{
+		/* Deprecated. Kept for backward compatibility. */
+		.compatible = "inside-secure,safexcel-eip197",
+		.data = (void *)EIP197B,
+	},
+	{},
+};
+
+static struct platform_driver crypto_safexcel = {
+	.probe		= safexcel_probe,
+	.remove		= safexcel_remove,
+	.driver		= {
+		.name	= "crypto-safexcel",
+		.of_match_table = safexcel_of_match_table,
+	},
+};
+module_platform_driver(crypto_safexcel);
+
+MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
+MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
+MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
+MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
+MODULE_LICENSE("GPL v2");
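safexcel_register_algorithms() exposes the engine only through the standard crypto API (skcipher, aead and ahash); requests enter through the per-ring queues handled by safexcel_dequeue() and complete from the threaded ring IRQ via safexcel_handle_result_descriptor(). Below is a minimal consumer sketch, assuming the usual "cbc(aes)" algorithm name for safexcel_alg_cbc_aes and buffers that are valid DMA targets; example_cbc_aes_encrypt() is a hypothetical name and not part of the driver.

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical consumer: encrypt 'len' bytes in place in 'buf' (a multiple of
 * the AES block size, not on the stack), with a valid AES key and a 16-byte IV.
 */
static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Wait for the asynchronous completion delivered by the driver. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}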
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
new file mode 100644
index 0000000..65624a8
--- /dev/null
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -0,0 +1,722 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#ifndef __SAFEXCEL_H__
+#define __SAFEXCEL_H__
+
+#include <crypto/aead.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/skcipher.h>
+
+#define EIP197_HIA_VERSION_LE			0xca35
+#define EIP197_HIA_VERSION_BE			0x35ca
+
+/* Static configuration */
+#define EIP197_DEFAULT_RING_SIZE		400
+#define EIP197_MAX_TOKENS			8
+#define EIP197_MAX_RINGS			4
+#define EIP197_FETCH_COUNT			1
+#define EIP197_MAX_BATCH_SZ			64
+
+#define EIP197_GFP_FLAGS(base)	((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
+				 GFP_KERNEL : GFP_ATOMIC)
+
+/* Custom on-stack requests (for invalidation) */
+#define EIP197_SKCIPHER_REQ_SIZE	sizeof(struct skcipher_request) + \
+					sizeof(struct safexcel_cipher_req)
+#define EIP197_AHASH_REQ_SIZE		sizeof(struct ahash_request) + \
+					sizeof(struct safexcel_ahash_req)
+#define EIP197_AEAD_REQ_SIZE		sizeof(struct aead_request) + \
+					sizeof(struct safexcel_cipher_req)
+#define EIP197_REQUEST_ON_STACK(name, type, size) \
+	char __##name##_desc[size] CRYPTO_MINALIGN_ATTR; \
+	struct type##_request *name = (void *)__##name##_desc
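+/*
+ * e.g. EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE)
+ * declares a suitably sized and aligned buffer plus a skcipher_request
+ * pointer 'req' aliasing it, used for the invalidation requests built on
+ * the stack.
+ */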
+
+/* Register base offsets */
+#define EIP197_HIA_AIC(priv)		((priv)->base + (priv)->offsets.hia_aic)
+#define EIP197_HIA_AIC_G(priv)		((priv)->base + (priv)->offsets.hia_aic_g)
+#define EIP197_HIA_AIC_R(priv)		((priv)->base + (priv)->offsets.hia_aic_r)
+#define EIP197_HIA_AIC_xDR(priv)	((priv)->base + (priv)->offsets.hia_aic_xdr)
+#define EIP197_HIA_DFE(priv)		((priv)->base + (priv)->offsets.hia_dfe)
+#define EIP197_HIA_DFE_THR(priv)	((priv)->base + (priv)->offsets.hia_dfe_thr)
+#define EIP197_HIA_DSE(priv)		((priv)->base + (priv)->offsets.hia_dse)
+#define EIP197_HIA_DSE_THR(priv)	((priv)->base + (priv)->offsets.hia_dse_thr)
+#define EIP197_HIA_GEN_CFG(priv)	((priv)->base + (priv)->offsets.hia_gen_cfg)
+#define EIP197_PE(priv)			((priv)->base + (priv)->offsets.pe)
+
+/* EIP197 base offsets */
+#define EIP197_HIA_AIC_BASE		0x90000
+#define EIP197_HIA_AIC_G_BASE		0x90000
+#define EIP197_HIA_AIC_R_BASE		0x90800
+#define EIP197_HIA_AIC_xDR_BASE		0x80000
+#define EIP197_HIA_DFE_BASE		0x8c000
+#define EIP197_HIA_DFE_THR_BASE		0x8c040
+#define EIP197_HIA_DSE_BASE		0x8d000
+#define EIP197_HIA_DSE_THR_BASE		0x8d040
+#define EIP197_HIA_GEN_CFG_BASE		0xf0000
+#define EIP197_PE_BASE			0xa0000
+
+/* EIP97 base offsets */
+#define EIP97_HIA_AIC_BASE		0x0
+#define EIP97_HIA_AIC_G_BASE		0x0
+#define EIP97_HIA_AIC_R_BASE		0x0
+#define EIP97_HIA_AIC_xDR_BASE		0x0
+#define EIP97_HIA_DFE_BASE		0xf000
+#define EIP97_HIA_DFE_THR_BASE		0xf200
+#define EIP97_HIA_DSE_BASE		0xf400
+#define EIP97_HIA_DSE_THR_BASE		0xf600
+#define EIP97_HIA_GEN_CFG_BASE		0x10000
+#define EIP97_PE_BASE			0x10000
+
+/* CDR/RDR register offsets */
+#define EIP197_HIA_xDR_OFF(priv, r)		(EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000)
+#define EIP197_HIA_CDR(priv, r)			(EIP197_HIA_xDR_OFF(priv, r))
+#define EIP197_HIA_RDR(priv, r)			(EIP197_HIA_xDR_OFF(priv, r) + 0x800)
+#define EIP197_HIA_xDR_RING_BASE_ADDR_LO	0x0000
+#define EIP197_HIA_xDR_RING_BASE_ADDR_HI	0x0004
+#define EIP197_HIA_xDR_RING_SIZE		0x0018
+#define EIP197_HIA_xDR_DESC_SIZE		0x001c
+#define EIP197_HIA_xDR_CFG			0x0020
+#define EIP197_HIA_xDR_DMA_CFG			0x0024
+#define EIP197_HIA_xDR_THRESH			0x0028
+#define EIP197_HIA_xDR_PREP_COUNT		0x002c
+#define EIP197_HIA_xDR_PROC_COUNT		0x0030
+#define EIP197_HIA_xDR_PREP_PNTR		0x0034
+#define EIP197_HIA_xDR_PROC_PNTR		0x0038
+#define EIP197_HIA_xDR_STAT			0x003c
+
+/* register offsets */
+#define EIP197_HIA_DFE_CFG(n)			(0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_CTRL(n)		(0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_STAT(n)		(0x0004 + (128 * (n)))
+#define EIP197_HIA_DSE_CFG(n)			(0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_CTRL(n)		(0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_STAT(n)		(0x0004 + (128 * (n)))
+#define EIP197_HIA_RA_PE_CTRL(n)		(0x0010 + (8   * (n)))
+#define EIP197_HIA_RA_PE_STAT			0x0014
+#define EIP197_HIA_AIC_R_OFF(r)			((r) * 0x1000)
+#define EIP197_HIA_AIC_R_ENABLE_CTRL(r)		(0xe008 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLED_STAT(r)	(0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ACK(r)			(0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLE_CLR(r)		(0xe014 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_G_ENABLE_CTRL		0xf808
+#define EIP197_HIA_AIC_G_ENABLED_STAT		0xf810
+#define EIP197_HIA_AIC_G_ACK			0xf810
+#define EIP197_HIA_MST_CTRL			0xfff4
+#define EIP197_HIA_OPTIONS			0xfff8
+#define EIP197_HIA_VERSION			0xfffc
+#define EIP197_PE_IN_DBUF_THRES(n)		(0x0000 + (0x2000 * (n)))
+#define EIP197_PE_IN_TBUF_THRES(n)		(0x0100 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_RAM(n)		(0x0800 + (0x2000 * (n)))
+#define EIP197_PE_ICE_PUE_CTRL(n)		(0x0c80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_CTRL(n)		(0x0d04 + (0x2000 * (n)))
+#define EIP197_PE_ICE_FPP_CTRL(n)		(0x0d80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_RAM_CTRL(n)		(0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_FUNCTION_EN(n)		(0x1004 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_CTRL(n)		(0x1008 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_STAT(n)		(0x100c + (0x2000 * (n)))
+#define EIP197_PE_OUT_DBUF_THRES(n)		(0x1c00 + (0x2000 * (n)))
+#define EIP197_PE_OUT_TBUF_THRES(n)		(0x1d00 + (0x2000 * (n)))
+#define EIP197_MST_CTRL				0xfff4
+
+/* EIP197-specific registers, no indirection */
+#define EIP197_CLASSIFICATION_RAMS		0xe0000
+#define EIP197_TRC_CTRL				0xf0800
+#define EIP197_TRC_LASTRES			0xf0804
+#define EIP197_TRC_REGINDEX			0xf0808
+#define EIP197_TRC_PARAMS			0xf0820
+#define EIP197_TRC_FREECHAIN			0xf0824
+#define EIP197_TRC_PARAMS2			0xf0828
+#define EIP197_TRC_ECCCTRL			0xf0830
+#define EIP197_TRC_ECCSTAT			0xf0834
+#define EIP197_TRC_ECCADMINSTAT			0xf0838
+#define EIP197_TRC_ECCDATASTAT			0xf083c
+#define EIP197_TRC_ECCDATA			0xf0840
+#define EIP197_CS_RAM_CTRL			0xf7ff0
+
+/* EIP197_HIA_xDR_DESC_SIZE */
+#define EIP197_xDR_DESC_MODE_64BIT		BIT(31)
+
+/* EIP197_HIA_xDR_DMA_CFG */
+#define EIP197_HIA_xDR_WR_RES_BUF		BIT(22)
+#define EIP197_HIA_xDR_WR_CTRL_BUF		BIT(23)
+#define EIP197_HIA_xDR_WR_OWN_BUF		BIT(24)
+#define EIP197_HIA_xDR_CFG_WR_CACHE(n)		(((n) & 0x7) << 25)
+#define EIP197_HIA_xDR_CFG_RD_CACHE(n)		(((n) & 0x7) << 29)
+
+/* EIP197_HIA_CDR_THRESH */
+#define EIP197_HIA_CDR_THRESH_PROC_PKT(n)	(n)
+#define EIP197_HIA_CDR_THRESH_PROC_MODE		BIT(22)
+#define EIP197_HIA_CDR_THRESH_PKT_MODE		BIT(23)
+#define EIP197_HIA_CDR_THRESH_TIMEOUT(n)	((n) << 24) /* x256 clk cycles */
+
+/* EIP197_HIA_RDR_THRESH */
+#define EIP197_HIA_RDR_THRESH_PROC_PKT(n)	(n)
+#define EIP197_HIA_RDR_THRESH_PKT_MODE		BIT(23)
+#define EIP197_HIA_RDR_THRESH_TIMEOUT(n)	((n) << 24) /* x256 clk cycles */
+
+/* EIP197_HIA_xDR_PREP_COUNT */
+#define EIP197_xDR_PREP_CLR_COUNT		BIT(31)
+
+/* EIP197_HIA_xDR_PROC_COUNT */
+#define EIP197_xDR_PROC_xD_PKT_OFFSET		24
+#define EIP197_xDR_PROC_xD_PKT_MASK		GENMASK(6, 0)
+#define EIP197_xDR_PROC_xD_COUNT(n)		((n) << 2)
+#define EIP197_xDR_PROC_xD_PKT(n)		((n) << 24)
+#define EIP197_xDR_PROC_CLR_COUNT		BIT(31)
+
+/* EIP197_HIA_xDR_STAT */
+#define EIP197_xDR_DMA_ERR			BIT(0)
+#define EIP197_xDR_PREP_CMD_THRES		BIT(1)
+#define EIP197_xDR_ERR				BIT(2)
+#define EIP197_xDR_THRESH			BIT(4)
+#define EIP197_xDR_TIMEOUT			BIT(5)
+
+#define EIP197_HIA_RA_PE_CTRL_RESET		BIT(31)
+#define EIP197_HIA_RA_PE_CTRL_EN		BIT(30)
+
+/* EIP197_HIA_OPTIONS */
+#define EIP197_N_PES_OFFSET			4
+#define EIP197_N_PES_MASK			GENMASK(4, 0)
+#define EIP97_N_PES_MASK			GENMASK(2, 0)
+
+/* EIP197_HIA_AIC_R_ENABLE_CTRL */
+#define EIP197_CDR_IRQ(n)			BIT((n) * 2)
+#define EIP197_RDR_IRQ(n)			BIT((n) * 2 + 1)
+
+/* EIP197_HIA_DFE/DSE_CFG */
+#define EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(n)	((n) << 0)
+#define EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(n)	(((n) & 0x7) << 4)
+#define EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(n)	((n) << 8)
+#define EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE	GENMASK(15, 14)
+#define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n)	((n) << 16)
+#define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n)	(((n) & 0x7) << 20)
+#define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n)	((n) << 24)
+#define EIP197_HIA_DFE_CFG_DIS_DEBUG		(BIT(31) | BIT(29))
+#define EIP197_HIA_DSE_CFG_EN_SINGLE_WR		BIT(29)
+#define EIP197_HIA_DSE_CFG_DIS_DEBUG		BIT(31)
+
+/* EIP197_HIA_DFE/DSE_THR_CTRL */
+#define EIP197_DxE_THR_CTRL_EN			BIT(30)
+#define EIP197_DxE_THR_CTRL_RESET_PE		BIT(31)
+
+/* EIP197_HIA_AIC_G_ENABLED_STAT */
+#define EIP197_G_IRQ_DFE(n)			BIT((n) << 1)
+#define EIP197_G_IRQ_DSE(n)			BIT(((n) << 1) + 1)
+#define EIP197_G_IRQ_RING			BIT(16)
+#define EIP197_G_IRQ_PE(n)			BIT((n) + 20)
+
+/* EIP197_HIA_MST_CTRL */
+#define RD_CACHE_3BITS				0x5
+#define WR_CACHE_3BITS				0x3
+#define RD_CACHE_4BITS				(RD_CACHE_3BITS << 1 | BIT(0))
+#define WR_CACHE_4BITS				(WR_CACHE_3BITS << 1 | BIT(0))
+#define EIP197_MST_CTRL_RD_CACHE(n)		(((n) & 0xf) << 0)
+#define EIP197_MST_CTRL_WD_CACHE(n)		(((n) & 0xf) << 4)
+#define EIP197_MST_CTRL_TX_MAX_CMD(n)		(((n) & 0xf) << 20)
+#define EIP197_MST_CTRL_BYTE_SWAP		BIT(24)
+#define EIP197_MST_CTRL_NO_BYTE_SWAP		BIT(25)
+
+/* EIP197_PE_IN_DBUF/TBUF_THRES */
+#define EIP197_PE_IN_xBUF_THRES_MIN(n)		((n) << 8)
+#define EIP197_PE_IN_xBUF_THRES_MAX(n)		((n) << 12)
+
+/* EIP197_PE_OUT_DBUF_THRES */
+#define EIP197_PE_OUT_DBUF_THRES_MIN(n)		((n) << 0)
+#define EIP197_PE_OUT_DBUF_THRES_MAX(n)		((n) << 4)
+
+/* EIP197_PE_ICE_SCRATCH_CTRL */
+#define EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER		BIT(2)
+#define EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN		BIT(3)
+#define EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS	BIT(24)
+#define EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS	BIT(25)
+
+/* EIP197_PE_ICE_SCRATCH_RAM */
+#define EIP197_NUM_OF_SCRATCH_BLOCKS		32
+
+/* EIP197_PE_ICE_PUE/FPP_CTRL */
+#define EIP197_PE_ICE_x_CTRL_SW_RESET			BIT(0)
+#define EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR		BIT(14)
+#define EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR		BIT(15)
+
+/* EIP197_PE_ICE_RAM_CTRL */
+#define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN	BIT(0)
+#define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN	BIT(1)
+
+/* EIP197_PE_EIP96_FUNCTION_EN */
+#define EIP197_FUNCTION_RSVD			(BIT(6) | BIT(15) | BIT(20) | BIT(23))
+#define EIP197_PROTOCOL_HASH_ONLY		BIT(0)
+#define EIP197_PROTOCOL_ENCRYPT_ONLY		BIT(1)
+#define EIP197_PROTOCOL_HASH_ENCRYPT		BIT(2)
+#define EIP197_PROTOCOL_HASH_DECRYPT		BIT(3)
+#define EIP197_PROTOCOL_ENCRYPT_HASH		BIT(4)
+#define EIP197_PROTOCOL_DECRYPT_HASH		BIT(5)
+#define EIP197_ALG_ARC4				BIT(7)
+#define EIP197_ALG_AES_ECB			BIT(8)
+#define EIP197_ALG_AES_CBC			BIT(9)
+#define EIP197_ALG_AES_CTR_ICM			BIT(10)
+#define EIP197_ALG_AES_OFB			BIT(11)
+#define EIP197_ALG_AES_CFB			BIT(12)
+#define EIP197_ALG_DES_ECB			BIT(13)
+#define EIP197_ALG_DES_CBC			BIT(14)
+#define EIP197_ALG_DES_OFB			BIT(16)
+#define EIP197_ALG_DES_CFB			BIT(17)
+#define EIP197_ALG_3DES_ECB			BIT(18)
+#define EIP197_ALG_3DES_CBC			BIT(19)
+#define EIP197_ALG_3DES_OFB			BIT(21)
+#define EIP197_ALG_3DES_CFB			BIT(22)
+#define EIP197_ALG_MD5				BIT(24)
+#define EIP197_ALG_HMAC_MD5			BIT(25)
+#define EIP197_ALG_SHA1				BIT(26)
+#define EIP197_ALG_HMAC_SHA1			BIT(27)
+#define EIP197_ALG_SHA2				BIT(28)
+#define EIP197_ALG_HMAC_SHA2			BIT(29)
+#define EIP197_ALG_AES_XCBC_MAC			BIT(30)
+#define EIP197_ALG_GCM_HASH			BIT(31)
+
+/* EIP197_PE_EIP96_CONTEXT_CTRL */
+#define EIP197_CONTEXT_SIZE(n)			(n)
+#define EIP197_ADDRESS_MODE			BIT(8)
+#define EIP197_CONTROL_MODE			BIT(9)
+
+/* Context Control */
+struct safexcel_context_record {
+	u32 control0;
+	u32 control1;
+
+	__le32 data[40];
+} __packed;
+
+/* control0 */
+#define CONTEXT_CONTROL_TYPE_NULL_OUT		0x0
+#define CONTEXT_CONTROL_TYPE_NULL_IN		0x1
+#define CONTEXT_CONTROL_TYPE_HASH_OUT		0x2
+#define CONTEXT_CONTROL_TYPE_HASH_IN		0x3
+#define CONTEXT_CONTROL_TYPE_CRYPTO_OUT		0x4
+#define CONTEXT_CONTROL_TYPE_CRYPTO_IN		0x5
+#define CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT	0x6
+#define CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN	0x7
+#define CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT	0xe
+#define CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN	0xf
+#define CONTEXT_CONTROL_RESTART_HASH		BIT(4)
+#define CONTEXT_CONTROL_NO_FINISH_HASH		BIT(5)
+#define CONTEXT_CONTROL_SIZE(n)			((n) << 8)
+#define CONTEXT_CONTROL_KEY_EN			BIT(16)
+#define CONTEXT_CONTROL_CRYPTO_ALG_DES		(0x0 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_3DES		(0x2 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_AES128	(0x5 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_AES192	(0x6 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_AES256	(0x7 << 17)
+#define CONTEXT_CONTROL_DIGEST_PRECOMPUTED	(0x1 << 21)
+#define CONTEXT_CONTROL_DIGEST_HMAC		(0x3 << 21)
+#define CONTEXT_CONTROL_CRYPTO_ALG_MD5		(0x0 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA1		(0x2 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA224	(0x4 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA256	(0x3 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA384	(0x6 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA512	(0x5 << 23)
+#define CONTEXT_CONTROL_INV_FR			(0x5 << 24)
+#define CONTEXT_CONTROL_INV_TR			(0x6 << 24)
+
+/* control1 */
+#define CONTEXT_CONTROL_CRYPTO_MODE_ECB		(0 << 0)
+#define CONTEXT_CONTROL_CRYPTO_MODE_CBC		(1 << 0)
+#define CONTEXT_CONTROL_IV0			BIT(5)
+#define CONTEXT_CONTROL_IV1			BIT(6)
+#define CONTEXT_CONTROL_IV2			BIT(7)
+#define CONTEXT_CONTROL_IV3			BIT(8)
+#define CONTEXT_CONTROL_DIGEST_CNT		BIT(9)
+#define CONTEXT_CONTROL_COUNTER_MODE		BIT(10)
+#define CONTEXT_CONTROL_HASH_STORE		BIT(19)
+
+/* The hash counter given to the engine in the context has a granularity of
+ * 64 bits.
+ */
+#define EIP197_COUNTER_BLOCK_SIZE		64
+
+/* EIP197_CS_RAM_CTRL */
+#define EIP197_TRC_ENABLE_0			BIT(4)
+#define EIP197_TRC_ENABLE_1			BIT(5)
+#define EIP197_TRC_ENABLE_2			BIT(6)
+#define EIP197_TRC_ENABLE_MASK			GENMASK(6, 4)
+
+/* EIP197_TRC_PARAMS */
+#define EIP197_TRC_PARAMS_SW_RESET		BIT(0)
+#define EIP197_TRC_PARAMS_DATA_ACCESS		BIT(2)
+#define EIP197_TRC_PARAMS_HTABLE_SZ(x)		((x) << 4)
+#define EIP197_TRC_PARAMS_BLK_TIMER_SPEED(x)	((x) << 10)
+#define EIP197_TRC_PARAMS_RC_SZ_LARGE(n)	((n) << 18)
+
+/* EIP197_TRC_FREECHAIN */
+#define EIP197_TRC_FREECHAIN_HEAD_PTR(p)	(p)
+#define EIP197_TRC_FREECHAIN_TAIL_PTR(p)	((p) << 16)
+
+/* EIP197_TRC_PARAMS2 */
+#define EIP197_TRC_PARAMS2_HTABLE_PTR(p)	(p)
+#define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n)	((n) << 18)
+
+/* Cache helpers */
+#define EIP197B_CS_RC_MAX			52
+#define EIP197D_CS_RC_MAX			96
+#define EIP197_CS_RC_SIZE			(4 * sizeof(u32))
+#define EIP197_CS_RC_NEXT(x)			(x)
+#define EIP197_CS_RC_PREV(x)			((x) << 10)
+#define EIP197_RC_NULL				0x3ff
+#define EIP197B_CS_TRC_REC_WC			59
+#define EIP197D_CS_TRC_REC_WC			64
+#define EIP197B_CS_TRC_LG_REC_WC		73
+#define EIP197D_CS_TRC_LG_REC_WC		80
+#define EIP197B_CS_HT_WC			64
+#define EIP197D_CS_HT_WC			256
+
+
+/* Result data */
+struct result_data_desc {
+	u32 packet_length:17;
+	u32 error_code:15;
+
+	u8 bypass_length:4;
+	u8 e15:1;
+	u16 rsvd0;
+	u8 hash_bytes:1;
+	u8 hash_length:6;
+	u8 generic_bytes:1;
+	u8 checksum:1;
+	u8 next_header:1;
+	u8 length:1;
+
+	u16 application_id;
+	u16 rsvd1;
+
+	u32 rsvd2;
+} __packed;
+
+
+/* Basic Result Descriptor format */
+struct safexcel_result_desc {
+	u32 particle_size:17;
+	u8 rsvd0:3;
+	u8 descriptor_overflow:1;
+	u8 buffer_overflow:1;
+	u8 last_seg:1;
+	u8 first_seg:1;
+	u16 result_size:8;
+
+	u32 rsvd1;
+
+	u32 data_lo;
+	u32 data_hi;
+
+	struct result_data_desc result_data;
+} __packed;
+
+struct safexcel_token {
+	u32 packet_length:17;
+	u8 stat:2;
+	u16 instructions:9;
+	u8 opcode:4;
+} __packed;
+
+#define EIP197_TOKEN_HASH_RESULT_VERIFY		BIT(16)
+
+#define EIP197_TOKEN_STAT_LAST_HASH		BIT(0)
+#define EIP197_TOKEN_STAT_LAST_PACKET		BIT(1)
+#define EIP197_TOKEN_OPCODE_DIRECTION		0x0
+#define EIP197_TOKEN_OPCODE_INSERT		0x2
+#define EIP197_TOKEN_OPCODE_NOOP		EIP197_TOKEN_OPCODE_INSERT
+#define EIP197_TOKEN_OPCODE_RETRIEVE		0x4
+#define EIP197_TOKEN_OPCODE_VERIFY		0xd
+#define EIP197_TOKEN_OPCODE_BYPASS		GENMASK(3, 0)
+
+static inline void eip197_noop_token(struct safexcel_token *token)
+{
+	token->opcode = EIP197_TOKEN_OPCODE_NOOP;
+	token->packet_length = BIT(2);
+}
+
+/* Instructions */
+#define EIP197_TOKEN_INS_INSERT_HASH_DIGEST	0x1c
+#define EIP197_TOKEN_INS_TYPE_OUTPUT		BIT(5)
+#define EIP197_TOKEN_INS_TYPE_HASH		BIT(6)
+#define EIP197_TOKEN_INS_TYPE_CRYTO		BIT(7)
+#define EIP197_TOKEN_INS_LAST			BIT(8)
+
+/* Processing Engine Control Data  */
+struct safexcel_control_data_desc {
+	u32 packet_length:17;
+	u16 options:13;
+	u8 type:2;
+
+	u16 application_id;
+	u16 rsvd;
+
+	u8 refresh:2;
+	u32 context_lo:30;
+	u32 context_hi;
+
+	u32 control0;
+	u32 control1;
+
+	u32 token[EIP197_MAX_TOKENS];
+} __packed;
+
+#define EIP197_OPTION_MAGIC_VALUE	BIT(0)
+#define EIP197_OPTION_64BIT_CTX		BIT(1)
+#define EIP197_OPTION_CTX_CTRL_IN_CMD	BIT(8)
+#define EIP197_OPTION_2_TOKEN_IV_CMD	GENMASK(11, 10)
+#define EIP197_OPTION_4_TOKEN_IV_CMD	GENMASK(11, 9)
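+/* As used by the cipher code, EIP197_OPTION_2_TOKEN_IV_CMD places a 64-bit
+ * (DES) IV in the first two token words of the command descriptor, while
+ * EIP197_OPTION_4_TOKEN_IV_CMD places a 128-bit (AES) IV in the first four.
+ */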
+
+#define EIP197_TYPE_EXTENDED		0x3
+
+/* Basic Command Descriptor format */
+struct safexcel_command_desc {
+	u32 particle_size:17;
+	u8 rsvd0:5;
+	u8 last_seg:1;
+	u8 first_seg:1;
+	u16 additional_cdata_size:8;
+
+	u32 rsvd1;
+
+	u32 data_lo;
+	u32 data_hi;
+
+	struct safexcel_control_data_desc control_data;
+} __packed;
+
+/*
+ * Internal structures & functions
+ */
+
+enum eip197_fw {
+	FW_IFPP = 0,
+	FW_IPUE,
+	FW_NB
+};
+
+struct safexcel_desc_ring {
+	void *base;
+	void *base_end;
+	dma_addr_t base_dma;
+
+	/* write and read pointers */
+	void *write;
+	void *read;
+
+	/* descriptor element offset */
+	unsigned offset;
+};
+
+enum safexcel_alg_type {
+	SAFEXCEL_ALG_TYPE_SKCIPHER,
+	SAFEXCEL_ALG_TYPE_AEAD,
+	SAFEXCEL_ALG_TYPE_AHASH,
+};
+
+struct safexcel_config {
+	u32 pes;
+	u32 rings;
+
+	u32 cd_size;
+	u32 cd_offset;
+
+	u32 rd_size;
+	u32 rd_offset;
+};
+
+struct safexcel_work_data {
+	struct work_struct work;
+	struct safexcel_crypto_priv *priv;
+	int ring;
+};
+
+struct safexcel_ring {
+	spinlock_t lock;
+
+	struct workqueue_struct *workqueue;
+	struct safexcel_work_data work_data;
+
+	/* command/result rings */
+	struct safexcel_desc_ring cdr;
+	struct safexcel_desc_ring rdr;
+
+	/* result ring crypto API request */
+	struct crypto_async_request **rdr_req;
+
+	/* queue */
+	struct crypto_queue queue;
+	spinlock_t queue_lock;
+
+	/* Number of requests in the engine. */
+	int requests;
+
+	/* The ring is currently handling at least one request */
+	bool busy;
+
+	/* Store for current requests when bailing out of the dequeueing
+	 * function when not enough resources are available.
+	 */
+	struct crypto_async_request *req;
+	struct crypto_async_request *backlog;
+};
+
+enum safexcel_eip_version {
+	EIP97IES = BIT(0),
+	EIP197B  = BIT(1),
+	EIP197D  = BIT(2),
+};
+
+struct safexcel_register_offsets {
+	u32 hia_aic;
+	u32 hia_aic_g;
+	u32 hia_aic_r;
+	u32 hia_aic_xdr;
+	u32 hia_dfe;
+	u32 hia_dfe_thr;
+	u32 hia_dse;
+	u32 hia_dse_thr;
+	u32 hia_gen_cfg;
+	u32 pe;
+};
+
+enum safexcel_flags {
+	EIP197_TRC_CACHE = BIT(0),
+};
+
+struct safexcel_crypto_priv {
+	void __iomem *base;
+	struct device *dev;
+	struct clk *clk;
+	struct clk *reg_clk;
+	struct safexcel_config config;
+
+	enum safexcel_eip_version version;
+	struct safexcel_register_offsets offsets;
+	u32 flags;
+
+	/* context DMA pool */
+	struct dma_pool *context_pool;
+
+	atomic_t ring_used;
+
+	struct safexcel_ring *ring;
+};
+
+struct safexcel_context {
+	int (*send)(struct crypto_async_request *req, int ring,
+		    int *commands, int *results);
+	int (*handle_result)(struct safexcel_crypto_priv *priv, int ring,
+			     struct crypto_async_request *req, bool *complete,
+			     int *ret);
+	struct safexcel_context_record *ctxr;
+	dma_addr_t ctxr_dma;
+
+	int ring;
+	bool needs_inv;
+	bool exit_inv;
+};
+
+struct safexcel_ahash_export_state {
+	u64 len[2];
+	u64 processed[2];
+
+	u32 digest;
+
+	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
+	u8 cache[SHA512_BLOCK_SIZE];
+};
+
+/*
+ * Template structure used to describe the algorithms in order to register
+ * them. It also holds our private structure, which is the only practical
+ * way in this framework to avoid having global pointers.
+ */
+struct safexcel_alg_template {
+	struct safexcel_crypto_priv *priv;
+	enum safexcel_alg_type type;
+	u32 engines;
+	union {
+		struct skcipher_alg skcipher;
+		struct aead_alg aead;
+		struct ahash_alg ahash;
+	} alg;
+};
+
+struct safexcel_inv_result {
+	struct completion completion;
+	int error;
+};
+
+void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
+int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
+				struct safexcel_result_desc *rdesc);
+void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
+int safexcel_invalidate_cache(struct crypto_async_request *async,
+			      struct safexcel_crypto_priv *priv,
+			      dma_addr_t ctxr_dma, int ring);
+int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
+				   struct safexcel_desc_ring *cdr,
+				   struct safexcel_desc_ring *rdr);
+int safexcel_select_ring(struct safexcel_crypto_priv *priv);
+void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
+			      struct safexcel_desc_ring *ring);
+void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int  ring);
+void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
+				 struct safexcel_desc_ring *ring);
+struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
+						 int ring_id,
+						 bool first, bool last,
+						 dma_addr_t data, u32 len,
+						 u32 full_data_len,
+						 dma_addr_t context);
+struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
+						 int ring_id,
+						bool first, bool last,
+						dma_addr_t data, u32 len);
+int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+				  int ring);
+int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+				  int ring,
+				  struct safexcel_result_desc *rdesc);
+void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+			  int ring,
+			  struct safexcel_result_desc *rdesc,
+			  struct crypto_async_request *req);
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
+void safexcel_inv_complete(struct crypto_async_request *req, int error);
+int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
+			 void *istate, void *ostate);
+
+/* available algorithms */
+extern struct safexcel_alg_template safexcel_alg_ecb_des;
+extern struct safexcel_alg_template safexcel_alg_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_ecb_aes;
+extern struct safexcel_alg_template safexcel_alg_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_md5;
+extern struct safexcel_alg_template safexcel_alg_sha1;
+extern struct safexcel_alg_template safexcel_alg_sha224;
+extern struct safexcel_alg_template safexcel_alg_sha256;
+extern struct safexcel_alg_template safexcel_alg_sha384;
+extern struct safexcel_alg_template safexcel_alg_sha512;
+extern struct safexcel_alg_template safexcel_alg_hmac_md5;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha384;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha512;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes;
+
+#endif
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
new file mode 100644
index 0000000..3aef1d4
--- /dev/null
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -0,0 +1,1336 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/skcipher.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
+#include "safexcel.h"
+
+enum safexcel_cipher_direction {
+	SAFEXCEL_ENCRYPT,
+	SAFEXCEL_DECRYPT,
+};
+
+enum safexcel_cipher_alg {
+	SAFEXCEL_DES,
+	SAFEXCEL_3DES,
+	SAFEXCEL_AES,
+};
+
+struct safexcel_cipher_ctx {
+	struct safexcel_context base;
+	struct safexcel_crypto_priv *priv;
+
+	u32 mode;
+	enum safexcel_cipher_alg alg;
+	bool aead;
+
+	__le32 key[8];
+	unsigned int key_len;
+
+	/* Everything below is AEAD specific */
+	u32 hash_alg;
+	u32 state_sz;
+	u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+	u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
+};
+
+struct safexcel_cipher_req {
+	enum safexcel_cipher_direction direction;
+	bool needs_inv;
+};
+
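+/* Build the single-pass cipher token: for CBC modes the IV is first copied
+ * into the leading token words, then one DIRECTION instruction covers the
+ * whole payload.
+ */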
+static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
+				    struct safexcel_command_desc *cdesc,
+				    u32 length)
+{
+	struct safexcel_token *token;
+	unsigned offset = 0;
+
+	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+		switch (ctx->alg) {
+		case SAFEXCEL_DES:
+			offset = DES_BLOCK_SIZE / sizeof(u32);
+			memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE);
+			cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+			break;
+		case SAFEXCEL_3DES:
+			offset = DES3_EDE_BLOCK_SIZE / sizeof(u32);
+			memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE);
+			cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+			break;
+
+		case SAFEXCEL_AES:
+			offset = AES_BLOCK_SIZE / sizeof(u32);
+			memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+			cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+			break;
+		}
+	}
+
+	token = (struct safexcel_token *)(cdesc->control_data.token + offset);
+
+	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+	token[0].packet_length = length;
+	token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
+			EIP197_TOKEN_STAT_LAST_HASH;
+	token[0].instructions = EIP197_TOKEN_INS_LAST |
+				EIP197_TOKEN_INS_TYPE_CRYTO |
+				EIP197_TOKEN_INS_TYPE_OUTPUT;
+}
+
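+/* Build the AEAD token sequence: hash the associated data, process (and hash)
+ * the payload, then either insert the computed digest (encryption) or
+ * retrieve it and have the engine verify it against the received one
+ * (decryption).
+ */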
+static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
+				struct safexcel_command_desc *cdesc,
+				enum safexcel_cipher_direction direction,
+				u32 cryptlen, u32 assoclen, u32 digestsize)
+{
+	struct safexcel_token *token;
+	unsigned offset = 0;
+
+	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+		offset = AES_BLOCK_SIZE / sizeof(u32);
+		memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+
+		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+	}
+
+	token = (struct safexcel_token *)(cdesc->control_data.token + offset);
+
+	if (direction == SAFEXCEL_DECRYPT)
+		cryptlen -= digestsize;
+
+	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+	token[0].packet_length = assoclen;
+	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH |
+				EIP197_TOKEN_INS_TYPE_OUTPUT;
+
+	token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+	token[1].packet_length = cryptlen;
+	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
+	token[1].instructions = EIP197_TOKEN_INS_LAST |
+				EIP197_TOKEN_INS_TYPE_CRYTO |
+				EIP197_TOKEN_INS_TYPE_HASH |
+				EIP197_TOKEN_INS_TYPE_OUTPUT;
+
+	if (direction == SAFEXCEL_ENCRYPT) {
+		token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
+		token[2].packet_length = digestsize;
+		token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
+				EIP197_TOKEN_STAT_LAST_PACKET;
+		token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+					EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+	} else {
+		token[2].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
+		token[2].packet_length = digestsize;
+		token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
+				EIP197_TOKEN_STAT_LAST_PACKET;
+		token[2].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+
+		token[3].opcode = EIP197_TOKEN_OPCODE_VERIFY;
+		token[3].packet_length = digestsize |
+					 EIP197_TOKEN_HASH_RESULT_VERIFY;
+		token[3].stat = EIP197_TOKEN_STAT_LAST_HASH |
+				EIP197_TOKEN_STAT_LAST_PACKET;
+		token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
+	}
+}
+
+static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
+					const u8 *key, unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct crypto_aes_ctx aes;
+	int ret, i;
+
+	ret = crypto_aes_expand_key(&aes, key, len);
+	if (ret) {
+		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return ret;
+	}
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+		for (i = 0; i < len / sizeof(u32); i++) {
+			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+				ctx->base.needs_inv = true;
+				break;
+			}
+		}
+	}
+
+	for (i = 0; i < len / sizeof(u32); i++)
+		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
+
+	ctx->key_len = len;
+
+	memzero_explicit(&aes, sizeof(aes));
+	return 0;
+}
+
+static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
+				    unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_ahash_export_state istate, ostate;
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct crypto_authenc_keys keys;
+
+	if (crypto_authenc_extractkeys(&keys, key, len) != 0)
+		goto badkey;
+
+	if (keys.enckeylen > sizeof(ctx->key))
+		goto badkey;
+
+	/* Encryption key */
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
+	    memcmp(ctx->key, keys.enckey, keys.enckeylen))
+		ctx->base.needs_inv = true;
+
+	/* Auth key */
+	switch (ctx->hash_alg) {
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
+		if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA224:
+		if (safexcel_hmac_setkey("safexcel-sha224", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA256:
+		if (safexcel_hmac_setkey("safexcel-sha256", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA384:
+		if (safexcel_hmac_setkey("safexcel-sha384", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA512:
+		if (safexcel_hmac_setkey("safexcel-sha512", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	default:
+		dev_err(priv->dev, "aead: unsupported hash algorithm\n");
+		goto badkey;
+	}
+
+	crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
+				    CRYPTO_TFM_RES_MASK);
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
+	    (memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
+	     memcmp(ctx->opad, ostate.state, ctx->state_sz)))
+		ctx->base.needs_inv = true;
+
+	/* Now copy the keys into the context */
+	memcpy(ctx->key, keys.enckey, keys.enckeylen);
+	ctx->key_len = keys.enckeylen;
+
+	memcpy(ctx->ipad, &istate.state, ctx->state_sz);
+	memcpy(ctx->opad, &ostate.state, ctx->state_sz);
+
+	memzero_explicit(&keys, sizeof(keys));
+	return 0;
+
+badkey:
+	crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	memzero_explicit(&keys, sizeof(keys));
+	return -EINVAL;
+}
+
+static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
+				    struct crypto_async_request *async,
+				    struct safexcel_cipher_req *sreq,
+				    struct safexcel_command_desc *cdesc)
+{
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ctrl_size;
+
+	if (ctx->aead) {
+		if (sreq->direction == SAFEXCEL_ENCRYPT)
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
+		else
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
+	} else {
+		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
+
+		/* The decryption control type is a combination of the
+		 * encryption type and CONTEXT_CONTROL_TYPE_NULL_IN, for all
+		 * types.
+		 */
+		if (sreq->direction == SAFEXCEL_DECRYPT)
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_NULL_IN;
+	}
+
+	cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
+	cdesc->control_data.control1 |= ctx->mode;
+
+	if (ctx->aead)
+		cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC |
+						ctx->hash_alg;
+
+	if (ctx->alg == SAFEXCEL_DES) {
+		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_DES;
+	} else if (ctx->alg == SAFEXCEL_3DES) {
+		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES;
+	} else if (ctx->alg == SAFEXCEL_AES) {
+		switch (ctx->key_len) {
+		case AES_KEYSIZE_128:
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
+			break;
+		case AES_KEYSIZE_192:
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
+			break;
+		case AES_KEYSIZE_256:
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
+			break;
+		default:
+			dev_err(priv->dev, "aes keysize not supported: %u\n",
+				ctx->key_len);
+			return -EINVAL;
+		}
+	}
+
+	ctrl_size = ctx->key_len / sizeof(u32);
+	if (ctx->aead)
+		/* Take into account the ipad+opad digests */
+		ctrl_size += ctx->state_sz / sizeof(u32) * 2;
+	cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);
+
+	return 0;
+}
+
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+				      struct crypto_async_request *async,
+				      struct scatterlist *src,
+				      struct scatterlist *dst,
+				      unsigned int cryptlen,
+				      struct safexcel_cipher_req *sreq,
+				      bool *should_complete, int *ret)
+{
+	struct safexcel_result_desc *rdesc;
+	int ndesc = 0;
+
+	*ret = 0;
+
+	do {
+		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+		if (IS_ERR(rdesc)) {
+			dev_err(priv->dev,
+				"cipher: result: could not retrieve the result descriptor\n");
+			*ret = PTR_ERR(rdesc);
+			break;
+		}
+
+		if (likely(!*ret))
+			*ret = safexcel_rdesc_check_errors(priv, rdesc);
+
+		ndesc++;
+	} while (!rdesc->last_seg);
+
+	safexcel_complete(priv, ring);
+
+	if (src == dst) {
+		dma_unmap_sg(priv->dev, src,
+			     sg_nents_for_len(src, cryptlen),
+			     DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(priv->dev, src,
+			     sg_nents_for_len(src, cryptlen),
+			     DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst,
+			     sg_nents_for_len(dst, cryptlen),
+			     DMA_FROM_DEVICE);
+	}
+
+	*should_complete = true;
+
+	return ndesc;
+}
+
+static int safexcel_send_req(struct crypto_async_request *base, int ring,
+			     struct safexcel_cipher_req *sreq,
+			     struct scatterlist *src, struct scatterlist *dst,
+			     unsigned int cryptlen, unsigned int assoclen,
+			     unsigned int digestsize, u8 *iv, int *commands,
+			     int *results)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct safexcel_command_desc *cdesc;
+	struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
+	struct scatterlist *sg;
+	unsigned int totlen = cryptlen + assoclen;
+	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
+	int i, ret = 0;
+
+	if (src == dst) {
+		nr_src = dma_map_sg(priv->dev, src,
+				    sg_nents_for_len(src, totlen),
+				    DMA_BIDIRECTIONAL);
+		nr_dst = nr_src;
+		if (!nr_src)
+			return -EINVAL;
+	} else {
+		nr_src = dma_map_sg(priv->dev, src,
+				    sg_nents_for_len(src, totlen),
+				    DMA_TO_DEVICE);
+		if (!nr_src)
+			return -EINVAL;
+
+		nr_dst = dma_map_sg(priv->dev, dst,
+				    sg_nents_for_len(dst, totlen),
+				    DMA_FROM_DEVICE);
+		if (!nr_dst) {
+			dma_unmap_sg(priv->dev, src,
+				     sg_nents_for_len(src, totlen),
+				     DMA_TO_DEVICE);
+			return -EINVAL;
+		}
+	}
+
+	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
+
+	if (ctx->aead) {
+		memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
+		       ctx->ipad, ctx->state_sz);
+		memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32),
+		       ctx->opad, ctx->state_sz);
+	}
+
+	/* command descriptors */
+	for_each_sg(src, sg, nr_src, i) {
+		int len = sg_dma_len(sg);
+
+		/* Do not overflow the request */
+		if (queued - len < 0)
+			len = queued;
+
+		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
+					   sg_dma_address(sg), len, totlen,
+					   ctx->base.ctxr_dma);
+		if (IS_ERR(cdesc)) {
+			/* No space left in the command descriptor ring */
+			ret = PTR_ERR(cdesc);
+			goto cdesc_rollback;
+		}
+		n_cdesc++;
+
+		if (n_cdesc == 1) {
+			safexcel_context_control(ctx, base, sreq, cdesc);
+			if (ctx->aead)
+				safexcel_aead_token(ctx, iv, cdesc,
+						    sreq->direction, cryptlen,
+						    assoclen, digestsize);
+			else
+				safexcel_skcipher_token(ctx, iv, cdesc,
+							cryptlen);
+		}
+
+		queued -= len;
+		if (!queued)
+			break;
+	}
+
+	/* result descriptors */
+	for_each_sg(dst, sg, nr_dst, i) {
+		bool first = !i, last = (i == nr_dst - 1);
+		u32 len = sg_dma_len(sg);
+
+		rdesc = safexcel_add_rdesc(priv, ring, first, last,
+					   sg_dma_address(sg), len);
+		if (IS_ERR(rdesc)) {
+			/* No space left in the result descriptor ring */
+			ret = PTR_ERR(rdesc);
+			goto rdesc_rollback;
+		}
+		if (first)
+			first_rdesc = rdesc;
+		n_rdesc++;
+	}
+
+	safexcel_rdr_req_set(priv, ring, first_rdesc, base);
+
+	*commands = n_cdesc;
+	*results = n_rdesc;
+	return 0;
+
+rdesc_rollback:
+	for (i = 0; i < n_rdesc; i++)
+		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
+cdesc_rollback:
+	for (i = 0; i < n_cdesc; i++)
+		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
+
+	if (src == dst) {
+		dma_unmap_sg(priv->dev, src,
+			     sg_nents_for_len(src, totlen),
+			     DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(priv->dev, src,
+			     sg_nents_for_len(src, totlen),
+			     DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst,
+			     sg_nents_for_len(dst, totlen),
+			     DMA_FROM_DEVICE);
+	}
+
+	return ret;
+}
+
+static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
+				      int ring,
+				      struct crypto_async_request *base,
+				      bool *should_complete, int *ret)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+	struct safexcel_result_desc *rdesc;
+	int ndesc = 0, enq_ret;
+
+	*ret = 0;
+
+	do {
+		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+		if (IS_ERR(rdesc)) {
+			dev_err(priv->dev,
+				"cipher: invalidate: could not retrieve the result descriptor\n");
+			*ret = PTR_ERR(rdesc);
+			break;
+		}
+
+		if (likely(!*ret))
+			*ret = safexcel_rdesc_check_errors(priv, rdesc);
+
+		ndesc++;
+	} while (!rdesc->last_seg);
+
+	safexcel_complete(priv, ring);
+
+	if (ctx->base.exit_inv) {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+
+		*should_complete = true;
+
+		return ndesc;
+	}
+
+	ring = safexcel_select_ring(priv);
+	ctx->base.ring = ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	if (enq_ret != -EINPROGRESS)
+		*ret = enq_ret;
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	*should_complete = false;
+
+	return ndesc;
+}
+
+static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
+					   int ring,
+					   struct crypto_async_request *async,
+					   bool *should_complete, int *ret)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	int err;
+
+	if (sreq->needs_inv) {
+		sreq->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async, req->src,
+						 req->dst, req->cryptlen, sreq,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
+static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
+				       int ring,
+				       struct crypto_async_request *async,
+				       bool *should_complete, int *ret)
+{
+	struct aead_request *req = aead_request_cast(async);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
+	int err;
+
+	if (sreq->needs_inv) {
+		sreq->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async, req->src,
+						 req->dst,
+						 req->cryptlen + crypto_aead_authsize(tfm),
+						 sreq, should_complete, ret);
+	}
+
+	return err;
+}
+
+static int safexcel_cipher_send_inv(struct crypto_async_request *base,
+				    int ring, int *commands, int *results)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
+	if (unlikely(ret))
+		return ret;
+
+	*commands = 1;
+	*results = 1;
+
+	return 0;
+}
+
+static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
+				  int *commands, int *results)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
+
+	if (sreq->needs_inv)
+		ret = safexcel_cipher_send_inv(async, ring, commands, results);
+	else
+		ret = safexcel_send_req(async, ring, sreq, req->src,
+					req->dst, req->cryptlen, 0, 0, req->iv,
+					commands, results);
+	return ret;
+}
+
+static int safexcel_aead_send(struct crypto_async_request *async, int ring,
+			      int *commands, int *results)
+{
+	struct aead_request *req = aead_request_cast(async);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
+
+	if (sreq->needs_inv)
+		ret = safexcel_cipher_send_inv(async, ring, commands, results);
+	else
+		ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
+					req->cryptlen, req->assoclen,
+					crypto_aead_authsize(tfm), req->iv,
+					commands, results);
+	return ret;
+}
+
+static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
+				    struct crypto_async_request *base,
+				    struct safexcel_cipher_req *sreq,
+				    struct safexcel_inv_result *result)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ring = ctx->base.ring;
+
+	init_completion(&result->completion);
+
+	ctx = crypto_tfm_ctx(base->tfm);
+	ctx->base.exit_inv = true;
+	sreq->needs_inv = true;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	crypto_enqueue_request(&priv->ring[ring].queue, base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	wait_for_completion(&result->completion);
+
+	if (result->error) {
+		dev_warn(priv->dev,
+			"cipher: sync: invalidate: completion error %d\n",
+			 result->error);
+		return result->error;
+	}
+
+	return 0;
+}
+
+static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm)
+{
+	EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	struct safexcel_inv_result result = {};
+
+	memset(req, 0, sizeof(struct skcipher_request));
+
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      safexcel_inv_complete, &result);
+	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+
+	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
+}
+
+static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
+{
+	EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE);
+	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
+	struct safexcel_inv_result result = {};
+
+	memset(req, 0, sizeof(struct aead_request));
+
+	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				  safexcel_inv_complete, &result);
+	aead_request_set_tfm(req, __crypto_aead_cast(tfm));
+
+	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
+}
+
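+/* Common request entry point: allocate (or flag for invalidation) the per-tfm
+ * context record, then enqueue the request on the ring's crypto queue and
+ * kick the ring worker.
+ */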
+static int safexcel_queue_req(struct crypto_async_request *base,
+			struct safexcel_cipher_req *sreq,
+			enum safexcel_cipher_direction dir, u32 mode,
+			enum safexcel_cipher_alg alg)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret, ring;
+
+	sreq->needs_inv = false;
+	sreq->direction = dir;
+	ctx->alg = alg;
+	ctx->mode = mode;
+
+	if (ctx->base.ctxr) {
+		if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
+			sreq->needs_inv = true;
+			ctx->base.needs_inv = false;
+		}
+	} else {
+		ctx->base.ring = safexcel_select_ring(priv);
+		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
+						 EIP197_GFP_FLAGS(*base),
+						 &ctx->base.ctxr_dma);
+		if (!ctx->base.ctxr)
+			return -ENOMEM;
+	}
+
+	ring = ctx->base.ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	return ret;
+}
+
+static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_AES);
+}
+
+static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_AES);
+}
+
+static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_alg_template *tmpl =
+		container_of(tfm->__crt_alg, struct safexcel_alg_template,
+			     alg.skcipher.base);
+
+	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+				    sizeof(struct safexcel_cipher_req));
+
+	ctx->priv = tmpl->priv;
+
+	ctx->base.send = safexcel_skcipher_send;
+	ctx->base.handle_result = safexcel_skcipher_handle_result;
+	return 0;
+}
+
+static int safexcel_cipher_cra_exit(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	memzero_explicit(ctx->key, sizeof(ctx->key));
+
+	/* context not allocated, skip invalidation */
+	if (!ctx->base.ctxr)
+		return -ENOMEM;
+
+	memzero_explicit(ctx->base.ctxr->data, sizeof(ctx->base.ctxr->data));
+	return 0;
+}
+
+static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	if (safexcel_cipher_cra_exit(tfm))
+		return;
+
+	if (priv->flags & EIP197_TRC_CACHE) {
+		ret = safexcel_skcipher_exit_inv(tfm);
+		if (ret)
+			dev_warn(priv->dev, "skcipher: invalidation error %d\n",
+				 ret);
+	} else {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+	}
+}
+
+static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	if (safexcel_cipher_cra_exit(tfm))
+		return;
+
+	if (priv->flags & EIP197_TRC_CACHE) {
+		ret = safexcel_aead_exit_inv(tfm);
+		if (ret)
+			dev_warn(priv->dev, "aead: invalidation error %d\n",
+				 ret);
+	} else {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+	}
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_aes = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_skcipher_aes_setkey,
+		.encrypt = safexcel_ecb_aes_encrypt,
+		.decrypt = safexcel_ecb_aes_decrypt,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.base = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "safexcel-ecb-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_AES);
+}
+
+static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_AES);
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_skcipher_aes_setkey,
+		.encrypt = safexcel_cbc_aes_encrypt,
+		.decrypt = safexcel_cbc_aes_decrypt,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "safexcel-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
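+
+/*
+ * Illustrative use only: once this template is registered, the transform is
+ * reachable through the regular crypto API by name, e.g.:
+ *
+ *	struct crypto_skcipher *tfm;
+ *
+ *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ */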
+
+static int safexcel_cbc_des_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_DES);
+}
+
+static int safexcel_cbc_des_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_DES);
+}
+
+static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
+			       unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+	int ret;
+
+	if (len != DES_KEY_SIZE) {
+		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ret = des_ekey(tmp, key);
+	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	/* if the context exists and the key changed, it needs to be invalidated */
+	if (ctx->base.ctxr_dma)
+		if (memcmp(ctx->key, key, len))
+			ctx->base.needs_inv = true;
+
+	memcpy(ctx->key, key, len);
+	ctx->key_len = len;
+
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_des_setkey,
+		.encrypt = safexcel_cbc_des_encrypt,
+		.decrypt = safexcel_cbc_des_decrypt,
+		.min_keysize = DES_KEY_SIZE,
+		.max_keysize = DES_KEY_SIZE,
+		.ivsize = DES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(des)",
+			.cra_driver_name = "safexcel-cbc-des",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_ecb_des_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_DES);
+}
+
+static int safexcel_ecb_des_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_des_setkey,
+		.encrypt = safexcel_ecb_des_encrypt,
+		.decrypt = safexcel_ecb_des_decrypt,
+		.min_keysize = DES_KEY_SIZE,
+		.max_keysize = DES_KEY_SIZE,
+		.ivsize = DES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "ecb(des)",
+			.cra_driver_name = "safexcel-ecb-des",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_cbc_des3_ede_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_3DES);
+}
+
+static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_3DES);
+}
+
+static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
+				   const u8 *key, unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (len != DES3_EDE_KEY_SIZE) {
+		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	/* if the context exists and the key changed, it needs to be invalidated */
+	if (ctx->base.ctxr_dma) {
+		if (memcmp(ctx->key, key, len))
+			ctx->base.needs_inv = true;
+	}
+
+	memcpy(ctx->key, key, len);
+
+	ctx->key_len = len;
+
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_des3_ede_setkey,
+		.encrypt = safexcel_cbc_des3_ede_encrypt,
+		.decrypt = safexcel_cbc_des3_ede_decrypt,
+		.min_keysize = DES3_EDE_KEY_SIZE,
+		.max_keysize = DES3_EDE_KEY_SIZE,
+		.ivsize = DES3_EDE_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "safexcel-cbc-des3_ede",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_ecb_des3_ede_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_3DES);
+}
+
+static int safexcel_ecb_des3_ede_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_3DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_des3_ede_setkey,
+		.encrypt = safexcel_ecb_des3_ede_encrypt,
+		.decrypt = safexcel_ecb_des3_ede_decrypt,
+		.min_keysize = DES3_EDE_KEY_SIZE,
+		.max_keysize = DES3_EDE_KEY_SIZE,
+		.ivsize = DES3_EDE_BLOCK_SIZE,
+		.base = {
+			.cra_name = "ecb(des3_ede)",
+			.cra_driver_name = "safexcel-ecb-des3_ede",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_encrypt(struct aead_request *req)
+{
+	struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+	return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT,
+			CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
+}
+
+static int safexcel_aead_decrypt(struct aead_request *req)
+{
+	struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+	return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT,
+			CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
+}
+
+static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_alg_template *tmpl =
+		container_of(tfm->__crt_alg, struct safexcel_alg_template,
+			     alg.aead.base);
+
+	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+				sizeof(struct safexcel_cipher_req));
+
+	ctx->priv = tmpl->priv;
+
+	ctx->aead = true;
+	ctx->base.send = safexcel_aead_send;
+	ctx->base.handle_result = safexcel_aead_handle_result;
+	return 0;
+}
+
+static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+	ctx->state_sz = SHA1_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.aead = {
+		.setkey = safexcel_aead_aes_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA1_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha1),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha1_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+	ctx->state_sz = SHA256_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.aead = {
+		.setkey = safexcel_aead_aes_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA256_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha256),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha256_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+	ctx->state_sz = SHA256_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.aead = {
+		.setkey = safexcel_aead_aes_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA224_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha224),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha224_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+	ctx->state_sz = SHA512_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.aead = {
+		.setkey = safexcel_aead_aes_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA512_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha512),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha512_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+	ctx->state_sz = SHA512_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.aead = {
+		.setkey = safexcel_aead_aes_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA384_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha384),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha384_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
new file mode 100644
index 0000000..ac9282c
--- /dev/null
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -0,0 +1,1659 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#include <crypto/hmac.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#include "safexcel.h"
+
+struct safexcel_ahash_ctx {
+	struct safexcel_context base;
+	struct safexcel_crypto_priv *priv;
+
+	u32 alg;
+
+	u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+	u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
+};
+
+struct safexcel_ahash_req {
+	bool last_req;
+	bool finish;
+	bool hmac;
+	bool needs_inv;
+
+	int nents;
+	dma_addr_t result_dma;
+
+	u32 digest;
+
+	u8 state_sz;    /* expected state size, only set once */
+	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+
+	u64 len[2];
+	u64 processed[2];
+
+	u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+	dma_addr_t cache_dma;
+	unsigned int cache_sz;
+
+	u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+};
+
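+/* Number of bytes accepted by the request but not yet processed by the
+ * engine. len[] and processed[] hold the total and already-processed byte
+ * counts as two 64-bit words each; when a carry into the upper word is
+ * pending, the low-word difference is folded against 0xffffffff.
+ */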
+static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
+{
+	if (req->len[1] > req->processed[1])
+		return 0xffffffff - (req->len[0] - req->processed[0]);
+
+	return req->len[0] - req->processed[0];
+}
+
+static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
+				u32 input_length, u32 result_length)
+{
+	struct safexcel_token *token =
+		(struct safexcel_token *)cdesc->control_data.token;
+
+	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+	token[0].packet_length = input_length;
+	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
+	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+
+	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
+	token[1].packet_length = result_length;
+	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
+			EIP197_TOKEN_STAT_LAST_PACKET;
+	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+}
+
+static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
+				     struct safexcel_ahash_req *req,
+				     struct safexcel_command_desc *cdesc,
+				     unsigned int digestsize)
+{
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int i;
+
+	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
+	cdesc->control_data.control0 |= ctx->alg;
+	cdesc->control_data.control0 |= req->digest;
+
+	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
+		if (req->processed[0] || req->processed[1]) {
+			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5);
+			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
+			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
+				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
+				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
+			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 ||
+				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17);
+
+			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
+		} else {
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
+		}
+
+		if (!req->finish)
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
+
+		/*
+		 * Copy the input digest if needed, and set up the context
+		 * fields. Do this now as we need it to set up the first
+		 * command descriptor.
+		 */
+		if (req->processed[0] || req->processed[1]) {
+			for (i = 0; i < digestsize / sizeof(u32); i++)
+				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);
+
+			if (req->finish) {
+				u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+				count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) *
+					  req->processed[1]);
+
+				/* This is a hardware limitation, as the
+				 * counter must fit into a u32. This represents
+				 * a fairly big amount of input data, so we
+				 * shouldn't see this.
+				 */
+				if (unlikely(count & 0xffff0000)) {
+					dev_warn(priv->dev,
+						 "Input data is too big\n");
+					return;
+				}
+
+				ctx->base.ctxr->data[i] = cpu_to_le32(count);
+			}
+		}
+	} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
+		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
+
+		memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
+		memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
+		       ctx->opad, req->state_sz);
+	}
+}
+
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+				      struct crypto_async_request *async,
+				      bool *should_complete, int *ret)
+{
+	struct safexcel_result_desc *rdesc;
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
+	u64 cache_len;
+
+	*ret = 0;
+
+	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+	if (IS_ERR(rdesc)) {
+		dev_err(priv->dev,
+			"hash: result: could not retrieve the result descriptor\n");
+		*ret = PTR_ERR(rdesc);
+	} else {
+		*ret = safexcel_rdesc_check_errors(priv, rdesc);
+	}
+
+	safexcel_complete(priv, ring);
+
+	if (sreq->nents) {
+		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+		sreq->nents = 0;
+	}
+
+	if (sreq->result_dma) {
+		dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
+				 DMA_FROM_DEVICE);
+		sreq->result_dma = 0;
+	}
+
+	if (sreq->cache_dma) {
+		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
+				 DMA_TO_DEVICE);
+		sreq->cache_dma = 0;
+	}
+
+	if (sreq->finish)
+		memcpy(areq->result, sreq->state,
+		       crypto_ahash_digestsize(ahash));
+
+	cache_len = safexcel_queued_len(sreq);
+	if (cache_len)
+		memcpy(sreq->cache, sreq->cache_next, cache_len);
+
+	*should_complete = true;
+
+	return 1;
+}
+
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+				   int *commands, int *results)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
+	struct safexcel_result_desc *rdesc;
+	struct scatterlist *sg;
+	int i, extra, n_cdesc = 0, ret = 0;
+	u64 queued, len, cache_len;
+
+	queued = len = safexcel_queued_len(req);
+	if (queued <= crypto_ahash_blocksize(ahash))
+		cache_len = queued;
+	else
+		cache_len = queued - areq->nbytes;
+
+	if (!req->last_req) {
+		/* If this is not the last request and the queued data does not
+		 * fit into full blocks, cache it for the next send() call.
+		 */
+		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+		if (!extra)
+			/* If this is not the last request and the queued data
+			 * is a multiple of a block, cache the last one for now.
+			 */
+			extra = crypto_ahash_blocksize(ahash);
+
+		if (extra) {
+			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+					   req->cache_next, extra,
+					   areq->nbytes - extra);
+
+			queued -= extra;
+			len -= extra;
+
+			if (!queued) {
+				*commands = 0;
+				*results = 0;
+				return 0;
+			}
+		}
+	}
+
+	/* Add a command descriptor for the cached data, if any */
+	if (cache_len) {
+		req->cache_dma = dma_map_single(priv->dev, req->cache,
+						cache_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->dev, req->cache_dma))
+			return -EINVAL;
+
+		req->cache_sz = cache_len;
+		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
+						 (cache_len == len),
+						 req->cache_dma, cache_len, len,
+						 ctx->base.ctxr_dma);
+		if (IS_ERR(first_cdesc)) {
+			ret = PTR_ERR(first_cdesc);
+			goto unmap_cache;
+		}
+		n_cdesc++;
+
+		queued -= cache_len;
+		if (!queued)
+			goto send_command;
+	}
+
+	/* Now handle the current ahash request buffer(s) */
+	req->nents = dma_map_sg(priv->dev, areq->src,
+				sg_nents_for_len(areq->src, areq->nbytes),
+				DMA_TO_DEVICE);
+	if (!req->nents) {
+		ret = -ENOMEM;
+		goto cdesc_rollback;
+	}
+
+	for_each_sg(areq->src, sg, req->nents, i) {
+		int sglen = sg_dma_len(sg);
+
+		/* Do not overflow the request */
+		if (queued < sglen)
+			sglen = queued;
+
+		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
+					   !(queued - sglen), sg_dma_address(sg),
+					   sglen, len, ctx->base.ctxr_dma);
+		if (IS_ERR(cdesc)) {
+			ret = PTR_ERR(cdesc);
+			goto unmap_sg;
+		}
+		n_cdesc++;
+
+		if (n_cdesc == 1)
+			first_cdesc = cdesc;
+
+		queued -= sglen;
+		if (!queued)
+			break;
+	}
+
+send_command:
+	/* Setup the context options */
+	safexcel_context_control(ctx, req, first_cdesc, req->state_sz);
+
+	/* Add the token */
+	safexcel_hash_token(first_cdesc, len, req->state_sz);
+
+	req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
+					 DMA_FROM_DEVICE);
+	if (dma_mapping_error(priv->dev, req->result_dma)) {
+		ret = -EINVAL;
+		goto unmap_sg;
+	}
+
+	/* Add a result descriptor */
+	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
+				   req->state_sz);
+	if (IS_ERR(rdesc)) {
+		ret = PTR_ERR(rdesc);
+		goto unmap_result;
+	}
+
+	safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
+
+	req->processed[0] += len;
+	if (req->processed[0] < len)
+		req->processed[1]++;
+
+	*commands = n_cdesc;
+	*results = 1;
+	return 0;
+
+unmap_result:
+	dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
+			 DMA_FROM_DEVICE);
+unmap_sg:
+	dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+cdesc_rollback:
+	for (i = 0; i < n_cdesc; i++)
+		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
+unmap_cache:
+	if (req->cache_dma) {
+		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
+				 DMA_TO_DEVICE);
+		req->cache_sz = 0;
+	}
+
+	return ret;
+}
+
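+/* Compare the state cached in the request (digest words and processed block
+ * count) with what is currently stored in the engine context record. A
+ * mismatch means the context record has to be invalidated before it can be
+ * reused for this transform.
+ */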
+static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	unsigned int state_w_sz = req->state_sz / sizeof(u32);
+	u64 processed;
+	int i;
+
+	processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+	processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1];
+
+	for (i = 0; i < state_w_sz; i++)
+		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
+			return true;
+
+	if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed))
+		return true;
+
+	return false;
+}
+
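+/* Handle the result of a context invalidation request: fetch and check the
+ * result descriptor, then either free the context record (when the transform
+ * is being torn down) or move the original request to a freshly selected
+ * ring and requeue it there.
+ */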
+static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
+				      int ring,
+				      struct crypto_async_request *async,
+				      bool *should_complete, int *ret)
+{
+	struct safexcel_result_desc *rdesc;
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int enq_ret;
+
+	*ret = 0;
+
+	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+	if (IS_ERR(rdesc)) {
+		dev_err(priv->dev,
+			"hash: invalidate: could not retrieve the result descriptor\n");
+		*ret = PTR_ERR(rdesc);
+	} else {
+		*ret = safexcel_rdesc_check_errors(priv, rdesc);
+	}
+
+	safexcel_complete(priv, ring);
+
+	if (ctx->base.exit_inv) {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+
+		*should_complete = true;
+		return 1;
+	}
+
+	ring = safexcel_select_ring(priv);
+	ctx->base.ring = ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	if (enq_ret != -EINPROGRESS)
+		*ret = enq_ret;
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	*should_complete = false;
+
+	return 1;
+}
+
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+				  struct crypto_async_request *async,
+				  bool *should_complete, int *ret)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int err;
+
+	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);
+
+	if (req->needs_inv) {
+		req->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
+static int safexcel_ahash_send_inv(struct crypto_async_request *async,
+				   int ring, int *commands, int *results)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	int ret;
+
+	ret = safexcel_invalidate_cache(async, ctx->priv,
+					ctx->base.ctxr_dma, ring);
+	if (unlikely(ret))
+		return ret;
+
+	*commands = 1;
+	*results = 1;
+
+	return 0;
+}
+
+static int safexcel_ahash_send(struct crypto_async_request *async,
+			       int ring, int *commands, int *results)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int ret;
+
+	if (req->needs_inv)
+		ret = safexcel_ahash_send_inv(async, ring, commands, results);
+	else
+		ret = safexcel_ahash_send_req(async, ring, commands, results);
+
+	return ret;
+}
+
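+/* Queue a request whose only purpose is to invalidate this transform's
+ * context record in the engine, then wait for it to complete. Used when the
+ * transform is destroyed while the engine may still cache its context.
+ */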
+static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
+	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
+	struct safexcel_inv_result result = {};
+	int ring = ctx->base.ring;
+
+	memset(req, 0, sizeof(struct ahash_request));
+
+	/* create invalidation request */
+	init_completion(&result.completion);
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   safexcel_inv_complete, &result);
+
+	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
+	ctx->base.exit_inv = true;
+	rctx->needs_inv = true;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	wait_for_completion(&result.completion);
+
+	if (result.error) {
+		dev_warn(priv->dev, "hash: completion error (%d)\n",
+			 result.error);
+		return result.error;
+	}
+
+	return 0;
+}
+
+/* safexcel_ahash_cache: cache data until at least one request can be sent to
+ * the engine, i.e. until at least one full block of data is queued.
+ */
+static int safexcel_ahash_cache(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	u64 queued, cache_len;
+
+	/* queued: everything accepted by the driver that will be handled by
+	 * the next send() calls, i.e.
+	 * total size handled by update() - total size handled by send()
+	 */
+	queued = safexcel_queued_len(req);
+	/* cache_len: everything accepted by the driver but not sent yet, i.e.
+	 * total size handled by update() - last request size - total size
+	 * handled by send()
+	 */
+	cache_len = queued - areq->nbytes;
+
+	/*
+	 * In case there aren't enough bytes to proceed (less than a
+	 * block size), cache the data until we have enough.
+	 */
+	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
+		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+				   req->cache + cache_len,
+				   areq->nbytes, 0);
+		return areq->nbytes;
+	}
+
+	/* We couldn't cache all the data */
+	return -E2BIG;
+}
+
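+/* Queue an ahash request on the context's ring. The context record is
+ * allocated from the DMA pool on first use; on engines with a transform
+ * record cache, first decide whether the cached context must be invalidated.
+ */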
+static int safexcel_ahash_enqueue(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret, ring;
+
+	req->needs_inv = false;
+
+	if (ctx->base.ctxr) {
+		if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
+		    (req->processed[0] || req->processed[1]) &&
+		    req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+			/* We're still setting needs_inv here, even though it is
+			 * cleared right away, because the needs_inv flag can be
+			 * set in other functions and we want to keep the same
+			 * logic.
+			 */
+			ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+
+		if (ctx->base.needs_inv) {
+			ctx->base.needs_inv = false;
+			req->needs_inv = true;
+		}
+	} else {
+		ctx->base.ring = safexcel_select_ring(priv);
+		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
+						 EIP197_GFP_FLAGS(areq->base),
+						 &ctx->base.ctxr_dma);
+		if (!ctx->base.ctxr)
+			return -ENOMEM;
+	}
+
+	ring = ctx->base.ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	return ret;
+}
+
+static int safexcel_ahash_update(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+
+	/* If the request is 0 length, do nothing */
+	if (!areq->nbytes)
+		return 0;
+
+	req->len[0] += areq->nbytes;
+	if (req->len[0] < areq->nbytes)
+		req->len[1]++;
+
+	safexcel_ahash_cache(areq);
+
+	/*
+	 * We're not doing partial updates when performing an HMAC request.
+	 * Everything will be handled by the final() call.
+	 */
+	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+		return 0;
+
+	if (req->hmac)
+		return safexcel_ahash_enqueue(areq);
+
+	if (!req->last_req &&
+	    safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
+		return safexcel_ahash_enqueue(areq);
+
+	return 0;
+}
+
+static int safexcel_ahash_final(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+
+	req->last_req = true;
+	req->finish = true;
+
+	/* For a zero length request overall, return the well-known hash of
+	 * the empty message for the selected algorithm.
+	 */
+	if (!req->len[0] && !req->len[1] && !areq->nbytes) {
+		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+			memcpy(areq->result, md5_zero_message_hash,
+			       MD5_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+			memcpy(areq->result, sha1_zero_message_hash,
+			       SHA1_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
+			memcpy(areq->result, sha224_zero_message_hash,
+			       SHA224_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
+			memcpy(areq->result, sha256_zero_message_hash,
+			       SHA256_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
+			memcpy(areq->result, sha384_zero_message_hash,
+			       SHA384_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+			memcpy(areq->result, sha512_zero_message_hash,
+			       SHA512_DIGEST_SIZE);
+
+		return 0;
+	}
+
+	return safexcel_ahash_enqueue(areq);
+}
+
+static int safexcel_ahash_finup(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	req->last_req = true;
+	req->finish = true;
+
+	safexcel_ahash_update(areq);
+	return safexcel_ahash_final(areq);
+}
+
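+/* export()/import() serialize the partial hash state (length and processed
+ * counters, digest type, intermediate state and block cache) so a request
+ * can be suspended and resumed later.
+ */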
+static int safexcel_ahash_export(struct ahash_request *areq, void *out)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct safexcel_ahash_export_state *export = out;
+
+	export->len[0] = req->len[0];
+	export->len[1] = req->len[1];
+	export->processed[0] = req->processed[0];
+	export->processed[1] = req->processed[1];
+
+	export->digest = req->digest;
+
+	memcpy(export->state, req->state, req->state_sz);
+	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
+
+	return 0;
+}
+
+static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	const struct safexcel_ahash_export_state *export = in;
+	int ret;
+
+	ret = crypto_ahash_init(areq);
+	if (ret)
+		return ret;
+
+	req->len[0] = export->len[0];
+	req->len[1] = export->len[1];
+	req->processed[0] = export->processed[0];
+	req->processed[1] = export->processed[1];
+
+	req->digest = export->digest;
+
+	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
+	memcpy(req->state, export->state, req->state_sz);
+
+	return 0;
+}
+
+static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_alg_template *tmpl =
+		container_of(__crypto_ahash_alg(tfm->__crt_alg),
+			     struct safexcel_alg_template, alg.ahash);
+
+	ctx->priv = tmpl->priv;
+	ctx->base.send = safexcel_ahash_send;
+	ctx->base.handle_result = safexcel_handle_result;
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct safexcel_ahash_req));
+	return 0;
+}
+
+static int safexcel_sha1_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = SHA1_H0;
+	req->state[1] = SHA1_H1;
+	req->state[2] = SHA1_H2;
+	req->state[3] = SHA1_H3;
+	req->state[4] = SHA1_H4;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA1_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha1_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha1_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	/* context not allocated, skip invalidation */
+	if (!ctx->base.ctxr)
+		return;
+
+	if (priv->flags & EIP197_TRC_CACHE) {
+		ret = safexcel_ahash_exit_inv(tfm);
+		if (ret)
+			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+	} else {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+	}
+}
+
+struct safexcel_alg_template safexcel_alg_sha1 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_sha1_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha1_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "safexcel-sha1",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha1_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_sha1_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha1_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_ahash_result {
+	struct completion completion;
+	int error;
+};
+
+static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
+{
+	struct safexcel_ahash_result *result = req->data;
+
+	if (error == -EINPROGRESS)
+		return;
+
+	result->error = error;
+	complete(&result->completion);
+}
+
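+/* Standard HMAC key preprocessing (RFC 2104): keys longer than a block are
+ * first hashed down to digest size, the result is zero padded to a full
+ * block, and the inner/outer pads are obtained by XORing every byte with
+ * 0x36 (HMAC_IPAD_VALUE) and 0x5c (HMAC_OPAD_VALUE) respectively.
+ */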
+static int safexcel_hmac_init_pad(struct ahash_request *areq,
+				  unsigned int blocksize, const u8 *key,
+				  unsigned int keylen, u8 *ipad, u8 *opad)
+{
+	struct safexcel_ahash_result result;
+	struct scatterlist sg;
+	int ret, i;
+	u8 *keydup;
+
+	if (keylen <= blocksize) {
+		memcpy(ipad, key, keylen);
+	} else {
+		keydup = kmemdup(key, keylen, GFP_KERNEL);
+		if (!keydup)
+			return -ENOMEM;
+
+		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					   safexcel_ahash_complete, &result);
+		sg_init_one(&sg, keydup, keylen);
+		ahash_request_set_crypt(areq, &sg, ipad, keylen);
+		init_completion(&result.completion);
+
+		ret = crypto_ahash_digest(areq);
+		if (ret == -EINPROGRESS || ret == -EBUSY) {
+			wait_for_completion_interruptible(&result.completion);
+			ret = result.error;
+		}
+
+		/* Avoid leaking the key copy */
+		memzero_explicit(keydup, keylen);
+		kfree(keydup);
+
+		if (ret)
+			return ret;
+
+		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
+	}
+
+	memset(ipad + keylen, 0, blocksize - keylen);
+	memcpy(opad, ipad, blocksize);
+
+	for (i = 0; i < blocksize; i++) {
+		ipad[i] ^= HMAC_IPAD_VALUE;
+		opad[i] ^= HMAC_OPAD_VALUE;
+	}
+
+	return 0;
+}
+
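+/* Run one padded key block through the base hash and export the resulting
+ * intermediate state, which is later used as the precomputed inner/outer
+ * digest for HMAC offload.
+ */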
+static int safexcel_hmac_init_iv(struct ahash_request *areq,
+				 unsigned int blocksize, u8 *pad, void *state)
+{
+	struct safexcel_ahash_result result;
+	struct safexcel_ahash_req *req;
+	struct scatterlist sg;
+	int ret;
+
+	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   safexcel_ahash_complete, &result);
+	sg_init_one(&sg, pad, blocksize);
+	ahash_request_set_crypt(areq, &sg, pad, blocksize);
+	init_completion(&result.completion);
+
+	ret = crypto_ahash_init(areq);
+	if (ret)
+		return ret;
+
+	req = ahash_request_ctx(areq);
+	req->hmac = true;
+	req->last_req = true;
+
+	ret = crypto_ahash_update(areq);
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+		return ret;
+
+	wait_for_completion_interruptible(&result.completion);
+	if (result.error)
+		return result.error;
+
+	return crypto_ahash_export(areq, state);
+}
+
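+/* Derive the HMAC inner and outer intermediate digests for the given key by
+ * hashing the ipad and opad blocks on the driver's own base hash
+ * implementation, exporting the results into istate and ostate.
+ */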
+int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
+			 void *istate, void *ostate)
+{
+	struct ahash_request *areq;
+	struct crypto_ahash *tfm;
+	unsigned int blocksize;
+	u8 *ipad, *opad;
+	int ret;
+
+	tfm = crypto_alloc_ahash(alg, 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	areq = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!areq) {
+		ret = -ENOMEM;
+		goto free_ahash;
+	}
+
+	crypto_ahash_clear_flags(tfm, ~0);
+	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+	ipad = kcalloc(2, blocksize, GFP_KERNEL);
+	if (!ipad) {
+		ret = -ENOMEM;
+		goto free_request;
+	}
+
+	opad = ipad + blocksize;
+
+	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
+	if (ret)
+		goto free_ipad;
+
+	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
+	if (ret)
+		goto free_ipad;
+
+	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);
+
+free_ipad:
+	kfree(ipad);
+free_request:
+	ahash_request_free(areq);
+free_ahash:
+	crypto_free_ahash(tfm);
+
+	return ret;
+}
+
+static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
+				    unsigned int keylen, const char *alg,
+				    unsigned int state_sz)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct safexcel_ahash_export_state istate, ostate;
+	int ret, i;
+
+	ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
+	if (ret)
+		return ret;
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) {
+		for (i = 0; i < state_sz / sizeof(u32); i++) {
+			if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
+			    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
+				ctx->base.needs_inv = true;
+				break;
+			}
+		}
+	}
+
+	memcpy(ctx->ipad, &istate.state, state_sz);
+	memcpy(ctx->opad, &ostate.state, state_sz);
+
+	return 0;
+}
+
+static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
+				     unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
+					SHA1_DIGEST_SIZE);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha1_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha1_digest,
+		.setkey = safexcel_hmac_sha1_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha1)",
+				.cra_driver_name = "safexcel-hmac-sha1",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_sha256_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = SHA256_H0;
+	req->state[1] = SHA256_H1;
+	req->state[2] = SHA256_H2;
+	req->state[3] = SHA256_H3;
+	req->state[4] = SHA256_H4;
+	req->state[5] = SHA256_H5;
+	req->state[6] = SHA256_H6;
+	req->state[7] = SHA256_H7;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA256_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha256_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha256_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha256 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_sha256_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha256_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA256_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha256",
+				.cra_driver_name = "safexcel-sha256",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_sha224_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = SHA224_H0;
+	req->state[1] = SHA224_H1;
+	req->state[2] = SHA224_H2;
+	req->state[3] = SHA224_H3;
+	req->state[4] = SHA224_H4;
+	req->state[5] = SHA224_H5;
+	req->state[6] = SHA224_H6;
+	req->state[7] = SHA224_H7;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA256_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha224_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha224_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha224 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_sha224_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha224_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA224_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha224",
+				.cra_driver_name = "safexcel-sha224",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
+				       unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
+					SHA256_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha224_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_sha224_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha224_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha224_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha224_digest,
+		.setkey = safexcel_hmac_sha224_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA224_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha224)",
+				.cra_driver_name = "safexcel-hmac-sha224",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
+				     unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
+					SHA256_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha256_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_sha256_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha256_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha256_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha256_digest,
+		.setkey = safexcel_hmac_sha256_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA256_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha256)",
+				.cra_driver_name = "safexcel-hmac-sha256",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_sha512_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = lower_32_bits(SHA512_H0);
+	req->state[1] = upper_32_bits(SHA512_H0);
+	req->state[2] = lower_32_bits(SHA512_H1);
+	req->state[3] = upper_32_bits(SHA512_H1);
+	req->state[4] = lower_32_bits(SHA512_H2);
+	req->state[5] = upper_32_bits(SHA512_H2);
+	req->state[6] = lower_32_bits(SHA512_H3);
+	req->state[7] = upper_32_bits(SHA512_H3);
+	req->state[8] = lower_32_bits(SHA512_H4);
+	req->state[9] = upper_32_bits(SHA512_H4);
+	req->state[10] = lower_32_bits(SHA512_H5);
+	req->state[11] = upper_32_bits(SHA512_H5);
+	req->state[12] = lower_32_bits(SHA512_H6);
+	req->state[13] = upper_32_bits(SHA512_H6);
+	req->state[14] = lower_32_bits(SHA512_H7);
+	req->state[15] = upper_32_bits(SHA512_H7);
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA512_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha512_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha512_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha512 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_sha512_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha512_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA512_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha512",
+				.cra_driver_name = "safexcel-sha512",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_sha384_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = lower_32_bits(SHA384_H0);
+	req->state[1] = upper_32_bits(SHA384_H0);
+	req->state[2] = lower_32_bits(SHA384_H1);
+	req->state[3] = upper_32_bits(SHA384_H1);
+	req->state[4] = lower_32_bits(SHA384_H2);
+	req->state[5] = upper_32_bits(SHA384_H2);
+	req->state[6] = lower_32_bits(SHA384_H3);
+	req->state[7] = upper_32_bits(SHA384_H3);
+	req->state[8] = lower_32_bits(SHA384_H4);
+	req->state[9] = upper_32_bits(SHA384_H4);
+	req->state[10] = lower_32_bits(SHA384_H5);
+	req->state[11] = upper_32_bits(SHA384_H5);
+	req->state[12] = lower_32_bits(SHA384_H6);
+	req->state[13] = upper_32_bits(SHA384_H6);
+	req->state[14] = lower_32_bits(SHA384_H7);
+	req->state[15] = upper_32_bits(SHA384_H7);
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA512_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha384_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha384_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha384 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_sha384_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha384_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA384_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha384",
+				.cra_driver_name = "safexcel-sha384",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
+				       unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
+					SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha512_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_sha512_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha512_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha512_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha512_digest,
+		.setkey = safexcel_hmac_sha512_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA512_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha512)",
+				.cra_driver_name = "safexcel-hmac-sha512",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
+				       unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
+					SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha384_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_sha384_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha384_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha384_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha384_digest,
+		.setkey = safexcel_hmac_sha384_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA384_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha384)",
+				.cra_driver_name = "safexcel-hmac-sha384",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_md5_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = MD5_H0;
+	req->state[1] = MD5_H1;
+	req->state[2] = MD5_H2;
+	req->state[3] = MD5_H3;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = MD5_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_md5_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_md5_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_md5 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_md5_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_md5_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "md5",
+				.cra_driver_name = "safexcel-md5",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_md5_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_md5_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
+				     unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
+					MD5_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_md5_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_md5_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_md5 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_md5_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_md5_digest,
+		.setkey = safexcel_hmac_md5_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(md5)",
+				.cra_driver_name = "safexcel-hmac-md5",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
new file mode 100644
index 0000000..eb75fa6
--- /dev/null
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+
+#include "safexcel.h"
+
+int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
+				   struct safexcel_desc_ring *cdr,
+				   struct safexcel_desc_ring *rdr)
+{
+	cdr->offset = sizeof(u32) * priv->config.cd_offset;
+	cdr->base = dmam_alloc_coherent(priv->dev,
+					cdr->offset * EIP197_DEFAULT_RING_SIZE,
+					&cdr->base_dma, GFP_KERNEL);
+	if (!cdr->base)
+		return -ENOMEM;
+	cdr->write = cdr->base;
+	cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
+	cdr->read = cdr->base;
+
+	rdr->offset = sizeof(u32) * priv->config.rd_offset;
+	rdr->base = dmam_alloc_coherent(priv->dev,
+					rdr->offset * EIP197_DEFAULT_RING_SIZE,
+					&rdr->base_dma, GFP_KERNEL);
+	if (!rdr->base)
+		return -ENOMEM;
+	rdr->write = rdr->base;
+	rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
+	rdr->read = rdr->base;
+
+	return 0;
+}
+
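+/* Spread requests over the available rings in a simple round-robin fashion. */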
+inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
+{
+	return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
+}
+
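+/* Reserve the next free slot in a descriptor ring. The ring is full when the
+ * write pointer sits one entry behind the read pointer (taking the wrap
+ * around at base_end into account), in which case -ENOMEM is returned.
+ */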
+static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
+				     struct safexcel_desc_ring *ring)
+{
+	void *ptr = ring->write;
+
+	if ((ring->write == ring->read - ring->offset) ||
+	    (ring->read == ring->base && ring->write == ring->base_end))
+		return ERR_PTR(-ENOMEM);
+
+	if (ring->write == ring->base_end)
+		ring->write = ring->base;
+	else
+		ring->write += ring->offset;
+
+	return ptr;
+}
+
+void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
+			      struct safexcel_desc_ring *ring)
+{
+	void *ptr = ring->read;
+
+	if (ring->write == ring->read)
+		return ERR_PTR(-ENOENT);
+
+	if (ring->read == ring->base_end)
+		ring->read = ring->base;
+	else
+		ring->read += ring->offset;
+
+	return ptr;
+}
+
+inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
+				     int ring)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return rdr->read;
+}
+
+inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+					 int ring)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return (rdr->read - rdr->base) / rdr->offset;
+}
+
+inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+					 int ring,
+					 struct safexcel_result_desc *rdesc)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return ((void *)rdesc - rdr->base) / rdr->offset;
+}
+
+void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
+				 struct safexcel_desc_ring *ring)
+{
+	if (ring->write == ring->read)
+		return;
+
+	if (ring->write == ring->base)
+		ring->write = ring->base_end;
+	else
+		ring->write -= ring->offset;
+}
+
+struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
+						 int ring_id,
+						 bool first, bool last,
+						 dma_addr_t data, u32 data_len,
+						 u32 full_data_len,
+						 dma_addr_t context)
+{
+	struct safexcel_command_desc *cdesc;
+	int i;
+
+	cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
+	if (IS_ERR(cdesc))
+		return cdesc;
+
+	memset(cdesc, 0, sizeof(struct safexcel_command_desc));
+
+	cdesc->first_seg = first;
+	cdesc->last_seg = last;
+	cdesc->particle_size = data_len;
+	cdesc->data_lo = lower_32_bits(data);
+	cdesc->data_hi = upper_32_bits(data);
+
+	if (first && context) {
+		struct safexcel_token *token =
+			(struct safexcel_token *)cdesc->control_data.token;
+
+		cdesc->control_data.packet_length = full_data_len;
+		cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
+					      EIP197_OPTION_64BIT_CTX |
+					      EIP197_OPTION_CTX_CTRL_IN_CMD;
+		cdesc->control_data.context_lo =
+			(lower_32_bits(context) & GENMASK(31, 2)) >> 2;
+		cdesc->control_data.context_hi = upper_32_bits(context);
+
+		/* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
+		cdesc->control_data.refresh = 2;
+
+		for (i = 0; i < EIP197_MAX_TOKENS; i++)
+			eip197_noop_token(&token[i]);
+	}
+
+	return cdesc;
+}
+
+struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
+						int ring_id,
+						bool first, bool last,
+						dma_addr_t data, u32 len)
+{
+	struct safexcel_result_desc *rdesc;
+
+	rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
+	if (IS_ERR(rdesc))
+		return rdesc;
+
+	memset(rdesc, 0, sizeof(struct safexcel_result_desc));
+
+	rdesc->first_seg = first;
+	rdesc->last_seg = last;
+	rdesc->particle_size = len;
+	rdesc->data_lo = lower_32_bits(data);
+	rdesc->data_hi = upper_32_bits(data);
+
+	return rdesc;
+}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
new file mode 100644
index 0000000..27f7dad
--- /dev/null
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -0,0 +1,1496 @@
+/*
+ * Intel IXP4xx NPE-C crypto driver
+ *
+ * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/aes.h>
+#include <crypto/hmac.h>
+#include <crypto/sha.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+
+#include <mach/npe.h>
+#include <mach/qmgr.h>
+
+#define MAX_KEYLEN 32
+
+/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
+#define NPE_CTX_LEN 80
+#define AES_BLOCK128 16
+
+#define NPE_OP_HASH_VERIFY   0x01
+#define NPE_OP_CCM_ENABLE    0x04
+#define NPE_OP_CRYPT_ENABLE  0x08
+#define NPE_OP_HASH_ENABLE   0x10
+#define NPE_OP_NOT_IN_PLACE  0x20
+#define NPE_OP_HMAC_DISABLE  0x40
+#define NPE_OP_CRYPT_ENCRYPT 0x80
+
+#define NPE_OP_CCM_GEN_MIC   0xcc
+#define NPE_OP_HASH_GEN_ICV  0x50
+#define NPE_OP_ENC_GEN_KEY   0xc9
+
+#define MOD_ECB     0x0000
+#define MOD_CTR     0x1000
+#define MOD_CBC_ENC 0x2000
+#define MOD_CBC_DEC 0x3000
+#define MOD_CCM_ENC 0x4000
+#define MOD_CCM_DEC 0x5000
+
+#define KEYLEN_128  4
+#define KEYLEN_192  6
+#define KEYLEN_256  8
+
+#define CIPH_DECR   0x0000
+#define CIPH_ENCR   0x0400
+
+#define MOD_DES     0x0000
+#define MOD_TDEA2   0x0100
+#define MOD_3DES    0x0200
+#define MOD_AES     0x0800
+#define MOD_AES128  (0x0800 | KEYLEN_128)
+#define MOD_AES192  (0x0900 | KEYLEN_192)
+#define MOD_AES256  (0x0a00 | KEYLEN_256)
+
+#define MAX_IVLEN   16
+#define NPE_ID      2  /* NPE C */
+#define NPE_QLEN    16
+/* Extra descriptors reserved for configuration requests
+ * (e.g. register_chain_var()) when the first NPE_QLEN crypt_ctl are busy */
+#define NPE_QLEN_TOTAL 64
+
+#define SEND_QID    29
+#define RECV_QID    30
+
+#define CTL_FLAG_UNUSED		0x0000
+#define CTL_FLAG_USED		0x1000
+#define CTL_FLAG_PERFORM_ABLK	0x0001
+#define CTL_FLAG_GEN_ICV	0x0002
+#define CTL_FLAG_GEN_REVAES	0x0004
+#define CTL_FLAG_PERFORM_AEAD	0x0008
+#define CTL_FLAG_MASK		0x000f
+
+#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
+
+#define MD5_DIGEST_SIZE   16
+
+struct buffer_desc {
+	u32 phys_next;
+#ifdef __ARMEB__
+	u16 buf_len;
+	u16 pkt_len;
+#else
+	u16 pkt_len;
+	u16 buf_len;
+#endif
+	u32 phys_addr;
+	u32 __reserved[4];
+	struct buffer_desc *next;
+	enum dma_data_direction dir;
+};
+
+struct crypt_ctl {
+#ifdef __ARMEB__
+	u8 mode;		/* NPE_OP_*  operation mode */
+	u8 init_len;
+	u16 reserved;
+#else
+	u16 reserved;
+	u8 init_len;
+	u8 mode;		/* NPE_OP_*  operation mode */
+#endif
+	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
+	u32 icv_rev_aes;	/* icv or rev aes */
+	u32 src_buf;
+	u32 dst_buf;
+#ifdef __ARMEB__
+	u16 auth_offs;		/* Authentication start offset */
+	u16 auth_len;		/* Authentication data length */
+	u16 crypt_offs;		/* Cryption start offset */
+	u16 crypt_len;		/* Cryption data length */
+#else
+	u16 auth_len;		/* Authentication data length */
+	u16 auth_offs;		/* Authentication start offset */
+	u16 crypt_len;		/* Cryption data length */
+	u16 crypt_offs;		/* Cryption start offset */
+#endif
+	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
+	u32 crypto_ctx;		/* NPE Crypto Param structure address */
+
+	/* Used by Host: 4*4 bytes */
+	unsigned ctl_flags;
+	union {
+		struct ablkcipher_request *ablk_req;
+		struct aead_request *aead_req;
+		struct crypto_tfm *tfm;
+	} data;
+	struct buffer_desc *regist_buf;
+	u8 *regist_ptr;
+};
+
+struct ablk_ctx {
+	struct buffer_desc *src;
+	struct buffer_desc *dst;
+};
+
+struct aead_ctx {
+	struct buffer_desc *src;
+	struct buffer_desc *dst;
+	struct scatterlist ivlist;
+	/* used when the hmac is not on one sg entry */
+	u8 *hmac_virt;
+	int encrypt;
+};
+
+struct ix_hash_algo {
+	u32 cfgword;
+	unsigned char *icv;
+};
+
+struct ix_sa_dir {
+	unsigned char *npe_ctx;
+	dma_addr_t npe_ctx_phys;
+	int npe_ctx_idx;
+	u8 npe_mode;
+};
+
+struct ixp_ctx {
+	struct ix_sa_dir encrypt;
+	struct ix_sa_dir decrypt;
+	int authkey_len;
+	u8 authkey[MAX_KEYLEN];
+	int enckey_len;
+	u8 enckey[MAX_KEYLEN];
+	u8 salt[MAX_IVLEN];
+	u8 nonce[CTR_RFC3686_NONCE_SIZE];
+	unsigned salted;
+	atomic_t configuring;
+	struct completion completion;
+};
+
+struct ixp_alg {
+	struct crypto_alg crypto;
+	const struct ix_hash_algo *hash;
+	u32 cfg_enc;
+	u32 cfg_dec;
+
+	int registered;
+};
+
+struct ixp_aead_alg {
+	struct aead_alg crypto;
+	const struct ix_hash_algo *hash;
+	u32 cfg_enc;
+	u32 cfg_dec;
+
+	int registered;
+};
+
+static const struct ix_hash_algo hash_alg_md5 = {
+	.cfgword	= 0xAA010004,
+	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+};
+static const struct ix_hash_algo hash_alg_sha1 = {
+	.cfgword	= 0x00000005,
+	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
+			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
+};
+
+static struct npe *npe_c;
+static struct dma_pool *buffer_pool = NULL;
+static struct dma_pool *ctx_pool = NULL;
+
+static struct crypt_ctl *crypt_virt = NULL;
+static dma_addr_t crypt_phys;
+
+static int support_aes = 1;
+
+#define DRIVER_NAME "ixp4xx_crypto"
+
+static struct platform_device *pdev;
+
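+/* The crypt_ctl descriptors live in one coherent DMA block, so converting
+ * between virtual and bus addresses is plain index arithmetic.
+ */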
+static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
+{
+	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
+}
+
+static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
+{
+	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
+}
+
+static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
+{
+	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
+}
+
+static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
+{
+	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
+}
+
+static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
+{
+	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
+}
+
+static int setup_crypt_desc(void)
+{
+	struct device *dev = &pdev->dev;
+	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
+	crypt_virt = dma_zalloc_coherent(dev,
+					 NPE_QLEN * sizeof(struct crypt_ctl),
+					 &crypt_phys, GFP_ATOMIC);
+	if (!crypt_virt)
+		return -ENOMEM;
+	return 0;
+}
+
+static spinlock_t desc_lock;
+static struct crypt_ctl *get_crypt_desc(void)
+{
+	int i;
+	static int idx = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&desc_lock, flags);
+
+	if (unlikely(!crypt_virt))
+		setup_crypt_desc();
+	if (unlikely(!crypt_virt)) {
+		spin_unlock_irqrestore(&desc_lock, flags);
+		return NULL;
+	}
+	i = idx;
+	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
+		if (++idx >= NPE_QLEN)
+			idx = 0;
+		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
+		spin_unlock_irqrestore(&desc_lock, flags);
+		return crypt_virt + i;
+	} else {
+		spin_unlock_irqrestore(&desc_lock, flags);
+		return NULL;
+	}
+}
+
+static spinlock_t emerg_lock;
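+/* Like get_crypt_desc(), but falls back to the descriptors reserved beyond
+ * NPE_QLEN. These are only used for configuration requests (HMAC chain
+ * variables, reverse AES key), so that setup work is not starved by normal
+ * traffic.
+ */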
+static struct crypt_ctl *get_crypt_desc_emerg(void)
+{
+	int i;
+	static int idx = NPE_QLEN;
+	struct crypt_ctl *desc;
+	unsigned long flags;
+
+	desc = get_crypt_desc();
+	if (desc)
+		return desc;
+	if (unlikely(!crypt_virt))
+		return NULL;
+
+	spin_lock_irqsave(&emerg_lock, flags);
+	i = idx;
+	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
+		if (++idx >= NPE_QLEN_TOTAL)
+			idx = NPE_QLEN;
+		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
+		spin_unlock_irqrestore(&emerg_lock, flags);
+		return crypt_virt + i;
+	} else {
+		spin_unlock_irqrestore(&emerg_lock, flags);
+		return NULL;
+	}
+}
+
+static void free_buf_chain(struct device *dev, struct buffer_desc *buf, u32 phys)
+{
+	while (buf) {
+		struct buffer_desc *buf1;
+		u32 phys1;
+
+		buf1 = buf->next;
+		phys1 = buf->phys_next;
+		dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
+		dma_pool_free(buffer_pool, buf, phys);
+		buf = buf1;
+		phys = phys1;
+	}
+}
+
+static struct tasklet_struct crypto_done_tasklet;
+
+static void finish_scattered_hmac(struct crypt_ctl *crypt)
+{
+	struct aead_request *req = crypt->data.aead_req;
+	struct aead_ctx *req_ctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	int authsize = crypto_aead_authsize(tfm);
+	int decryptlen = req->assoclen + req->cryptlen - authsize;
+
+	if (req_ctx->encrypt) {
+		scatterwalk_map_and_copy(req_ctx->hmac_virt,
+			req->dst, decryptlen, authsize, 1);
+	}
+	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
+}
+
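+/* Completion handler for one queue entry coming back from the NPE. Bit 0 of
+ * the returned address flags a failed operation (reported as -EBADMSG); the
+ * low bits are masked off before converting back to a descriptor pointer.
+ */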
+static void one_packet(dma_addr_t phys)
+{
+	struct device *dev = &pdev->dev;
+	struct crypt_ctl *crypt;
+	struct ixp_ctx *ctx;
+	int failed;
+
+	failed = phys & 0x1 ? -EBADMSG : 0;
+	phys &= ~0x3;
+	crypt = crypt_phys2virt(phys);
+
+	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
+	case CTL_FLAG_PERFORM_AEAD: {
+		struct aead_request *req = crypt->data.aead_req;
+		struct aead_ctx *req_ctx = aead_request_ctx(req);
+
+		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+		if (req_ctx->hmac_virt) {
+			finish_scattered_hmac(crypt);
+		}
+		req->base.complete(&req->base, failed);
+		break;
+	}
+	case CTL_FLAG_PERFORM_ABLK: {
+		struct ablkcipher_request *req = crypt->data.ablk_req;
+		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+		if (req_ctx->dst) {
+			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+		}
+		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+		req->base.complete(&req->base, failed);
+		break;
+	}
+	case CTL_FLAG_GEN_ICV:
+		ctx = crypto_tfm_ctx(crypt->data.tfm);
+		dma_pool_free(ctx_pool, crypt->regist_ptr,
+				crypt->regist_buf->phys_addr);
+		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
+		if (atomic_dec_and_test(&ctx->configuring))
+			complete(&ctx->completion);
+		break;
+	case CTL_FLAG_GEN_REVAES:
+		ctx = crypto_tfm_ctx(crypt->data.tfm);
+		*(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
+		if (atomic_dec_and_test(&ctx->configuring))
+			complete(&ctx->completion);
+		break;
+	default:
+		BUG();
+	}
+	crypt->ctl_flags = CTL_FLAG_UNUSED;
+}
+
+static void irqhandler(void *_unused)
+{
+	tasklet_schedule(&crypto_done_tasklet);
+}
+
+static void crypto_done_action(unsigned long arg)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		dma_addr_t phys = qmgr_get_entry(RECV_QID);
+		if (!phys)
+			return;
+		one_packet(phys);
+	}
+	tasklet_schedule(&crypto_done_tasklet);
+}
+
+static int init_ixp_crypto(struct device *dev)
+{
+	int ret = -ENODEV;
+	u32 msg[2] = { 0, 0 };
+
+	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
+				     IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
+		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
+		return ret;
+	}
+	npe_c = npe_request(NPE_ID);
+	if (!npe_c)
+		return ret;
+
+	if (!npe_running(npe_c)) {
+		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
+		if (ret)
+			goto npe_release;
+		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+			goto npe_error;
+	} else {
+		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
+			goto npe_error;
+
+		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+			goto npe_error;
+	}
+
+	switch ((msg[1]>>16) & 0xff) {
+	case 3:
+		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
+				npe_name(npe_c));
+		support_aes = 0;
+		break;
+	case 4:
+	case 5:
+		support_aes = 1;
+		break;
+	default:
+		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
+			npe_name(npe_c));
+		ret = -ENODEV;
+		goto npe_release;
+	}
+	/* buffer_pool is sometimes also used to store the HMAC,
+	 * so make sure it is large enough.
+	 */
+	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
+	buffer_pool = dma_pool_create("buffer", dev,
+			sizeof(struct buffer_desc), 32, 0);
+	ret = -ENOMEM;
+	if (!buffer_pool) {
+		goto err;
+	}
+	ctx_pool = dma_pool_create("context", dev,
+			NPE_CTX_LEN, 16, 0);
+	if (!ctx_pool) {
+		goto err;
+	}
+	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
+				 "ixp_crypto:out", NULL);
+	if (ret)
+		goto err;
+	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
+				 "ixp_crypto:in", NULL);
+	if (ret) {
+		qmgr_release_queue(SEND_QID);
+		goto err;
+	}
+	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
+	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
+
+	qmgr_enable_irq(RECV_QID);
+	return 0;
+
+npe_error:
+	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
+	ret = -EIO;
+err:
+	dma_pool_destroy(ctx_pool);
+	dma_pool_destroy(buffer_pool);
+npe_release:
+	npe_release(npe_c);
+	return ret;
+}
+
+static void release_ixp_crypto(struct device *dev)
+{
+	qmgr_disable_irq(RECV_QID);
+	tasklet_kill(&crypto_done_tasklet);
+
+	qmgr_release_queue(SEND_QID);
+	qmgr_release_queue(RECV_QID);
+
+	dma_pool_destroy(ctx_pool);
+	dma_pool_destroy(buffer_pool);
+
+	npe_release(npe_c);
+
+	if (crypt_virt) {
+		dma_free_coherent(dev,
+			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
+			crypt_virt, crypt_phys);
+	}
+}
+
+static void reset_sa_dir(struct ix_sa_dir *dir)
+{
+	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
+	dir->npe_ctx_idx = 0;
+	dir->npe_mode = 0;
+}
+
+static int init_sa_dir(struct ix_sa_dir *dir)
+{
+	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
+	if (!dir->npe_ctx) {
+		return -ENOMEM;
+	}
+	reset_sa_dir(dir);
+	return 0;
+}
+
+static void free_sa_dir(struct ix_sa_dir *dir)
+{
+	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
+	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
+}
+
+static int init_tfm(struct crypto_tfm *tfm)
+{
+	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	atomic_set(&ctx->configuring, 0);
+	ret = init_sa_dir(&ctx->encrypt);
+	if (ret)
+		return ret;
+	ret = init_sa_dir(&ctx->decrypt);
+	if (ret) {
+		free_sa_dir(&ctx->encrypt);
+	}
+	return ret;
+}
+
+static int init_tfm_ablk(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
+	return init_tfm(tfm);
+}
+
+static int init_tfm_aead(struct crypto_aead *tfm)
+{
+	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
+	return init_tfm(crypto_aead_tfm(tfm));
+}
+
+static void exit_tfm(struct crypto_tfm *tfm)
+{
+	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+	free_sa_dir(&ctx->encrypt);
+	free_sa_dir(&ctx->decrypt);
+}
+
+static void exit_tfm_aead(struct crypto_aead *tfm)
+{
+	exit_tfm(crypto_aead_tfm(tfm));
+}
+
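+/* Ask the NPE to hash one HMAC pad block (the key XORed with the ipad or
+ * opad byte) and to store the resulting chaining variables at 'target'
+ * inside the crypto context. Completion is tracked via ctx->configuring.
+ */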
+static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
+		int init_len, u32 ctx_addr, const u8 *key, int key_len)
+{
+	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypt_ctl *crypt;
+	struct buffer_desc *buf;
+	int i;
+	u8 *pad;
+	u32 pad_phys, buf_phys;
+
+	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
+	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
+	if (!pad)
+		return -ENOMEM;
+	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
+	if (!buf) {
+		dma_pool_free(ctx_pool, pad, pad_phys);
+		return -ENOMEM;
+	}
+	crypt = get_crypt_desc_emerg();
+	if (!crypt) {
+		dma_pool_free(ctx_pool, pad, pad_phys);
+		dma_pool_free(buffer_pool, buf, buf_phys);
+		return -EAGAIN;
+	}
+
+	memcpy(pad, key, key_len);
+	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
+	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
+		pad[i] ^= xpad;
+	}
+
+	crypt->data.tfm = tfm;
+	crypt->regist_ptr = pad;
+	crypt->regist_buf = buf;
+
+	crypt->auth_offs = 0;
+	crypt->auth_len = HMAC_PAD_BLOCKLEN;
+	crypt->crypto_ctx = ctx_addr;
+	crypt->src_buf = buf_phys;
+	crypt->icv_rev_aes = target;
+	crypt->mode = NPE_OP_HASH_GEN_ICV;
+	crypt->init_len = init_len;
+	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
+
+	buf->next = 0;
+	buf->buf_len = HMAC_PAD_BLOCKLEN;
+	buf->pkt_len = 0;
+	buf->phys_addr = pad_phys;
+
+	atomic_inc(&ctx->configuring);
+	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
+	BUG_ON(qmgr_stat_overflow(SEND_QID));
+	return 0;
+}
+
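+/* Write the hash configuration word and the algorithm's initial chaining
+ * values into the SA context, then trigger generation of the HMAC outer
+ * and inner pad digests via register_chain_var().
+ */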
+static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
+		const u8 *key, int key_len, unsigned digest_len)
+{
+	u32 itarget, otarget, npe_ctx_addr;
+	unsigned char *cinfo;
+	int init_len, ret = 0;
+	u32 cfgword;
+	struct ix_sa_dir *dir;
+	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+	const struct ix_hash_algo *algo;
+
+	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
+	algo = ix_hash(tfm);
+
+	/* write cfg word to cryptinfo */
+	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
+#ifndef __ARMEB__
+	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
+#endif
+	*(u32*)cinfo = cpu_to_be32(cfgword);
+	cinfo += sizeof(cfgword);
+
+	/* write ICV to cryptinfo */
+	memcpy(cinfo, algo->icv, digest_len);
+	cinfo += digest_len;
+
+	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
+				+ sizeof(algo->cfgword);
+	otarget = itarget + digest_len;
+	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
+	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
+
+	dir->npe_ctx_idx += init_len;
+	dir->npe_mode |= NPE_OP_HASH_ENABLE;
+
+	if (!encrypt)
+		dir->npe_mode |= NPE_OP_HASH_VERIFY;
+
+	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
+			init_len, npe_ctx_addr, key, key_len);
+	if (ret)
+		return ret;
+	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
+			init_len, npe_ctx_addr, key, key_len);
+}
+
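+/* AES decryption on the NPE needs the reverse (decryption) key schedule:
+ * ask the engine to derive it from the encryption key and store it right
+ * after the config word of the decrypt SA context.
+ */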
+static int gen_rev_aes_key(struct crypto_tfm *tfm)
+{
+	struct crypt_ctl *crypt;
+	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct ix_sa_dir *dir = &ctx->decrypt;
+
+	crypt = get_crypt_desc_emerg();
+	if (!crypt) {
+		return -EAGAIN;
+	}
+	*(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
+
+	crypt->data.tfm = tfm;
+	crypt->crypt_offs = 0;
+	crypt->crypt_len = AES_BLOCK128;
+	crypt->src_buf = 0;
+	crypt->crypto_ctx = dir->npe_ctx_phys;
+	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
+	crypt->mode = NPE_OP_ENC_GEN_KEY;
+	crypt->init_len = dir->npe_ctx_idx;
+	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
+
+	atomic_inc(&ctx->configuring);
+	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
+	BUG_ON(qmgr_stat_overflow(SEND_QID));
+	return 0;
+}
+
+static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
+		const u8 *key, int key_len)
+{
+	u8 *cinfo;
+	u32 cipher_cfg;
+	u32 keylen_cfg = 0;
+	struct ix_sa_dir *dir;
+	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 *flags = &tfm->crt_flags;
+
+	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+	cinfo = dir->npe_ctx;
+
+	if (encrypt) {
+		cipher_cfg = cipher_cfg_enc(tfm);
+		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
+	} else {
+		cipher_cfg = cipher_cfg_dec(tfm);
+	}
+	if (cipher_cfg & MOD_AES) {
+		switch (key_len) {
+		case 16: keylen_cfg = MOD_AES128; break;
+		case 24: keylen_cfg = MOD_AES192; break;
+		case 32: keylen_cfg = MOD_AES256; break;
+		default:
+			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+			return -EINVAL;
+		}
+		cipher_cfg |= keylen_cfg;
+	} else if (cipher_cfg & MOD_3DES) {
+		const u32 *K = (const u32 *)key;
+		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+			     !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
+		{
+			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
+			return -EINVAL;
+		}
+	} else {
+		u32 tmp[DES_EXPKEY_WORDS];
+		if (des_ekey(tmp, key) == 0) {
+			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		}
+	}
+	/* write cfg word to cryptinfo */
+	*(u32*)cinfo = cpu_to_be32(cipher_cfg);
+	cinfo += sizeof(cipher_cfg);
+
+	/* write cipher key to cryptinfo */
+	memcpy(cinfo, key, key_len);
+	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
+	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
+		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
+		key_len = DES3_EDE_KEY_SIZE;
+	}
+	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
+	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
+	if ((cipher_cfg & MOD_AES) && !encrypt) {
+		return gen_rev_aes_key(tfm);
+	}
+	return 0;
+}
+
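+/* Walk the scatterlist and build a chain of NPE buffer descriptors,
+ * DMA-mapping each segment. @buf is the head hook provided by the caller;
+ * the last descriptor of the chain is returned, or NULL if a descriptor
+ * allocation fails.
+ */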
+static struct buffer_desc *chainup_buffers(struct device *dev,
+		struct scatterlist *sg,	unsigned nbytes,
+		struct buffer_desc *buf, gfp_t flags,
+		enum dma_data_direction dir)
+{
+	for (; nbytes > 0; sg = sg_next(sg)) {
+		unsigned len = min(nbytes, sg->length);
+		struct buffer_desc *next_buf;
+		u32 next_buf_phys;
+		void *ptr;
+
+		nbytes -= len;
+		ptr = sg_virt(sg);
+		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
+		if (!next_buf) {
+			buf = NULL;
+			break;
+		}
+		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
+		buf->next = next_buf;
+		buf->phys_next = next_buf_phys;
+		buf = next_buf;
+
+		buf->phys_addr = sg_dma_address(sg);
+		buf->buf_len = len;
+		buf->dir = dir;
+	}
+	buf->next = NULL;
+	buf->phys_next = 0;
+	return buf;
+}
+
+static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			unsigned int key_len)
+{
+	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	u32 *flags = &tfm->base.crt_flags;
+	int ret;
+
+	init_completion(&ctx->completion);
+	atomic_inc(&ctx->configuring);
+
+	reset_sa_dir(&ctx->encrypt);
+	reset_sa_dir(&ctx->decrypt);
+
+	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
+	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
+
+	ret = setup_cipher(&tfm->base, 0, key, key_len);
+	if (ret)
+		goto out;
+	ret = setup_cipher(&tfm->base, 1, key, key_len);
+	if (ret)
+		goto out;
+
+	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
+		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
+			ret = -EINVAL;
+		} else {
+			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
+		}
+	}
+out:
+	if (!atomic_dec_and_test(&ctx->configuring))
+		wait_for_completion(&ctx->completion);
+	return ret;
+}
+
+static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+		unsigned int key_len)
+{
+	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	/* the nonce is stored in bytes at end of key */
+	if (key_len < CTR_RFC3686_NONCE_SIZE)
+		return -EINVAL;
+
+	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
+			CTR_RFC3686_NONCE_SIZE);
+
+	key_len -= CTR_RFC3686_NONCE_SIZE;
+	return ablk_setkey(tfm, key, key_len);
+}
+
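+/* Build and queue one NPE descriptor for an ablkcipher request. The source
+ * and destination scatterlists are converted into buffer descriptor chains,
+ * and out-of-place operation is signalled with NPE_OP_NOT_IN_PLACE.
+ */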
+static int ablk_perform(struct ablkcipher_request *req, int encrypt)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+	struct ix_sa_dir *dir;
+	struct crypt_ctl *crypt;
+	unsigned int nbytes = req->nbytes;
+	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+	struct buffer_desc src_hook;
+	struct device *dev = &pdev->dev;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+				GFP_KERNEL : GFP_ATOMIC;
+
+	if (qmgr_stat_full(SEND_QID))
+		return -EAGAIN;
+	if (atomic_read(&ctx->configuring))
+		return -EAGAIN;
+
+	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+
+	crypt = get_crypt_desc();
+	if (!crypt)
+		return -ENOMEM;
+
+	crypt->data.ablk_req = req;
+	crypt->crypto_ctx = dir->npe_ctx_phys;
+	crypt->mode = dir->npe_mode;
+	crypt->init_len = dir->npe_ctx_idx;
+
+	crypt->crypt_offs = 0;
+	crypt->crypt_len = nbytes;
+
+	BUG_ON(ivsize && !req->info);
+	memcpy(crypt->iv, req->info, ivsize);
+	if (req->src != req->dst) {
+		struct buffer_desc dst_hook;
+		crypt->mode |= NPE_OP_NOT_IN_PLACE;
+		/* This was never tested by Intel
+		 * for more than one dst buffer, I think. */
+		req_ctx->dst = NULL;
+		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
+					flags, DMA_FROM_DEVICE))
+			goto free_buf_dest;
+		src_direction = DMA_TO_DEVICE;
+		req_ctx->dst = dst_hook.next;
+		crypt->dst_buf = dst_hook.phys_next;
+	} else {
+		req_ctx->dst = NULL;
+	}
+	req_ctx->src = NULL;
+	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
+				flags, src_direction))
+		goto free_buf_src;
+
+	req_ctx->src = src_hook.next;
+	crypt->src_buf = src_hook.phys_next;
+	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
+	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
+	BUG_ON(qmgr_stat_overflow(SEND_QID));
+	return -EINPROGRESS;
+
+free_buf_src:
+	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+free_buf_dest:
+	if (req->src != req->dst) {
+		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+	}
+	crypt->ctl_flags = CTL_FLAG_UNUSED;
+	return -ENOMEM;
+}
+
+static int ablk_encrypt(struct ablkcipher_request *req)
+{
+	return ablk_perform(req, 1);
+}
+
+static int ablk_decrypt(struct ablkcipher_request *req)
+{
+	return ablk_perform(req, 0);
+}
+
+static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	u8 iv[CTR_RFC3686_BLOCK_SIZE];
+	u8 *info = req->info;
+	int ret;
+
+	/* set up counter block */
+	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
+	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
+
+	/* initialize counter portion of counter block */
+	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
+		cpu_to_be32(1);
+
+	req->info = iv;
+	ret = ablk_perform(req, 1);
+	req->info = info;
+	return ret;
+}
+
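+/* Build and queue one NPE descriptor for an AEAD request. Authentication
+ * always covers assoclen + cryptlen; if the ICV does not sit contiguously
+ * in the last buffer it is staged in a separate DMA-safe buffer instead.
+ */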
+static int aead_perform(struct aead_request *req, int encrypt,
+		int cryptoffset, int eff_cryptlen, u8 *iv)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+	unsigned ivsize = crypto_aead_ivsize(tfm);
+	unsigned authsize = crypto_aead_authsize(tfm);
+	struct ix_sa_dir *dir;
+	struct crypt_ctl *crypt;
+	unsigned int cryptlen;
+	struct buffer_desc *buf, src_hook;
+	struct aead_ctx *req_ctx = aead_request_ctx(req);
+	struct device *dev = &pdev->dev;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+				GFP_KERNEL : GFP_ATOMIC;
+	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+	unsigned int lastlen;
+
+	if (qmgr_stat_full(SEND_QID))
+		return -EAGAIN;
+	if (atomic_read(&ctx->configuring))
+		return -EAGAIN;
+
+	if (encrypt) {
+		dir = &ctx->encrypt;
+		cryptlen = req->cryptlen;
+	} else {
+		dir = &ctx->decrypt;
+		/* req->cryptlen includes the authsize when decrypting */
+		cryptlen = req->cryptlen - authsize;
+		eff_cryptlen -= authsize;
+	}
+	crypt = get_crypt_desc();
+	if (!crypt)
+		return -ENOMEM;
+
+	crypt->data.aead_req = req;
+	crypt->crypto_ctx = dir->npe_ctx_phys;
+	crypt->mode = dir->npe_mode;
+	crypt->init_len = dir->npe_ctx_idx;
+
+	crypt->crypt_offs = cryptoffset;
+	crypt->crypt_len = eff_cryptlen;
+
+	crypt->auth_offs = 0;
+	crypt->auth_len = req->assoclen + cryptlen;
+	BUG_ON(ivsize && !req->iv);
+	memcpy(crypt->iv, req->iv, ivsize);
+
+	buf = chainup_buffers(dev, req->src, crypt->auth_len,
+			      &src_hook, flags, src_direction);
+	req_ctx->src = src_hook.next;
+	crypt->src_buf = src_hook.phys_next;
+	if (!buf)
+		goto free_buf_src;
+
+	lastlen = buf->buf_len;
+	if (lastlen >= authsize)
+		crypt->icv_rev_aes = buf->phys_addr +
+				     buf->buf_len - authsize;
+
+	req_ctx->dst = NULL;
+
+	if (req->src != req->dst) {
+		struct buffer_desc dst_hook;
+
+		crypt->mode |= NPE_OP_NOT_IN_PLACE;
+		src_direction = DMA_TO_DEVICE;
+
+		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
+				      &dst_hook, flags, DMA_FROM_DEVICE);
+		req_ctx->dst = dst_hook.next;
+		crypt->dst_buf = dst_hook.phys_next;
+
+		if (!buf)
+			goto free_buf_dst;
+
+		if (encrypt) {
+			lastlen = buf->buf_len;
+			if (lastlen >= authsize)
+				crypt->icv_rev_aes = buf->phys_addr +
+						     buf->buf_len - authsize;
+		}
+	}
+
+	if (unlikely(lastlen < authsize)) {
+		/* The hmac bytes are scattered across buffers,
+		 * so copy them into a contiguous safe buffer */
+		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
+				&crypt->icv_rev_aes);
+		if (unlikely(!req_ctx->hmac_virt))
+			goto free_buf_dst;
+		if (!encrypt) {
+			scatterwalk_map_and_copy(req_ctx->hmac_virt,
+				req->src, cryptlen, authsize, 0);
+		}
+		req_ctx->encrypt = encrypt;
+	} else {
+		req_ctx->hmac_virt = NULL;
+	}
+
+	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
+	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
+	BUG_ON(qmgr_stat_overflow(SEND_QID));
+	return -EINPROGRESS;
+
+free_buf_dst:
+	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+	crypt->ctl_flags = CTL_FLAG_UNUSED;
+	return -ENOMEM;
+}
+
+static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
+{
+	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+	u32 *flags = &tfm->base.crt_flags;
+	unsigned digest_len = crypto_aead_maxauthsize(tfm);
+	int ret;
+
+	if (!ctx->enckey_len && !ctx->authkey_len)
+		return 0;
+	init_completion(&ctx->completion);
+	atomic_inc(&ctx->configuring);
+
+	reset_sa_dir(&ctx->encrypt);
+	reset_sa_dir(&ctx->decrypt);
+
+	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
+	if (ret)
+		goto out;
+	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
+	if (ret)
+		goto out;
+	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
+			ctx->authkey_len, digest_len);
+	if (ret)
+		goto out;
+	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
+			ctx->authkey_len, digest_len);
+	if (ret)
+		goto out;
+
+	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
+		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
+			ret = -EINVAL;
+			goto out;
+		} else {
+			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
+		}
+	}
+out:
+	if (!atomic_dec_and_test(&ctx->configuring))
+		wait_for_completion(&ctx->completion);
+	return ret;
+}
+
+static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+	int max = crypto_aead_maxauthsize(tfm) >> 2;
+
+	if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
+		return -EINVAL;
+	return aead_setup(tfm, authsize);
+}
+
+static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_authenc_keys keys;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+		goto badkey;
+
+	if (keys.authkeylen > sizeof(ctx->authkey))
+		goto badkey;
+
+	if (keys.enckeylen > sizeof(ctx->enckey))
+		goto badkey;
+
+	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+	ctx->authkey_len = keys.authkeylen;
+	ctx->enckey_len = keys.enckeylen;
+
+	memzero_explicit(&keys, sizeof(keys));
+	return aead_setup(tfm, crypto_aead_authsize(tfm));
+badkey:
+	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	memzero_explicit(&keys, sizeof(keys));
+	return -EINVAL;
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
+}
+
+static struct ixp_alg ixp4xx_algos[] = {
+{
+	.crypto	= {
+		.cra_name	= "cbc(des)",
+		.cra_blocksize	= DES_BLOCK_SIZE,
+		.cra_u		= { .ablkcipher = {
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.ivsize		= DES_BLOCK_SIZE,
+			.geniv		= "eseqiv",
+			}
+		}
+	},
+	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
+	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
+
+}, {
+	.crypto	= {
+		.cra_name	= "ecb(des)",
+		.cra_blocksize	= DES_BLOCK_SIZE,
+		.cra_u		= { .ablkcipher = {
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			}
+		}
+	},
+	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
+	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
+}, {
+	.crypto	= {
+		.cra_name	= "cbc(des3_ede)",
+		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		.cra_u		= { .ablkcipher = {
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.ivsize		= DES3_EDE_BLOCK_SIZE,
+			.geniv		= "eseqiv",
+			}
+		}
+	},
+	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
+	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+	.crypto	= {
+		.cra_name	= "ecb(des3_ede)",
+		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		.cra_u		= { .ablkcipher = {
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			}
+		}
+	},
+	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
+	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
+}, {
+	.crypto	= {
+		.cra_name	= "cbc(aes)",
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_u		= { .ablkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
+			.geniv		= "eseqiv",
+			}
+		}
+	},
+	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
+	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
+}, {
+	.crypto	= {
+		.cra_name	= "ecb(aes)",
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_u		= { .ablkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			}
+		}
+	},
+	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
+	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
+}, {
+	.crypto	= {
+		.cra_name	= "ctr(aes)",
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_u		= { .ablkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
+			.geniv		= "eseqiv",
+			}
+		}
+	},
+	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
+	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
+}, {
+	.crypto	= {
+		.cra_name	= "rfc3686(ctr(aes))",
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_u		= { .ablkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
+			.geniv		= "eseqiv",
+			.setkey		= ablk_rfc3686_setkey,
+			.encrypt	= ablk_rfc3686_crypt,
+			.decrypt	= ablk_rfc3686_crypt }
+		}
+	},
+	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
+	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
+} };
+
+static struct ixp_aead_alg ixp4xx_aeads[] = {
+{
+	.crypto	= {
+		.base = {
+			.cra_name	= "authenc(hmac(md5),cbc(des))",
+			.cra_blocksize	= DES_BLOCK_SIZE,
+		},
+		.ivsize		= DES_BLOCK_SIZE,
+		.maxauthsize	= MD5_DIGEST_SIZE,
+	},
+	.hash = &hash_alg_md5,
+	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
+	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+	.crypto	= {
+		.base = {
+			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
+			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= MD5_DIGEST_SIZE,
+	},
+	.hash = &hash_alg_md5,
+	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
+	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+	.crypto	= {
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(des))",
+			.cra_blocksize	= DES_BLOCK_SIZE,
+		},
+			.ivsize		= DES_BLOCK_SIZE,
+			.maxauthsize	= SHA1_DIGEST_SIZE,
+	},
+	.hash = &hash_alg_sha1,
+	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
+	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+	.crypto	= {
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
+			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
+	},
+	.hash = &hash_alg_sha1,
+	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
+	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+	.crypto	= {
+		.base = {
+			.cra_name	= "authenc(hmac(md5),cbc(aes))",
+			.cra_blocksize	= AES_BLOCK_SIZE,
+		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= MD5_DIGEST_SIZE,
+	},
+	.hash = &hash_alg_md5,
+	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
+	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
+}, {
+	.crypto	= {
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
+			.cra_blocksize	= AES_BLOCK_SIZE,
+		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
+	},
+	.hash = &hash_alg_sha1,
+	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
+	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
+} };
+
+#define IXP_POSTFIX "-ixp4xx"
+
+static const struct platform_device_info ixp_dev_info __initdata = {
+	.name		= DRIVER_NAME,
+	.id		= 0,
+	.dma_mask	= DMA_BIT_MASK(32),
+};
+
+static int __init ixp_module_init(void)
+{
+	int num = ARRAY_SIZE(ixp4xx_algos);
+	int i, err;
+
+	pdev = platform_device_register_full(&ixp_dev_info);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
+
+	spin_lock_init(&desc_lock);
+	spin_lock_init(&emerg_lock);
+
+	err = init_ixp_crypto(&pdev->dev);
+	if (err) {
+		platform_device_unregister(pdev);
+		return err;
+	}
+	for (i = 0; i < num; i++) {
+		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;
+
+		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
+			"%s"IXP_POSTFIX, cra->cra_name) >=
+			CRYPTO_MAX_ALG_NAME)
+		{
+			continue;
+		}
+		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
+			continue;
+		}
+
+		/* block ciphers */
+		cra->cra_type = &crypto_ablkcipher_type;
+		cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				 CRYPTO_ALG_KERN_DRIVER_ONLY |
+				 CRYPTO_ALG_ASYNC;
+		if (!cra->cra_ablkcipher.setkey)
+			cra->cra_ablkcipher.setkey = ablk_setkey;
+		if (!cra->cra_ablkcipher.encrypt)
+			cra->cra_ablkcipher.encrypt = ablk_encrypt;
+		if (!cra->cra_ablkcipher.decrypt)
+			cra->cra_ablkcipher.decrypt = ablk_decrypt;
+		cra->cra_init = init_tfm_ablk;
+
+		cra->cra_ctxsize = sizeof(struct ixp_ctx);
+		cra->cra_module = THIS_MODULE;
+		cra->cra_alignmask = 3;
+		cra->cra_priority = 300;
+		cra->cra_exit = exit_tfm;
+		if (crypto_register_alg(cra))
+			printk(KERN_ERR "Failed to register '%s'\n",
+				cra->cra_name);
+		else
+			ixp4xx_algos[i].registered = 1;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
+
+		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
+		    CRYPTO_MAX_ALG_NAME)
+			continue;
+		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
+			continue;
+
+		/* authenc */
+		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+				      CRYPTO_ALG_ASYNC;
+		cra->setkey = aead_setkey;
+		cra->setauthsize = aead_setauthsize;
+		cra->encrypt = aead_encrypt;
+		cra->decrypt = aead_decrypt;
+		cra->init = init_tfm_aead;
+		cra->exit = exit_tfm_aead;
+
+		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+		cra->base.cra_module = THIS_MODULE;
+		cra->base.cra_alignmask = 3;
+		cra->base.cra_priority = 300;
+
+		if (crypto_register_aead(cra))
+			printk(KERN_ERR "Failed to register '%s'\n",
+				cra->base.cra_driver_name);
+		else
+			ixp4xx_aeads[i].registered = 1;
+	}
+	return 0;
+}
+
+static void __exit ixp_module_exit(void)
+{
+	int num = ARRAY_SIZE(ixp4xx_algos);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+		if (ixp4xx_aeads[i].registered)
+			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
+	}
+
+	for (i = 0; i < num; i++) {
+		if (ixp4xx_algos[i].registered)
+			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
+	}
+	release_ixp_crypto(&pdev->dev);
+	platform_device_unregister(pdev);
+}
+
+module_init(ixp_module_init);
+module_exit(ixp_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
+MODULE_DESCRIPTION("IXP4xx hardware crypto");
+
diff --git a/drivers/crypto/marvell/Makefile b/drivers/crypto/marvell/Makefile
new file mode 100644
index 0000000..0c12b13
--- /dev/null
+++ b/drivers/crypto/marvell/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o
+marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
new file mode 100644
index 0000000..a4aa681
--- /dev/null
+++ b/drivers/crypto/marvell/cesa.c
@@ -0,0 +1,618 @@
+/*
+ * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
+ * that can be found on the following platform: Orion, Kirkwood, Armada. This
+ * driver supports the TDMA engine on platforms on which it is available.
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Author: Arnaud Ebalard <arno@natisbad.org>
+ *
+ * This work is based on an initial version written by
+ * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/genalloc.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/mbus.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+
+#include "cesa.h"
+
+/* Limit of the crypto queue before reaching the backlog */
+#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128
+
+struct mv_cesa_dev *cesa_dev;
+
+struct crypto_async_request *
+mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
+			   struct crypto_async_request **backlog)
+{
+	struct crypto_async_request *req;
+
+	*backlog = crypto_get_backlog(&engine->queue);
+	req = crypto_dequeue_request(&engine->queue);
+
+	if (!req)
+		return NULL;
+
+	return req;
+}
+
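+/* If the engine is idle, pull the next request off its queue and kick the
+ * first processing step; a backlogged request is notified that it is now
+ * in progress.
+ */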
+static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
+{
+	struct crypto_async_request *req = NULL, *backlog = NULL;
+	struct mv_cesa_ctx *ctx;
+
+	spin_lock_bh(&engine->lock);
+	if (!engine->req) {
+		req = mv_cesa_dequeue_req_locked(engine, &backlog);
+		engine->req = req;
+	}
+	spin_unlock_bh(&engine->lock);
+
+	if (!req)
+		return;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	ctx = crypto_tfm_ctx(req->tfm);
+	ctx->ops->step(req);
+}
+
+static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
+{
+	struct crypto_async_request *req;
+	struct mv_cesa_ctx *ctx;
+	int res;
+
+	req = engine->req;
+	ctx = crypto_tfm_ctx(req->tfm);
+	res = ctx->ops->process(req, status);
+
+	if (res == 0) {
+		ctx->ops->complete(req);
+		mv_cesa_engine_enqueue_complete_request(engine, req);
+	} else if (res == -EINPROGRESS) {
+		ctx->ops->step(req);
+	}
+
+	return res;
+}
+
+static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
+{
+	if (engine->chain.first && engine->chain.last)
+		return mv_cesa_tdma_process(engine, status);
+
+	return mv_cesa_std_process(engine, status);
+}
+
+static inline void
+mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
+		     int res)
+{
+	ctx->ops->cleanup(req);
+	local_bh_disable();
+	req->complete(req, res);
+	local_bh_enable();
+}
+
+static irqreturn_t mv_cesa_int(int irq, void *priv)
+{
+	struct mv_cesa_engine *engine = priv;
+	struct crypto_async_request *req;
+	struct mv_cesa_ctx *ctx;
+	u32 status, mask;
+	irqreturn_t ret = IRQ_NONE;
+
+	while (true) {
+		int res;
+
+		mask = mv_cesa_get_int_mask(engine);
+		status = readl(engine->regs + CESA_SA_INT_STATUS);
+
+		if (!(status & mask))
+			break;
+
+		/*
+		 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
+		 * relevant on some platforms.
+		 */
+		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
+		writel(~status, engine->regs + CESA_SA_INT_STATUS);
+
+		/* Process fetched requests */
+		res = mv_cesa_int_process(engine, status & mask);
+		ret = IRQ_HANDLED;
+
+		spin_lock_bh(&engine->lock);
+		req = engine->req;
+		if (res != -EINPROGRESS)
+			engine->req = NULL;
+		spin_unlock_bh(&engine->lock);
+
+		ctx = crypto_tfm_ctx(req->tfm);
+
+		if (res && res != -EINPROGRESS)
+			mv_cesa_complete_req(ctx, req, res);
+
+		/* Launch the next pending request */
+		mv_cesa_rearm_engine(engine);
+
+		/* Iterate over the complete queue */
+		while (true) {
+			req = mv_cesa_engine_dequeue_complete_request(engine);
+			if (!req)
+				break;
+
+			ctx = crypto_tfm_ctx(req->tfm);
+			mv_cesa_complete_req(ctx, req, 0);
+		}
+	}
+
+	return ret;
+}
+
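+/* Enqueue a request on its target engine. A DMA request that is accepted
+ * (or backlogged) is also appended to the engine's TDMA chain so that
+ * requests can be processed back to back.
+ */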
+int mv_cesa_queue_req(struct crypto_async_request *req,
+		      struct mv_cesa_req *creq)
+{
+	int ret;
+	struct mv_cesa_engine *engine = creq->engine;
+
+	spin_lock_bh(&engine->lock);
+	ret = crypto_enqueue_request(&engine->queue, req);
+	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
+	    (ret == -EINPROGRESS || ret == -EBUSY))
+		mv_cesa_tdma_chain(engine, creq);
+	spin_unlock_bh(&engine->lock);
+
+	if (ret != -EINPROGRESS)
+		return ret;
+
+	mv_cesa_rearm_engine(engine);
+
+	return -EINPROGRESS;
+}
+
+static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
+{
+	int ret;
+	int i, j;
+
+	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
+		ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
+		if (ret)
+			goto err_unregister_crypto;
+	}
+
+	for (i = 0; i < cesa->caps->nahash_algs; i++) {
+		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
+		if (ret)
+			goto err_unregister_ahash;
+	}
+
+	return 0;
+
+err_unregister_ahash:
+	for (j = 0; j < i; j++)
+		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
+	i = cesa->caps->ncipher_algs;
+
+err_unregister_crypto:
+	for (j = 0; j < i; j++)
+		crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);
+
+	return ret;
+}
+
+static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
+{
+	int i;
+
+	for (i = 0; i < cesa->caps->nahash_algs; i++)
+		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);
+
+	for (i = 0; i < cesa->caps->ncipher_algs; i++)
+		crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
+}
+
+static struct skcipher_alg *orion_cipher_algs[] = {
+	&mv_cesa_ecb_des_alg,
+	&mv_cesa_cbc_des_alg,
+	&mv_cesa_ecb_des3_ede_alg,
+	&mv_cesa_cbc_des3_ede_alg,
+	&mv_cesa_ecb_aes_alg,
+	&mv_cesa_cbc_aes_alg,
+};
+
+static struct ahash_alg *orion_ahash_algs[] = {
+	&mv_md5_alg,
+	&mv_sha1_alg,
+	&mv_ahmac_md5_alg,
+	&mv_ahmac_sha1_alg,
+};
+
+static struct skcipher_alg *armada_370_cipher_algs[] = {
+	&mv_cesa_ecb_des_alg,
+	&mv_cesa_cbc_des_alg,
+	&mv_cesa_ecb_des3_ede_alg,
+	&mv_cesa_cbc_des3_ede_alg,
+	&mv_cesa_ecb_aes_alg,
+	&mv_cesa_cbc_aes_alg,
+};
+
+static struct ahash_alg *armada_370_ahash_algs[] = {
+	&mv_md5_alg,
+	&mv_sha1_alg,
+	&mv_sha256_alg,
+	&mv_ahmac_md5_alg,
+	&mv_ahmac_sha1_alg,
+	&mv_ahmac_sha256_alg,
+};
+
+static const struct mv_cesa_caps orion_caps = {
+	.nengines = 1,
+	.cipher_algs = orion_cipher_algs,
+	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
+	.ahash_algs = orion_ahash_algs,
+	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
+	.has_tdma = false,
+};
+
+static const struct mv_cesa_caps kirkwood_caps = {
+	.nengines = 1,
+	.cipher_algs = orion_cipher_algs,
+	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
+	.ahash_algs = orion_ahash_algs,
+	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
+	.has_tdma = true,
+};
+
+static const struct mv_cesa_caps armada_370_caps = {
+	.nengines = 1,
+	.cipher_algs = armada_370_cipher_algs,
+	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
+	.ahash_algs = armada_370_ahash_algs,
+	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
+	.has_tdma = true,
+};
+
+static const struct mv_cesa_caps armada_xp_caps = {
+	.nengines = 2,
+	.cipher_algs = armada_370_cipher_algs,
+	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
+	.ahash_algs = armada_370_ahash_algs,
+	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
+	.has_tdma = true,
+};
+
+static const struct of_device_id mv_cesa_of_match_table[] = {
+	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
+	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
+	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
+	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
+	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
+	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
+	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
+	{}
+};
+MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
+
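+/* Program the TDMA address decoding windows so the engine can DMA to every
+ * DRAM chip-select described by the mbus layout.
+ */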
+static void
+mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
+			  const struct mbus_dram_target_info *dram)
+{
+	void __iomem *iobase = engine->regs;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
+		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
+	}
+
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		writel(((cs->size - 1) & 0xffff0000) |
+		       (cs->mbus_attr << 8) |
+		       (dram->mbus_dram_target_id << 4) | 1,
+		       iobase + CESA_TDMA_WINDOW_CTRL(i));
+		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
+	}
+}
+
+static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
+{
+	struct device *dev = cesa->dev;
+	struct mv_cesa_dev_dma *dma;
+
+	if (!cesa->caps->has_tdma)
+		return 0;
+
+	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+	if (!dma)
+		return -ENOMEM;
+
+	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
+					sizeof(struct mv_cesa_tdma_desc),
+					16, 0);
+	if (!dma->tdma_desc_pool)
+		return -ENOMEM;
+
+	dma->op_pool = dmam_pool_create("cesa_op", dev,
+					sizeof(struct mv_cesa_op_ctx), 16, 0);
+	if (!dma->op_pool)
+		return -ENOMEM;
+
+	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
+					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
+	if (!dma->cache_pool)
+		return -ENOMEM;
+
+	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
+	if (!dma->padding_pool)
+		return -ENOMEM;
+
+	cesa->dma = dma;
+
+	return 0;
+}
+
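+/* Reserve the per-engine SRAM: preferably from the "marvell,crypto-srams"
+ * gen_pool, otherwise by mapping the "sram"/"sram0"/"sram1" MMIO resource
+ * and making it reachable through the DMA API.
+ */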
+static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
+{
+	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
+	struct mv_cesa_engine *engine = &cesa->engines[idx];
+	const char *res_name = "sram";
+	struct resource *res;
+
+	engine->pool = of_gen_pool_get(cesa->dev->of_node,
+				       "marvell,crypto-srams", idx);
+	if (engine->pool) {
+		engine->sram = gen_pool_dma_alloc(engine->pool,
+						  cesa->sram_size,
+						  &engine->sram_dma);
+		if (engine->sram)
+			return 0;
+
+		engine->pool = NULL;
+		return -ENOMEM;
+	}
+
+	if (cesa->caps->nengines > 1) {
+		if (!idx)
+			res_name = "sram0";
+		else
+			res_name = "sram1";
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   res_name);
+	if (!res || resource_size(res) < cesa->sram_size)
+		return -EINVAL;
+
+	engine->sram = devm_ioremap_resource(cesa->dev, res);
+	if (IS_ERR(engine->sram))
+		return PTR_ERR(engine->sram);
+
+	engine->sram_dma = dma_map_resource(cesa->dev, res->start,
+					    cesa->sram_size,
+					    DMA_BIDIRECTIONAL, 0);
+	if (dma_mapping_error(cesa->dev, engine->sram_dma))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
+{
+	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
+	struct mv_cesa_engine *engine = &cesa->engines[idx];
+
+	if (engine->pool)
+		gen_pool_free(engine->pool, (unsigned long)engine->sram,
+			      cesa->sram_size);
+	else
+		dma_unmap_resource(cesa->dev, engine->sram_dma,
+				   cesa->sram_size, DMA_BIDIRECTIONAL, 0);
+}
+
+static int mv_cesa_probe(struct platform_device *pdev)
+{
+	const struct mv_cesa_caps *caps = &orion_caps;
+	const struct mbus_dram_target_info *dram;
+	const struct of_device_id *match;
+	struct device *dev = &pdev->dev;
+	struct mv_cesa_dev *cesa;
+	struct mv_cesa_engine *engines;
+	struct resource *res;
+	int irq, ret, i;
+	u32 sram_size;
+
+	if (cesa_dev) {
+		dev_err(&pdev->dev, "Only one CESA device authorized\n");
+		return -EEXIST;
+	}
+
+	if (dev->of_node) {
+		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
+		if (!match || !match->data)
+			return -ENOTSUPP;
+
+		caps = match->data;
+	}
+
+	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
+	if (!cesa)
+		return -ENOMEM;
+
+	cesa->caps = caps;
+	cesa->dev = dev;
+
+	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
+	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
+			     &sram_size);
+	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
+		sram_size = CESA_SA_MIN_SRAM_SIZE;
+
+	cesa->sram_size = sram_size;
+	cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
+				     GFP_KERNEL);
+	if (!cesa->engines)
+		return -ENOMEM;
+
+	spin_lock_init(&cesa->lock);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+	cesa->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(cesa->regs))
+		return PTR_ERR(cesa->regs);
+
+	ret = mv_cesa_dev_dma_init(cesa);
+	if (ret)
+		return ret;
+
+	dram = mv_mbus_dram_info_nooverlap();
+
+	platform_set_drvdata(pdev, cesa);
+
+	for (i = 0; i < caps->nengines; i++) {
+		struct mv_cesa_engine *engine = &cesa->engines[i];
+		char res_name[7];
+
+		engine->id = i;
+		spin_lock_init(&engine->lock);
+
+		ret = mv_cesa_get_sram(pdev, i);
+		if (ret)
+			goto err_cleanup;
+
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0) {
+			ret = irq;
+			goto err_cleanup;
+		}
+
+		/*
+		 * Not all platforms can gate the CESA clocks: do not complain
+		 * if the clock does not exist.
+		 */
+		snprintf(res_name, sizeof(res_name), "cesa%d", i);
+		engine->clk = devm_clk_get(dev, res_name);
+		if (IS_ERR(engine->clk)) {
+			engine->clk = devm_clk_get(dev, NULL);
+			if (IS_ERR(engine->clk))
+				engine->clk = NULL;
+		}
+
+		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
+		engine->zclk = devm_clk_get(dev, res_name);
+		if (IS_ERR(engine->zclk))
+			engine->zclk = NULL;
+
+		ret = clk_prepare_enable(engine->clk);
+		if (ret)
+			goto err_cleanup;
+
+		ret = clk_prepare_enable(engine->zclk);
+		if (ret)
+			goto err_cleanup;
+
+		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);
+
+		if (dram && cesa->caps->has_tdma)
+			mv_cesa_conf_mbus_windows(engine, dram);
+
+		writel(0, engine->regs + CESA_SA_INT_STATUS);
+		writel(CESA_SA_CFG_STOP_DIG_ERR,
+		       engine->regs + CESA_SA_CFG);
+		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
+		       engine->regs + CESA_SA_DESC_P0);
+
+		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
+						IRQF_ONESHOT,
+						dev_name(&pdev->dev),
+						engine);
+		if (ret)
+			goto err_cleanup;
+
+		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
+		atomic_set(&engine->load, 0);
+		INIT_LIST_HEAD(&engine->complete_queue);
+	}
+
+	cesa_dev = cesa;
+
+	ret = mv_cesa_add_algs(cesa);
+	if (ret) {
+		cesa_dev = NULL;
+		goto err_cleanup;
+	}
+
+	dev_info(dev, "CESA device successfully registered\n");
+
+	return 0;
+
+err_cleanup:
+	for (i = 0; i < caps->nengines; i++) {
+		clk_disable_unprepare(cesa->engines[i].zclk);
+		clk_disable_unprepare(cesa->engines[i].clk);
+		mv_cesa_put_sram(pdev, i);
+	}
+
+	return ret;
+}
+
+static int mv_cesa_remove(struct platform_device *pdev)
+{
+	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
+	int i;
+
+	mv_cesa_remove_algs(cesa);
+
+	for (i = 0; i < cesa->caps->nengines; i++) {
+		clk_disable_unprepare(cesa->engines[i].zclk);
+		clk_disable_unprepare(cesa->engines[i].clk);
+		mv_cesa_put_sram(pdev, i);
+	}
+
+	return 0;
+}
+
+static const struct platform_device_id mv_cesa_plat_id_table[] = {
+	{ .name = "mv_crypto" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);
+
+static struct platform_driver marvell_cesa = {
+	.probe		= mv_cesa_probe,
+	.remove		= mv_cesa_remove,
+	.id_table	= mv_cesa_plat_id_table,
+	.driver		= {
+		.name	= "marvell-cesa",
+		.of_match_table = mv_cesa_of_match_table,
+	},
+};
+module_platform_driver(marvell_cesa);
+
+MODULE_ALIAS("platform:mv_crypto");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
+MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
new file mode 100644
index 0000000..d63a6ee
--- /dev/null
+++ b/drivers/crypto/marvell/cesa.h
@@ -0,0 +1,880 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __MARVELL_CESA_H__
+#define __MARVELL_CESA_H__
+
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#include <linux/crypto.h>
+#include <linux/dmapool.h>
+
+#define CESA_ENGINE_OFF(i)			(((i) * 0x2000))
+
+#define CESA_TDMA_BYTE_CNT			0x800
+#define CESA_TDMA_SRC_ADDR			0x810
+#define CESA_TDMA_DST_ADDR			0x820
+#define CESA_TDMA_NEXT_ADDR			0x830
+
+#define CESA_TDMA_CONTROL			0x840
+#define CESA_TDMA_DST_BURST			GENMASK(2, 0)
+#define CESA_TDMA_DST_BURST_32B			3
+#define CESA_TDMA_DST_BURST_128B		4
+#define CESA_TDMA_OUT_RD_EN			BIT(4)
+#define CESA_TDMA_SRC_BURST			GENMASK(8, 6)
+#define CESA_TDMA_SRC_BURST_32B			(3 << 6)
+#define CESA_TDMA_SRC_BURST_128B		(4 << 6)
+#define CESA_TDMA_CHAIN				BIT(9)
+#define CESA_TDMA_BYTE_SWAP			BIT(11)
+#define CESA_TDMA_NO_BYTE_SWAP			BIT(11)
+#define CESA_TDMA_EN				BIT(12)
+#define CESA_TDMA_FETCH_ND			BIT(13)
+#define CESA_TDMA_ACT				BIT(14)
+
+#define CESA_TDMA_CUR				0x870
+#define CESA_TDMA_ERROR_CAUSE			0x8c8
+#define CESA_TDMA_ERROR_MSK			0x8cc
+
+#define CESA_TDMA_WINDOW_BASE(x)		(((x) * 0x8) + 0xa00)
+#define CESA_TDMA_WINDOW_CTRL(x)		(((x) * 0x8) + 0xa04)
+
+#define CESA_IVDIG(x)				(0xdd00 + ((x) * 4) +	\
+						 (((x) < 5) ? 0 : 0x14))
+
+#define CESA_SA_CMD				0xde00
+#define CESA_SA_CMD_EN_CESA_SA_ACCL0		BIT(0)
+#define CESA_SA_CMD_EN_CESA_SA_ACCL1		BIT(1)
+#define CESA_SA_CMD_DISABLE_SEC			BIT(2)
+
+#define CESA_SA_DESC_P0				0xde04
+
+#define CESA_SA_DESC_P1				0xde14
+
+#define CESA_SA_CFG				0xde08
+#define CESA_SA_CFG_STOP_DIG_ERR		GENMASK(1, 0)
+#define CESA_SA_CFG_DIG_ERR_CONT		0
+#define CESA_SA_CFG_DIG_ERR_SKIP		1
+#define CESA_SA_CFG_DIG_ERR_STOP		3
+#define CESA_SA_CFG_CH0_W_IDMA			BIT(7)
+#define CESA_SA_CFG_CH1_W_IDMA			BIT(8)
+#define CESA_SA_CFG_ACT_CH0_IDMA		BIT(9)
+#define CESA_SA_CFG_ACT_CH1_IDMA		BIT(10)
+#define CESA_SA_CFG_MULTI_PKT			BIT(11)
+#define CESA_SA_CFG_PARA_DIS			BIT(13)
+
+#define CESA_SA_ACCEL_STATUS			0xde0c
+#define CESA_SA_ST_ACT_0			BIT(0)
+#define CESA_SA_ST_ACT_1			BIT(1)
+
+/*
+ * CESA_SA_FPGA_INT_STATUS looks like an FPGA leftover and is documented only
+ * in Errata 4.12. It seems it was part of an IRQ controller in the FPGA and
+ * was not removed when switching to the core and moving to
+ * CESA_SA_INT_STATUS.
+ */
+#define CESA_SA_FPGA_INT_STATUS			0xdd68
+#define CESA_SA_INT_STATUS			0xde20
+#define CESA_SA_INT_AUTH_DONE			BIT(0)
+#define CESA_SA_INT_DES_E_DONE			BIT(1)
+#define CESA_SA_INT_AES_E_DONE			BIT(2)
+#define CESA_SA_INT_AES_D_DONE			BIT(3)
+#define CESA_SA_INT_ENC_DONE			BIT(4)
+#define CESA_SA_INT_ACCEL0_DONE			BIT(5)
+#define CESA_SA_INT_ACCEL1_DONE			BIT(6)
+#define CESA_SA_INT_ACC0_IDMA_DONE		BIT(7)
+#define CESA_SA_INT_ACC1_IDMA_DONE		BIT(8)
+#define CESA_SA_INT_IDMA_DONE			BIT(9)
+#define CESA_SA_INT_IDMA_OWN_ERR		BIT(10)
+
+#define CESA_SA_INT_MSK				0xde24
+
+#define CESA_SA_DESC_CFG_OP_MAC_ONLY		0
+#define CESA_SA_DESC_CFG_OP_CRYPT_ONLY		1
+#define CESA_SA_DESC_CFG_OP_MAC_CRYPT		2
+#define CESA_SA_DESC_CFG_OP_CRYPT_MAC		3
+#define CESA_SA_DESC_CFG_OP_MSK			GENMASK(1, 0)
+#define CESA_SA_DESC_CFG_MACM_SHA256		(1 << 4)
+#define CESA_SA_DESC_CFG_MACM_HMAC_SHA256	(3 << 4)
+#define CESA_SA_DESC_CFG_MACM_MD5		(4 << 4)
+#define CESA_SA_DESC_CFG_MACM_SHA1		(5 << 4)
+#define CESA_SA_DESC_CFG_MACM_HMAC_MD5		(6 << 4)
+#define CESA_SA_DESC_CFG_MACM_HMAC_SHA1		(7 << 4)
+#define CESA_SA_DESC_CFG_MACM_MSK		GENMASK(6, 4)
+#define CESA_SA_DESC_CFG_CRYPTM_DES		(1 << 8)
+#define CESA_SA_DESC_CFG_CRYPTM_3DES		(2 << 8)
+#define CESA_SA_DESC_CFG_CRYPTM_AES		(3 << 8)
+#define CESA_SA_DESC_CFG_CRYPTM_MSK		GENMASK(9, 8)
+#define CESA_SA_DESC_CFG_DIR_ENC		(0 << 12)
+#define CESA_SA_DESC_CFG_DIR_DEC		(1 << 12)
+#define CESA_SA_DESC_CFG_CRYPTCM_ECB		(0 << 16)
+#define CESA_SA_DESC_CFG_CRYPTCM_CBC		(1 << 16)
+#define CESA_SA_DESC_CFG_CRYPTCM_MSK		BIT(16)
+#define CESA_SA_DESC_CFG_3DES_EEE		(0 << 20)
+#define CESA_SA_DESC_CFG_3DES_EDE		(1 << 20)
+#define CESA_SA_DESC_CFG_AES_LEN_128		(0 << 24)
+#define CESA_SA_DESC_CFG_AES_LEN_192		(1 << 24)
+#define CESA_SA_DESC_CFG_AES_LEN_256		(2 << 24)
+#define CESA_SA_DESC_CFG_AES_LEN_MSK		GENMASK(25, 24)
+#define CESA_SA_DESC_CFG_NOT_FRAG		(0 << 30)
+#define CESA_SA_DESC_CFG_FIRST_FRAG		(1 << 30)
+#define CESA_SA_DESC_CFG_LAST_FRAG		(2 << 30)
+#define CESA_SA_DESC_CFG_MID_FRAG		(3 << 30)
+#define CESA_SA_DESC_CFG_FRAG_MSK		GENMASK(31, 30)
+
+/*
+ * /-----------\ 0
+ * | ACCEL CFG |	4 * 8
+ * |-----------| 0x20
+ * | CRYPT KEY |	8 * 4
+ * |-----------| 0x40
+ * |  IV   IN  |	4 * 4
+ * |-----------| 0x40 (inplace)
+ * |  IV BUF   |	4 * 4
+ * |-----------| 0x80
+ * |  DATA IN  |	16 * x (max ->max_req_size)
+ * |-----------| 0x80 (inplace operation)
+ * |  DATA OUT |	16 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
+
+/*
+ * Hashing memory map:
+ * /-----------\ 0
+ * | ACCEL CFG |        4 * 8
+ * |-----------| 0x20
+ * | Inner IV  |        8 * 4
+ * |-----------| 0x40
+ * | Outer IV  |        8 * 4
+ * |-----------| 0x60
+ * | Output BUF|        8 * 4
+ * |-----------| 0x80
+ * |  DATA IN  |        64 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
+
+#define CESA_SA_CFG_SRAM_OFFSET			0x00
+#define CESA_SA_DATA_SRAM_OFFSET		0x80
+
+#define CESA_SA_CRYPT_KEY_SRAM_OFFSET		0x20
+#define CESA_SA_CRYPT_IV_SRAM_OFFSET		0x40
+
+#define CESA_SA_MAC_IIV_SRAM_OFFSET		0x20
+#define CESA_SA_MAC_OIV_SRAM_OFFSET		0x40
+#define CESA_SA_MAC_DIG_SRAM_OFFSET		0x60
+
+#define CESA_SA_DESC_CRYPT_DATA(offset)					\
+	cpu_to_le32((CESA_SA_DATA_SRAM_OFFSET + (offset)) |		\
+		    ((CESA_SA_DATA_SRAM_OFFSET + (offset)) << 16))
+
+#define CESA_SA_DESC_CRYPT_IV(offset)					\
+	cpu_to_le32((CESA_SA_CRYPT_IV_SRAM_OFFSET + (offset)) |	\
+		    ((CESA_SA_CRYPT_IV_SRAM_OFFSET + (offset)) << 16))
+
+#define CESA_SA_DESC_CRYPT_KEY(offset)					\
+	cpu_to_le32(CESA_SA_CRYPT_KEY_SRAM_OFFSET + (offset))
+
+#define CESA_SA_DESC_MAC_DATA(offset)					\
+	cpu_to_le32(CESA_SA_DATA_SRAM_OFFSET + (offset))
+#define CESA_SA_DESC_MAC_DATA_MSK		cpu_to_le32(GENMASK(15, 0))
+
+#define CESA_SA_DESC_MAC_TOTAL_LEN(total_len)	cpu_to_le32((total_len) << 16)
+#define CESA_SA_DESC_MAC_TOTAL_LEN_MSK		cpu_to_le32(GENMASK(31, 16))
+
+#define CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX	0xffff
+
+#define CESA_SA_DESC_MAC_DIGEST(offset)					\
+	cpu_to_le32(CESA_SA_MAC_DIG_SRAM_OFFSET + (offset))
+#define CESA_SA_DESC_MAC_DIGEST_MSK		cpu_to_le32(GENMASK(15, 0))
+
+#define CESA_SA_DESC_MAC_FRAG_LEN(frag_len)	cpu_to_le32((frag_len) << 16)
+#define CESA_SA_DESC_MAC_FRAG_LEN_MSK		cpu_to_le32(GENMASK(31, 16))
+
+#define CESA_SA_DESC_MAC_IV(offset)					\
+	cpu_to_le32((CESA_SA_MAC_IIV_SRAM_OFFSET + (offset)) |		\
+		    ((CESA_SA_MAC_OIV_SRAM_OFFSET + (offset)) << 16))
+
+#define CESA_SA_SRAM_SIZE			2048
+#define CESA_SA_SRAM_PAYLOAD_SIZE		(cesa_dev->sram_size - \
+						 CESA_SA_DATA_SRAM_OFFSET)
+
+#define CESA_SA_DEFAULT_SRAM_SIZE		2048
+#define CESA_SA_MIN_SRAM_SIZE			1024
+
+#define CESA_SA_SRAM_MSK			(2048 - 1)
+
+#define CESA_MAX_HASH_BLOCK_SIZE		64
+#define CESA_HASH_BLOCK_SIZE_MSK		(CESA_MAX_HASH_BLOCK_SIZE - 1)
+
+/**
+ * struct mv_cesa_sec_accel_desc - security accelerator descriptor
+ * @config:	engine config
+ * @enc_p:	input and output data pointers for a cipher operation
+ * @enc_len:	cipher operation length
+ * @enc_key_p:	cipher key pointer
+ * @enc_iv:	cipher IV pointers
+ * @mac_src_p:	input pointer and total hash length
+ * @mac_digest:	digest pointer and hash operation length
+ * @mac_iv:	hmac IV pointers
+ *
+ * Structure passed to the CESA engine to describe the crypto operation
+ * to be executed.
+ */
+struct mv_cesa_sec_accel_desc {
+	__le32 config;
+	__le32 enc_p;
+	__le32 enc_len;
+	__le32 enc_key_p;
+	__le32 enc_iv;
+	__le32 mac_src_p;
+	__le32 mac_digest;
+	__le32 mac_iv;
+};
+
+/**
+ * struct mv_cesa_blkcipher_op_ctx - cipher operation context
+ * @key:	cipher key
+ * @iv:		cipher IV
+ *
+ * Context associated to a cipher operation.
+ */
+struct mv_cesa_blkcipher_op_ctx {
+	u32 key[8];
+	u32 iv[4];
+};
+
+/**
+ * struct mv_cesa_hash_op_ctx - hash or hmac operation context
+ * @iv:		hash initialization vector
+ * @hash:	hash value
+ *
+ * Context associated to a hash or hmac operation.
+ */
+struct mv_cesa_hash_op_ctx {
+	u32 iv[16];
+	u32 hash[8];
+};
+
+/**
+ * struct mv_cesa_op_ctx - crypto operation context
+ * @desc:	CESA descriptor
+ * @ctx:	context associated to the crypto operation
+ *
+ * Context associated to a crypto operation.
+ */
+struct mv_cesa_op_ctx {
+	struct mv_cesa_sec_accel_desc desc;
+	union {
+		struct mv_cesa_blkcipher_op_ctx blkcipher;
+		struct mv_cesa_hash_op_ctx hash;
+	} ctx;
+};
+
+/* TDMA descriptor flags */
+#define CESA_TDMA_DST_IN_SRAM			BIT(31)
+#define CESA_TDMA_SRC_IN_SRAM			BIT(30)
+#define CESA_TDMA_END_OF_REQ			BIT(29)
+#define CESA_TDMA_BREAK_CHAIN			BIT(28)
+#define CESA_TDMA_SET_STATE			BIT(27)
+#define CESA_TDMA_TYPE_MSK			GENMASK(26, 0)
+#define CESA_TDMA_DUMMY				0
+#define CESA_TDMA_DATA				1
+#define CESA_TDMA_OP				2
+#define CESA_TDMA_RESULT			3
+
+/**
+ * struct mv_cesa_tdma_desc - TDMA descriptor
+ * @byte_cnt:	number of bytes to transfer
+ * @src:	DMA address of the source
+ * @dst:	DMA address of the destination
+ * @next_dma:	DMA address of the next TDMA descriptor
+ * @cur_dma:	DMA address of this TDMA descriptor
+ * @next:	pointer to the next TDMA descriptor
+ * @op:		CESA operation attached to this TDMA descriptor
+ * @data:	raw data attached to this TDMA descriptor
+ * @flags:	flags describing the TDMA transfer. See the
+ *		"TDMA descriptor flags" section above
+ *
+ * TDMA descriptor used to create a transfer chain describing a crypto
+ * operation.
+ */
+struct mv_cesa_tdma_desc {
+	__le32 byte_cnt;
+	__le32 src;
+	__le32 dst;
+	__le32 next_dma;
+
+	/* Software state */
+	dma_addr_t cur_dma;
+	struct mv_cesa_tdma_desc *next;
+	union {
+		struct mv_cesa_op_ctx *op;
+		void *data;
+	};
+	u32 flags;
+};
+
+/**
+ * struct mv_cesa_sg_dma_iter - scatter-gather iterator
+ * @dir:	transfer direction
+ * @sg:		scatter list
+ * @offset:	current position in the scatter list
+ * @op_offset:	current position in the crypto operation
+ *
+ * Iterator used to iterate over a scatterlist while creating a TDMA chain for
+ * a crypto operation.
+ */
+struct mv_cesa_sg_dma_iter {
+	enum dma_data_direction dir;
+	struct scatterlist *sg;
+	unsigned int offset;
+	unsigned int op_offset;
+};
+
+/**
+ * struct mv_cesa_dma_iter - crypto operation iterator
+ * @len:	the crypto operation length
+ * @offset:	current position in the crypto operation
+ * @op_len:	sub-operation length (the crypto engine can only act on 2kb
+ *		chunks)
+ *
+ * Iterator used to create a TDMA chain for a given crypto operation.
+ */
+struct mv_cesa_dma_iter {
+	unsigned int len;
+	unsigned int offset;
+	unsigned int op_len;
+};
+
+/**
+ * struct mv_cesa_tdma_chain - TDMA chain
+ * @first:	first entry in the TDMA chain
+ * @last:	last entry in the TDMA chain
+ *
+ * Stores a TDMA chain for a specific crypto operation.
+ */
+struct mv_cesa_tdma_chain {
+	struct mv_cesa_tdma_desc *first;
+	struct mv_cesa_tdma_desc *last;
+};
+
+struct mv_cesa_engine;
+
+/**
+ * struct mv_cesa_caps - CESA device capabilities
+ * @nengines:		number of engines
+ * @has_tdma:		whether this device has a TDMA block
+ * @cipher_algs:	supported cipher algorithms
+ * @ncipher_algs:	number of supported cipher algorithms
+ * @ahash_algs:		supported hash algorithms
+ * @nahash_algs:	number of supported hash algorithms
+ *
+ * Structure used to describe CESA device capabilities.
+ */
+struct mv_cesa_caps {
+	int nengines;
+	bool has_tdma;
+	struct skcipher_alg **cipher_algs;
+	int ncipher_algs;
+	struct ahash_alg **ahash_algs;
+	int nahash_algs;
+};
+
+/**
+ * struct mv_cesa_dev_dma - DMA pools
+ * @tdma_desc_pool:	TDMA desc pool
+ * @op_pool:		crypto operation pool
+ * @cache_pool:		data cache pool (used by hash implementation when the
+ *			hash request is smaller than the hash block size)
+ * @padding_pool:	padding pool (used by hash implementation when hardware
+ *			padding cannot be used)
+ *
+ * Structure containing the different DMA pools used by this driver.
+ */
+struct mv_cesa_dev_dma {
+	struct dma_pool *tdma_desc_pool;
+	struct dma_pool *op_pool;
+	struct dma_pool *cache_pool;
+	struct dma_pool *padding_pool;
+};
+
+/**
+ * struct mv_cesa_dev - CESA device
+ * @caps:	device capabilities
+ * @regs:	device registers
+ * @dev:	pointer to the underlying struct device
+ * @sram_size:	usable SRAM size
+ * @lock:	device lock
+ * @engines:	array of engines
+ * @dma:	dma pools
+ *
+ * Structure storing CESA device information.
+ */
+struct mv_cesa_dev {
+	const struct mv_cesa_caps *caps;
+	void __iomem *regs;
+	struct device *dev;
+	unsigned int sram_size;
+	spinlock_t lock;
+	struct mv_cesa_engine *engines;
+	struct mv_cesa_dev_dma *dma;
+};
+
+/**
+ * struct mv_cesa_engine - CESA engine
+ * @id:			engine id
+ * @regs:		engine registers
+ * @sram:		SRAM memory region
+ * @sram_dma:		DMA address of the SRAM memory region
+ * @lock:		engine lock
+ * @req:		current crypto request
+ * @clk:		engine clk
+ * @zclk:		engine zclk
+ * @max_req_len:	maximum chunk length (useful to create the TDMA chain)
+ * @int_mask:		interrupt mask cache
+ * @pool:		memory pool pointing to the memory region reserved in
+ *			SRAM
+ * @queue:		fifo of the pending crypto requests
+ * @load:		engine load counter, useful for load balancing
+ * @chain:		list of the current tdma descriptors being processed
+ * 			by this engine.
+ * @complete_queue:	fifo of the processed requests by the engine
+ *
+ * Structure storing CESA engine information.
+ */
+struct mv_cesa_engine {
+	int id;
+	void __iomem *regs;
+	void __iomem *sram;
+	dma_addr_t sram_dma;
+	spinlock_t lock;
+	struct crypto_async_request *req;
+	struct clk *clk;
+	struct clk *zclk;
+	size_t max_req_len;
+	u32 int_mask;
+	struct gen_pool *pool;
+	struct crypto_queue queue;
+	atomic_t load;
+	struct mv_cesa_tdma_chain chain;
+	struct list_head complete_queue;
+};
+
+/**
+ * struct mv_cesa_req_ops - CESA request operations
+ * @process:	process a request chunk result (should return 0 if the
+ *		operation is complete, -EINPROGRESS if it needs more steps,
+ *		or an error code)
+ * @step:	launch the crypto operation on the next chunk
+ * @cleanup:	cleanup the crypto request (release associated data)
+ * @complete:	complete the request, i.e. copy result or context from sram when
+ * 		needed.
+ */
+struct mv_cesa_req_ops {
+	int (*process)(struct crypto_async_request *req, u32 status);
+	void (*step)(struct crypto_async_request *req);
+	void (*cleanup)(struct crypto_async_request *req);
+	void (*complete)(struct crypto_async_request *req);
+};
+
+/**
+ * struct mv_cesa_ctx - CESA operation context
+ * @ops:	crypto operations
+ *
+ * Base context structure inherited by operation specific ones.
+ */
+struct mv_cesa_ctx {
+	const struct mv_cesa_req_ops *ops;
+};
+
+/**
+ * struct mv_cesa_hash_ctx - CESA hash operation context
+ * @base:	base context structure
+ *
+ * Hash context structure.
+ */
+struct mv_cesa_hash_ctx {
+	struct mv_cesa_ctx base;
+};
+
+/**
+ * struct mv_cesa_hmac_ctx - CESA hmac operation context
+ * @base:	base context structure
+ * @iv:		initialization vectors
+ *
+ * HMAC context structure.
+ */
+struct mv_cesa_hmac_ctx {
+	struct mv_cesa_ctx base;
+	u32 iv[16];
+};
+
+/**
+ * enum mv_cesa_req_type - request type definitions
+ * @CESA_STD_REQ:	standard request
+ * @CESA_DMA_REQ:	DMA request
+ */
+enum mv_cesa_req_type {
+	CESA_STD_REQ,
+	CESA_DMA_REQ,
+};
+
+/**
+ * struct mv_cesa_req - CESA request
+ * @engine:	engine associated with this request
+ * @chain:	list of tdma descriptors associated with this request
+ */
+struct mv_cesa_req {
+	struct mv_cesa_engine *engine;
+	struct mv_cesa_tdma_chain chain;
+};
+
+/**
+ * struct mv_cesa_sg_std_iter - CESA scatter-gather iterator for standard
+ *				requests
+ * @iter:	sg mapping iterator
+ * @offset:	current offset in the SG entry mapped in memory
+ */
+struct mv_cesa_sg_std_iter {
+	struct sg_mapping_iter iter;
+	unsigned int offset;
+};
+
+/**
+ * struct mv_cesa_skcipher_std_req - cipher standard request
+ * @op:		operation context
+ * @offset:	current operation offset
+ * @size:	size of the crypto operation
+ * @skip_ctx:	whether the operation context has already been copied to the
+ *		engine SRAM, in which case only the descriptor part needs to
+ *		be updated
+ */
+struct mv_cesa_skcipher_std_req {
+	struct mv_cesa_op_ctx op;
+	unsigned int offset;
+	unsigned int size;
+	bool skip_ctx;
+};
+
+/**
+ * struct mv_cesa_skcipher_req - cipher request
+ * @base:	base CESA request
+ * @std:	standard (non-DMA) request information
+ * @src_nents:	number of entries in the src sg list
+ * @dst_nents:	number of entries in the dest sg list
+ */
+struct mv_cesa_skcipher_req {
+	struct mv_cesa_req base;
+	struct mv_cesa_skcipher_std_req std;
+	int src_nents;
+	int dst_nents;
+};
+
+/**
+ * struct mv_cesa_ahash_std_req - standard hash request
+ * @offset:	current operation offset
+ */
+struct mv_cesa_ahash_std_req {
+	unsigned int offset;
+};
+
+/**
+ * struct mv_cesa_ahash_dma_req - DMA hash request
+ * @padding:		padding buffer
+ * @padding_dma:	DMA address of the padding buffer
+ * @cache:		cache buffer
+ * @cache_dma:		DMA address of the cache buffer
+ */
+struct mv_cesa_ahash_dma_req {
+	u8 *padding;
+	dma_addr_t padding_dma;
+	u8 *cache;
+	dma_addr_t cache_dma;
+};
+
+/**
+ * struct mv_cesa_ahash_req - hash request
+ * @base:		base CESA request
+ * @req:		type specific request information
+ * @op_tmpl:		operation template used for each chunk of the request
+ * @cache:		cache buffer
+ * @cache_ptr:		write pointer in the cache buffer
+ * @len:		hash total length
+ * @src_nents:		number of entries in the scatterlist
+ * @last_req:		define whether the current operation is the last one
+ *			or not
+ * @algo_le:		whether the algorithm produces a little endian digest
+ *			(MD5) or a big endian one (SHA)
+ * @state:		hash state
+ */
+struct mv_cesa_ahash_req {
+	struct mv_cesa_req base;
+	union {
+		struct mv_cesa_ahash_dma_req dma;
+		struct mv_cesa_ahash_std_req std;
+	} req;
+	struct mv_cesa_op_ctx op_tmpl;
+	u8 cache[CESA_MAX_HASH_BLOCK_SIZE];
+	unsigned int cache_ptr;
+	u64 len;
+	int src_nents;
+	bool last_req;
+	bool algo_le;
+	u32 state[8];
+};
+
+/* CESA functions */
+
+extern struct mv_cesa_dev *cesa_dev;
+
+
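+/*
+ * Completion queue helpers: requests that have been fully processed are
+ * pushed on the engine's complete_queue list and later popped again for
+ * final completion handling.
+ */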
+static inline void
+mv_cesa_engine_enqueue_complete_request(struct mv_cesa_engine *engine,
+					struct crypto_async_request *req)
+{
+	list_add_tail(&req->list, &engine->complete_queue);
+}
+
+static inline struct crypto_async_request *
+mv_cesa_engine_dequeue_complete_request(struct mv_cesa_engine *engine)
+{
+	struct crypto_async_request *req;
+
+	req = list_first_entry_or_null(&engine->complete_queue,
+				       struct crypto_async_request,
+				       list);
+	if (req)
+		list_del(&req->list);
+
+	return req;
+}
+
+
+static inline enum mv_cesa_req_type
+mv_cesa_req_get_type(struct mv_cesa_req *req)
+{
+	return req->chain.first ? CESA_DMA_REQ : CESA_STD_REQ;
+}
+
+static inline void mv_cesa_update_op_cfg(struct mv_cesa_op_ctx *op,
+					 u32 cfg, u32 mask)
+{
+	op->desc.config &= cpu_to_le32(~mask);
+	op->desc.config |= cpu_to_le32(cfg);
+}
+
+static inline u32 mv_cesa_get_op_cfg(const struct mv_cesa_op_ctx *op)
+{
+	return le32_to_cpu(op->desc.config);
+}
+
+static inline void mv_cesa_set_op_cfg(struct mv_cesa_op_ctx *op, u32 cfg)
+{
+	op->desc.config = cpu_to_le32(cfg);
+}
+
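+/*
+ * Rebase the data, key, IV and MAC pointers of an operation descriptor on
+ * the engine's SRAM DMA offset, so that the accelerator fetches its operands
+ * from the per-engine SRAM region.
+ */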
+static inline void mv_cesa_adjust_op(struct mv_cesa_engine *engine,
+				     struct mv_cesa_op_ctx *op)
+{
+	u32 offset = engine->sram_dma & CESA_SA_SRAM_MSK;
+
+	op->desc.enc_p = CESA_SA_DESC_CRYPT_DATA(offset);
+	op->desc.enc_key_p = CESA_SA_DESC_CRYPT_KEY(offset);
+	op->desc.enc_iv = CESA_SA_DESC_CRYPT_IV(offset);
+	op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_DATA_MSK;
+	op->desc.mac_src_p |= CESA_SA_DESC_MAC_DATA(offset);
+	op->desc.mac_digest &= ~CESA_SA_DESC_MAC_DIGEST_MSK;
+	op->desc.mac_digest |= CESA_SA_DESC_MAC_DIGEST(offset);
+	op->desc.mac_iv = CESA_SA_DESC_MAC_IV(offset);
+}
+
+static inline void mv_cesa_set_crypt_op_len(struct mv_cesa_op_ctx *op, int len)
+{
+	op->desc.enc_len = cpu_to_le32(len);
+}
+
+static inline void mv_cesa_set_mac_op_total_len(struct mv_cesa_op_ctx *op,
+						int len)
+{
+	op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_TOTAL_LEN_MSK;
+	op->desc.mac_src_p |= CESA_SA_DESC_MAC_TOTAL_LEN(len);
+}
+
+static inline void mv_cesa_set_mac_op_frag_len(struct mv_cesa_op_ctx *op,
+					       int len)
+{
+	op->desc.mac_digest &= ~CESA_SA_DESC_MAC_FRAG_LEN_MSK;
+	op->desc.mac_digest |= CESA_SA_DESC_MAC_FRAG_LEN(len);
+}
+
+static inline void mv_cesa_set_int_mask(struct mv_cesa_engine *engine,
+					u32 int_mask)
+{
+	if (int_mask == engine->int_mask)
+		return;
+
+	writel_relaxed(int_mask, engine->regs + CESA_SA_INT_MSK);
+	engine->int_mask = int_mask;
+}
+
+static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine)
+{
+	return engine->int_mask;
+}
+
+static inline bool mv_cesa_mac_op_is_first_frag(const struct mv_cesa_op_ctx *op)
+{
+	return (mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK) ==
+		CESA_SA_DESC_CFG_FIRST_FRAG;
+}
+
+int mv_cesa_queue_req(struct crypto_async_request *req,
+		      struct mv_cesa_req *creq);
+
+struct crypto_async_request *
+mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
+			   struct crypto_async_request **backlog);
+
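+/*
+ * Simple load balancing: pick the engine with the lowest load counter and
+ * charge the request weight (number of bytes to process) to it. The counter
+ * is decremented again in the request completion handlers.
+ */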
+static inline struct mv_cesa_engine *mv_cesa_select_engine(int weight)
+{
+	int i;
+	u32 min_load = U32_MAX;
+	struct mv_cesa_engine *selected = NULL;
+
+	for (i = 0; i < cesa_dev->caps->nengines; i++) {
+		struct mv_cesa_engine *engine = cesa_dev->engines + i;
+		u32 load = atomic_read(&engine->load);
+		if (load < min_load) {
+			min_load = load;
+			selected = engine;
+		}
+	}
+
+	atomic_add(weight, &selected->load);
+
+	return selected;
+}
+
+/*
+ * Helper function that indicates whether a crypto request needs to be
+ * cleaned up or not after being enqueued using mv_cesa_queue_req().
+ */
+static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
+					    int ret)
+{
+	/*
+	 * The queue still had some space, the request was queued
+	 * normally, so there's no need to clean it up.
+	 */
+	if (ret == -EINPROGRESS)
+		return false;
+
+	/*
+	 * The queue had no space left, but since the request is
+	 * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
+	 * the backlog and will be processed later. There's no need to
+	 * clean it up.
+	 */
+	if (ret == -EBUSY)
+		return false;
+
+	/* Request wasn't queued, we need to clean it up */
+	return true;
+}
+
+/* TDMA functions */
+
+static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,
+					     unsigned int len)
+{
+	iter->len = len;
+	iter->op_len = min(len, CESA_SA_SRAM_PAYLOAD_SIZE);
+	iter->offset = 0;
+}
+
+static inline void mv_cesa_sg_dma_iter_init(struct mv_cesa_sg_dma_iter *iter,
+					    struct scatterlist *sg,
+					    enum dma_data_direction dir)
+{
+	iter->op_offset = 0;
+	iter->offset = 0;
+	iter->sg = sg;
+	iter->dir = dir;
+}
+
+static inline unsigned int
+mv_cesa_req_dma_iter_transfer_len(struct mv_cesa_dma_iter *iter,
+				  struct mv_cesa_sg_dma_iter *sgiter)
+{
+	return min(iter->op_len - sgiter->op_offset,
+		   sg_dma_len(sgiter->sg) - sgiter->offset);
+}
+
+bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *chain,
+					struct mv_cesa_sg_dma_iter *sgiter,
+					unsigned int len);
+
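+/*
+ * Advance the DMA iterator to the next chunk (capped to the SRAM payload
+ * size). Returns true if there is another chunk to process, false once the
+ * whole request length has been consumed.
+ */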
+static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter)
+{
+	iter->offset += iter->op_len;
+	iter->op_len = min(iter->len - iter->offset,
+			   CESA_SA_SRAM_PAYLOAD_SIZE);
+
+	return iter->op_len;
+}
+
+void mv_cesa_dma_step(struct mv_cesa_req *dreq);
+
+static inline int mv_cesa_dma_process(struct mv_cesa_req *dreq,
+				      u32 status)
+{
+	if (!(status & CESA_SA_INT_ACC0_IDMA_DONE))
+		return -EINPROGRESS;
+
+	if (status & CESA_SA_INT_IDMA_OWN_ERR)
+		return -EINVAL;
+
+	return 0;
+}
+
+void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
+			 struct mv_cesa_engine *engine);
+void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq);
+void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
+			struct mv_cesa_req *dreq);
+int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status);
+
+
+static inline void
+mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
+{
+	memset(chain, 0, sizeof(*chain));
+}
+
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+			  u32 size, u32 flags, gfp_t gfp_flags);
+
+struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
+					const struct mv_cesa_op_ctx *op_templ,
+					bool skip_ctx,
+					gfp_t flags);
+
+int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
+				  dma_addr_t dst, dma_addr_t src, u32 size,
+				  u32 flags, gfp_t gfp_flags);
+
+int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags);
+int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags);
+
+int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
+				 struct mv_cesa_dma_iter *dma_iter,
+				 struct mv_cesa_sg_dma_iter *sgiter,
+				 gfp_t gfp_flags);
+
+/* Algorithm definitions */
+
+extern struct ahash_alg mv_md5_alg;
+extern struct ahash_alg mv_sha1_alg;
+extern struct ahash_alg mv_sha256_alg;
+extern struct ahash_alg mv_ahmac_md5_alg;
+extern struct ahash_alg mv_ahmac_sha1_alg;
+extern struct ahash_alg mv_ahmac_sha256_alg;
+
+extern struct skcipher_alg mv_cesa_ecb_des_alg;
+extern struct skcipher_alg mv_cesa_cbc_des_alg;
+extern struct skcipher_alg mv_cesa_ecb_des3_ede_alg;
+extern struct skcipher_alg mv_cesa_cbc_des3_ede_alg;
+extern struct skcipher_alg mv_cesa_ecb_aes_alg;
+extern struct skcipher_alg mv_cesa_cbc_aes_alg;
+
+#endif /* __MARVELL_CESA_H__ */
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
new file mode 100644
index 0000000..0ae84ec
--- /dev/null
+++ b/drivers/crypto/marvell/cipher.c
@@ -0,0 +1,815 @@
+/*
+ * Cipher algorithms supported by the CESA: DES, 3DES and AES.
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Author: Arnaud Ebalard <arno@natisbad.org>
+ *
+ * This work is based on an initial version written by
+ * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <crypto/aes.h>
+#include <crypto/des.h>
+
+#include "cesa.h"
+
+struct mv_cesa_des_ctx {
+	struct mv_cesa_ctx base;
+	u8 key[DES_KEY_SIZE];
+};
+
+struct mv_cesa_des3_ctx {
+	struct mv_cesa_ctx base;
+	u8 key[DES3_EDE_KEY_SIZE];
+};
+
+struct mv_cesa_aes_ctx {
+	struct mv_cesa_ctx base;
+	struct crypto_aes_ctx aes;
+};
+
+struct mv_cesa_skcipher_dma_iter {
+	struct mv_cesa_dma_iter base;
+	struct mv_cesa_sg_dma_iter src;
+	struct mv_cesa_sg_dma_iter dst;
+};
+
+static inline void
+mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
+			       struct skcipher_request *req)
+{
+	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
+	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
+	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
+}
+
+static inline bool
+mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
+{
+	iter->src.op_offset = 0;
+	iter->dst.op_offset = 0;
+
+	return mv_cesa_req_dma_iter_next_op(&iter->base);
+}
+
+static inline void
+mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
+{
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+
+	if (req->dst != req->src) {
+		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
+			     DMA_FROM_DEVICE);
+		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
+			     DMA_TO_DEVICE);
+	} else {
+		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
+			     DMA_BIDIRECTIONAL);
+	}
+	mv_cesa_dma_cleanup(&creq->base);
+}
+
+static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
+{
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+
+	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+		mv_cesa_skcipher_dma_cleanup(req);
+}
+
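+/*
+ * Standard (non-DMA) mode: copy the operation descriptor and up to one SRAM
+ * payload worth of input data into the engine SRAM, then start the
+ * accelerator.
+ */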
+static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
+{
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
+	struct mv_cesa_engine *engine = creq->base.engine;
+	size_t  len = min_t(size_t, req->cryptlen - sreq->offset,
+			    CESA_SA_SRAM_PAYLOAD_SIZE);
+
+	mv_cesa_adjust_op(engine, &sreq->op);
+	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
+
+	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
+				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
+				 len, sreq->offset);
+
+	sreq->size = len;
+	mv_cesa_set_crypt_op_len(&sreq->op, len);
+
+	/* FIXME: only update enc_len field */
+	if (!sreq->skip_ctx) {
+		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
+		sreq->skip_ctx = true;
+	} else {
+		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
+	}
+
+	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
+	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
+	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
+	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
+	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
+}
+
+static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
+					u32 status)
+{
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
+	struct mv_cesa_engine *engine = creq->base.engine;
+	size_t len;
+
+	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
+				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
+				   sreq->size, sreq->offset);
+
+	sreq->offset += len;
+	if (sreq->offset < req->cryptlen)
+		return -EINPROGRESS;
+
+	return 0;
+}
+
+static int mv_cesa_skcipher_process(struct crypto_async_request *req,
+				    u32 status)
+{
+	struct skcipher_request *skreq = skcipher_request_cast(req);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
+	struct mv_cesa_req *basereq = &creq->base;
+
+	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
+		return mv_cesa_skcipher_std_process(skreq, status);
+
+	return mv_cesa_dma_process(basereq, status);
+}
+
+static void mv_cesa_skcipher_step(struct crypto_async_request *req)
+{
+	struct skcipher_request *skreq = skcipher_request_cast(req);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
+
+	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+		mv_cesa_dma_step(&creq->base);
+	else
+		mv_cesa_skcipher_std_step(skreq);
+}
+
+static inline void
+mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
+{
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct mv_cesa_req *basereq = &creq->base;
+
+	mv_cesa_dma_prepare(basereq, basereq->engine);
+}
+
+static inline void
+mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
+{
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
+
+	sreq->size = 0;
+	sreq->offset = 0;
+}
+
+static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
+					    struct mv_cesa_engine *engine)
+{
+	struct skcipher_request *skreq = skcipher_request_cast(req);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
+
+	creq->base.engine = engine;
+
+	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+		mv_cesa_skcipher_dma_prepare(skreq);
+	else
+		mv_cesa_skcipher_std_prepare(skreq);
+}
+
+static inline void
+mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
+{
+	struct skcipher_request *skreq = skcipher_request_cast(req);
+
+	mv_cesa_skcipher_cleanup(skreq);
+}
+
+static void
+mv_cesa_skcipher_complete(struct crypto_async_request *req)
+{
+	struct skcipher_request *skreq = skcipher_request_cast(req);
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
+	struct mv_cesa_engine *engine = creq->base.engine;
+	unsigned int ivsize;
+
+	atomic_sub(skreq->cryptlen, &engine->load);
+	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));
+
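+	/*
+	 * Copy the updated IV back into the request: from the last TDMA op
+	 * context in DMA mode, or directly from SRAM in standard mode.
+	 */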
+	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
+		struct mv_cesa_req *basereq;
+
+		basereq = &creq->base;
+		memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
+		       ivsize);
+	} else {
+		memcpy_fromio(skreq->iv,
+			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
+			      ivsize);
+	}
+}
+
+static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
+	.step = mv_cesa_skcipher_step,
+	.process = mv_cesa_skcipher_process,
+	.cleanup = mv_cesa_skcipher_req_cleanup,
+	.complete = mv_cesa_skcipher_complete,
+};
+
+static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
+{
+	void *ctx = crypto_tfm_ctx(tfm);
+
+	memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
+}
+
+static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
+{
+	struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->ops = &mv_cesa_skcipher_req_ops;
+
+	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+				    sizeof(struct mv_cesa_skcipher_req));
+
+	return 0;
+}
+
+static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
+			      unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	int remaining;
+	int offset;
+	int ret;
+	int i;
+
+	ret = crypto_aes_expand_key(&ctx->aes, key, len);
+	if (ret) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return ret;
+	}
+
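+	/*
+	 * For AES-192/256 keys, patch the extra decryption key words with the
+	 * corresponding words from the tail of the expanded encryption key
+	 * schedule (this is a no-op for AES-128, where remaining is 0).
+	 */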
+	remaining = (ctx->aes.key_length - 16) / 4;
+	offset = ctx->aes.key_length + 24 - remaining;
+	for (i = 0; i < remaining; i++)
+		ctx->aes.key_dec[4 + i] =
+			cpu_to_le32(ctx->aes.key_enc[offset + i]);
+
+	return 0;
+}
+
+static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
+			      unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+	int ret;
+
+	if (len != DES_KEY_SIZE) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ret = des_ekey(tmp, key);
+	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, DES_KEY_SIZE);
+
+	return 0;
+}
+
+static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
+				   const u8 *key, unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (len != DES3_EDE_KEY_SIZE) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
+
+	return 0;
+}
+
+static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
+					 const struct mv_cesa_op_ctx *op_templ)
+{
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		      GFP_KERNEL : GFP_ATOMIC;
+	struct mv_cesa_req *basereq = &creq->base;
+	struct mv_cesa_skcipher_dma_iter iter;
+	bool skip_ctx = false;
+	int ret;
+	unsigned int ivsize;
+
+	basereq->chain.first = NULL;
+	basereq->chain.last = NULL;
+
+	if (req->src != req->dst) {
+		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
+				 DMA_TO_DEVICE);
+		if (!ret)
+			return -ENOMEM;
+
+		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
+				 DMA_FROM_DEVICE);
+		if (!ret) {
+			ret = -ENOMEM;
+			goto err_unmap_src;
+		}
+	} else {
+		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
+				 DMA_BIDIRECTIONAL);
+		if (!ret)
+			return -ENOMEM;
+	}
+
+	mv_cesa_tdma_desc_iter_init(&basereq->chain);
+	mv_cesa_skcipher_req_iter_init(&iter, req);
+
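+	/*
+	 * Build the TDMA chain one SRAM payload at a time: add the operation
+	 * descriptor, the input transfers, a dummy descriptor launching the
+	 * engine, and finally the output transfers.
+	 */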
+	do {
+		struct mv_cesa_op_ctx *op;
+
+		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
+					flags);
+		if (IS_ERR(op)) {
+			ret = PTR_ERR(op);
+			goto err_free_tdma;
+		}
+		skip_ctx = true;
+
+		mv_cesa_set_crypt_op_len(op, iter.base.op_len);
+
+		/* Add input transfers */
+		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
+						   &iter.src, flags);
+		if (ret)
+			goto err_free_tdma;
+
+		/* Add dummy desc to launch the crypto operation */
+		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
+		if (ret)
+			goto err_free_tdma;
+
+		/* Add output transfers */
+		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
+						   &iter.dst, flags);
+		if (ret)
+			goto err_free_tdma;
+
+	} while (mv_cesa_skcipher_req_iter_next_op(&iter));
+
+	/* Add output data for IV */
+	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
+	ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
+				    CESA_SA_DATA_SRAM_OFFSET,
+				    CESA_TDMA_SRC_IN_SRAM, flags);
+
+	if (ret)
+		goto err_free_tdma;
+
+	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
+
+	return 0;
+
+err_free_tdma:
+	mv_cesa_dma_cleanup(basereq);
+	if (req->dst != req->src)
+		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
+			     DMA_FROM_DEVICE);
+
+err_unmap_src:
+	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
+		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+
+	return ret;
+}
+
+static inline int
+mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
+			      const struct mv_cesa_op_ctx *op_templ)
+{
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
+	struct mv_cesa_req *basereq = &creq->base;
+
+	sreq->op = *op_templ;
+	sreq->skip_ctx = false;
+	basereq->chain.first = NULL;
+	basereq->chain.last = NULL;
+
+	return 0;
+}
+
+static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
+				     struct mv_cesa_op_ctx *tmpl)
+{
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	unsigned int blksize = crypto_skcipher_blocksize(tfm);
+	int ret;
+
+	if (!IS_ALIGNED(req->cryptlen, blksize))
+		return -EINVAL;
+
+	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
+	if (creq->src_nents < 0) {
+		dev_err(cesa_dev->dev, "Invalid number of src SG\n");
+		return creq->src_nents;
+	}
+	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+	if (creq->dst_nents < 0) {
+		dev_err(cesa_dev->dev, "Invalid number of dst SG\n");
+		return creq->dst_nents;
+	}
+
+	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
+			      CESA_SA_DESC_CFG_OP_MSK);
+
+	if (cesa_dev->caps->has_tdma)
+		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
+	else
+		ret = mv_cesa_skcipher_std_req_init(req, tmpl);
+
+	return ret;
+}
+
+static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
+				      struct mv_cesa_op_ctx *tmpl)
+{
+	int ret;
+	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct mv_cesa_engine *engine;
+
+	ret = mv_cesa_skcipher_req_init(req, tmpl);
+	if (ret)
+		return ret;
+
+	engine = mv_cesa_select_engine(req->cryptlen);
+	mv_cesa_skcipher_prepare(&req->base, engine);
+
+	ret = mv_cesa_queue_req(&req->base, &creq->base);
+
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
+		mv_cesa_skcipher_cleanup(req);
+
+	return ret;
+}
+
+static int mv_cesa_des_op(struct skcipher_request *req,
+			  struct mv_cesa_op_ctx *tmpl)
+{
+	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
+			      CESA_SA_DESC_CFG_CRYPTM_MSK);
+
+	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
+
+	return mv_cesa_skcipher_queue_req(req, tmpl);
+}
+
+static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
+{
+	struct mv_cesa_op_ctx tmpl;
+
+	mv_cesa_set_op_cfg(&tmpl,
+			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
+			   CESA_SA_DESC_CFG_DIR_ENC);
+
+	return mv_cesa_des_op(req, &tmpl);
+}
+
+static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
+{
+	struct mv_cesa_op_ctx tmpl;
+
+	mv_cesa_set_op_cfg(&tmpl,
+			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
+			   CESA_SA_DESC_CFG_DIR_DEC);
+
+	return mv_cesa_des_op(req, &tmpl);
+}
+
+struct skcipher_alg mv_cesa_ecb_des_alg = {
+	.setkey = mv_cesa_des_setkey,
+	.encrypt = mv_cesa_ecb_des_encrypt,
+	.decrypt = mv_cesa_ecb_des_decrypt,
+	.min_keysize = DES_KEY_SIZE,
+	.max_keysize = DES_KEY_SIZE,
+	.base = {
+		.cra_name = "ecb(des)",
+		.cra_driver_name = "mv-ecb-des",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = DES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_init = mv_cesa_skcipher_cra_init,
+		.cra_exit = mv_cesa_skcipher_cra_exit,
+	},
+};
+
+static int mv_cesa_cbc_des_op(struct skcipher_request *req,
+			      struct mv_cesa_op_ctx *tmpl)
+{
+	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
+			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
+
+	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);
+
+	return mv_cesa_des_op(req, tmpl);
+}
+
+static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
+{
+	struct mv_cesa_op_ctx tmpl;
+
+	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);
+
+	return mv_cesa_cbc_des_op(req, &tmpl);
+}
+
+static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
+{
+	struct mv_cesa_op_ctx tmpl;
+
+	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);
+
+	return mv_cesa_cbc_des_op(req, &tmpl);
+}
+
+struct skcipher_alg mv_cesa_cbc_des_alg = {
+	.setkey = mv_cesa_des_setkey,
+	.encrypt = mv_cesa_cbc_des_encrypt,
+	.decrypt = mv_cesa_cbc_des_decrypt,
+	.min_keysize = DES_KEY_SIZE,
+	.max_keysize = DES_KEY_SIZE,
+	.ivsize = DES_BLOCK_SIZE,
+	.base = {
+		.cra_name = "cbc(des)",
+		.cra_driver_name = "mv-cbc-des",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = DES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_init = mv_cesa_skcipher_cra_init,
+		.cra_exit = mv_cesa_skcipher_cra_exit,
+	},
+};
+
+static int mv_cesa_des3_op(struct skcipher_request *req,
+			   struct mv_cesa_op_ctx *tmpl)
+{
+	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
+			      CESA_SA_DESC_CFG_CRYPTM_MSK);
+
+	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
+
+	return mv_cesa_skcipher_queue_req(req, tmpl);
+}
+
+static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
+{
+	struct mv_cesa_op_ctx tmpl;
+
+	mv_cesa_set_op_cfg(&tmpl,
+			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
+			   CESA_SA_DESC_CFG_3DES_EDE |
+			   CESA_SA_DESC_CFG_DIR_ENC);
+
+	return mv_cesa_des3_op(req, &tmpl);
+}
+
+static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
+{
+	struct mv_cesa_op_ctx tmpl;
+
+	mv_cesa_set_op_cfg(&tmpl,
+			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
+			   CESA_SA_DESC_CFG_3DES_EDE |
+			   CESA_SA_DESC_CFG_DIR_DEC);
+
+	return mv_cesa_des3_op(req, &tmpl);
+}
+
+struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
+	.setkey = mv_cesa_des3_ede_setkey,
+	.encrypt = mv_cesa_ecb_des3_ede_encrypt,
+	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
+	.min_keysize = DES3_EDE_KEY_SIZE,
+	.max_keysize = DES3_EDE_KEY_SIZE,
+	.ivsize = DES3_EDE_BLOCK_SIZE,
+	.base = {
+		.cra_name = "ecb(des3_ede)",
+		.cra_driver_name = "mv-ecb-des3-ede",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_init = mv_cesa_skcipher_cra_init,
+		.cra_exit = mv_cesa_skcipher_cra_exit,
+	},
+};
+
+static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
+			       struct mv_cesa_op_ctx *tmpl)
+{
+	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);
+
+	return mv_cesa_des3_op(req, tmpl);
+}
+
+static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
+{
+	struct mv_cesa_op_ctx tmpl;
+
+	mv_cesa_set_op_cfg(&tmpl,
+			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
+			   CESA_SA_DESC_CFG_3DES_EDE |
+			   CESA_SA_DESC_CFG_DIR_ENC);
+
+	return mv_cesa_cbc_des3_op(req, &tmpl);
+}
+
+static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
+{
+	struct mv_cesa_op_ctx tmpl;
+
+	mv_cesa_set_op_cfg(&tmpl,
+			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
+			   CESA_SA_DESC_CFG_3DES_EDE |
+			   CESA_SA_DESC_CFG_DIR_DEC);
+
+	return mv_cesa_cbc_des3_op(req, &tmpl);
+}
+
+struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
+	.setkey = mv_cesa_des3_ede_setkey,
+	.encrypt = mv_cesa_cbc_des3_ede_encrypt,
+	.decrypt = mv_cesa_cbc_des3_ede_decrypt,
+	.min_keysize = DES3_EDE_KEY_SIZE,
+	.max_keysize = DES3_EDE_KEY_SIZE,
+	.ivsize = DES3_EDE_BLOCK_SIZE,
+	.base = {
+		.cra_name = "cbc(des3_ede)",
+		.cra_driver_name = "mv-cbc-des3-ede",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_init = mv_cesa_skcipher_cra_init,
+		.cra_exit = mv_cesa_skcipher_cra_exit,
+	},
+};
+
+static int mv_cesa_aes_op(struct skcipher_request *req,
+			  struct mv_cesa_op_ctx *tmpl)
+{
+	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	int i;
+	u32 *key;
+	u32 cfg;
+
+	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;
+
+	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
+		key = ctx->aes.key_dec;
+	else
+		key = ctx->aes.key_enc;
+
+	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
+		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);
+
+	if (ctx->aes.key_length == 24)
+		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
+	else if (ctx->aes.key_length == 32)
+		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;
+
+	mv_cesa_update_op_cfg(tmpl, cfg,
+			      CESA_SA_DESC_CFG_CRYPTM_MSK |
+			      CESA_SA_DESC_CFG_AES_LEN_MSK);
+
+	return mv_cesa_skcipher_queue_req(req, tmpl);
+}
+
+static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
+{
+	struct mv_cesa_op_ctx tmpl;
+
+	mv_cesa_set_op_cfg(&tmpl,
+			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
+			   CESA_SA_DESC_CFG_DIR_ENC);
+
+	return mv_cesa_aes_op(req, &tmpl);
+}
+
+static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
+{
+	struct mv_cesa_op_ctx tmpl;
+
+	mv_cesa_set_op_cfg(&tmpl,
+			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
+			   CESA_SA_DESC_CFG_DIR_DEC);
+
+	return mv_cesa_aes_op(req, &tmpl);
+}
+
+struct skcipher_alg mv_cesa_ecb_aes_alg = {
+	.setkey = mv_cesa_aes_setkey,
+	.encrypt = mv_cesa_ecb_aes_encrypt,
+	.decrypt = mv_cesa_ecb_aes_decrypt,
+	.min_keysize = AES_MIN_KEY_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE,
+	.base = {
+		.cra_name = "ecb(aes)",
+		.cra_driver_name = "mv-ecb-aes",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_init = mv_cesa_skcipher_cra_init,
+		.cra_exit = mv_cesa_skcipher_cra_exit,
+	},
+};
+
+static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
+			      struct mv_cesa_op_ctx *tmpl)
+{
+	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
+			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
+	memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);
+
+	return mv_cesa_aes_op(req, tmpl);
+}
+
+static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
+{
+	struct mv_cesa_op_ctx tmpl;
+
+	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);
+
+	return mv_cesa_cbc_aes_op(req, &tmpl);
+}
+
+static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
+{
+	struct mv_cesa_op_ctx tmpl;
+
+	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);
+
+	return mv_cesa_cbc_aes_op(req, &tmpl);
+}
+
+struct skcipher_alg mv_cesa_cbc_aes_alg = {
+	.setkey = mv_cesa_aes_setkey,
+	.encrypt = mv_cesa_cbc_aes_encrypt,
+	.decrypt = mv_cesa_cbc_aes_decrypt,
+	.min_keysize = AES_MIN_KEY_SIZE,
+	.max_keysize = AES_MAX_KEY_SIZE,
+	.ivsize = AES_BLOCK_SIZE,
+	.base = {
+		.cra_name = "cbc(aes)",
+		.cra_driver_name = "mv-cbc-aes",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_init = mv_cesa_skcipher_cra_init,
+		.cra_exit = mv_cesa_skcipher_cra_exit,
+	},
+};
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
new file mode 100644
index 0000000..99ff54c
--- /dev/null
+++ b/drivers/crypto/marvell/hash.c
@@ -0,0 +1,1447 @@
+/*
+ * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Author: Arnaud Ebalard <arno@natisbad.org>
+ *
+ * This work is based on an initial version written by
+ * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <crypto/hmac.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+
+#include "cesa.h"
+
+struct mv_cesa_ahash_dma_iter {
+	struct mv_cesa_dma_iter base;
+	struct mv_cesa_sg_dma_iter src;
+};
+
+static inline void
+mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
+			    struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	unsigned int len = req->nbytes + creq->cache_ptr;
+
+	if (!creq->last_req)
+		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
+
+	mv_cesa_req_dma_iter_init(&iter->base, len);
+	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
+	iter->src.op_offset = creq->cache_ptr;
+}
+
+static inline bool
+mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
+{
+	iter->src.op_offset = 0;
+
+	return mv_cesa_req_dma_iter_next_op(&iter->base);
+}
+
+static inline int
+mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
+{
+	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
+				    &req->cache_dma);
+	if (!req->cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static inline void
+mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
+{
+	if (!req->cache)
+		return;
+
+	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
+		      req->cache_dma);
+}
+
+static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
+					   gfp_t flags)
+{
+	if (req->padding)
+		return 0;
+
+	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
+				      &req->padding_dma);
+	if (!req->padding)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
+{
+	if (!req->padding)
+		return;
+
+	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
+		      req->padding_dma);
+	req->padding = NULL;
+}
+
+static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+
+	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
+}
+
+static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+
+	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
+	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
+	mv_cesa_dma_cleanup(&creq->base);
+}
+
+static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+
+	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+		mv_cesa_ahash_dma_cleanup(req);
+}
+
+static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+
+	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+		mv_cesa_ahash_dma_last_cleanup(req);
+}
+
+static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
+{
+	unsigned int index, padlen;
+
+	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
+	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);
+
+	return padlen;
+}
+
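+/*
+ * Write MD5/SHA style padding into @buf: a 0x80 byte, zero padding up to
+ * 56 mod 64, then the total message length in bits as a 64-bit value,
+ * little endian for MD5 and big endian for SHA. Returns the number of
+ * padding bytes written (including the length field).
+ */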
+static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
+{
+	unsigned int index, padlen;
+
+	buf[0] = 0x80;
+	/* Pad out to 56 mod 64 */
+	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
+	padlen = mv_cesa_ahash_pad_len(creq);
+	memset(buf + 1, 0, padlen - 1);
+
+	if (creq->algo_le) {
+		__le64 bits = cpu_to_le64(creq->len << 3);
+		memcpy(buf + padlen, &bits, sizeof(bits));
+	} else {
+		__be64 bits = cpu_to_be64(creq->len << 3);
+		memcpy(buf + padlen, &bits, sizeof(bits));
+	}
+
+	return padlen + 8;
+}
+
+static void mv_cesa_ahash_std_step(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
+	struct mv_cesa_engine *engine = creq->base.engine;
+	struct mv_cesa_op_ctx *op;
+	unsigned int new_cache_ptr = 0;
+	u32 frag_mode;
+	size_t  len;
+	unsigned int digsize;
+	int i;
+
+	mv_cesa_adjust_op(engine, &creq->op_tmpl);
+	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
+
+	if (!sreq->offset) {
+		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+		for (i = 0; i < digsize / 4; i++)
+			writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
+	}
+
+	if (creq->cache_ptr)
+		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
+			    creq->cache, creq->cache_ptr);
+
+	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
+		    CESA_SA_SRAM_PAYLOAD_SIZE);
+
+	if (!creq->last_req) {
+		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
+		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
+	}
+
+	if (len - creq->cache_ptr)
+		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
+						   engine->sram +
+						   CESA_SA_DATA_SRAM_OFFSET +
+						   creq->cache_ptr,
+						   len - creq->cache_ptr,
+						   sreq->offset);
+
+	op = &creq->op_tmpl;
+
+	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;
+
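+	/*
+	 * If this is the final chunk of the last request and the total length
+	 * fits in the descriptor, promote the fragment mode (FIRST -> NOT_FRAG,
+	 * MID -> LAST) so the engine produces the final digest.
+	 */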
+	if (creq->last_req && sreq->offset == req->nbytes &&
+	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
+		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
+			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
+		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
+			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
+	}
+
+	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
+	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
+		if (len &&
+		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
+			mv_cesa_set_mac_op_total_len(op, creq->len);
+		} else {
+			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;
+
+			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
+				len &= CESA_HASH_BLOCK_SIZE_MSK;
+				new_cache_ptr = 64 - trailerlen;
+				memcpy_fromio(creq->cache,
+					      engine->sram +
+					      CESA_SA_DATA_SRAM_OFFSET + len,
+					      new_cache_ptr);
+			} else {
+				len += mv_cesa_ahash_pad_req(creq,
+						engine->sram + len +
+						CESA_SA_DATA_SRAM_OFFSET);
+			}
+
+			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
+				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
+			else
+				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
+		}
+	}
+
+	mv_cesa_set_mac_op_frag_len(op, len);
+	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);
+
+	/* FIXME: only update enc_len field */
+	memcpy_toio(engine->sram, op, sizeof(*op));
+
+	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
+		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
+				      CESA_SA_DESC_CFG_FRAG_MSK);
+
+	creq->cache_ptr = new_cache_ptr;
+
+	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
+	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
+	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
+	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
+	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
+}
+
+static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
+
+	if (sreq->offset < (req->nbytes - creq->cache_ptr))
+		return -EINPROGRESS;
+
+	return 0;
+}
+
+static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_req *basereq = &creq->base;
+
+	mv_cesa_dma_prepare(basereq, basereq->engine);
+}
+
+static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
+
+	sreq->offset = 0;
+}
+
+static void mv_cesa_ahash_dma_step(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_req *base = &creq->base;
+
+	/* We must explicitly set the digest state. */
+	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
+		struct mv_cesa_engine *engine = base->engine;
+		int i;
+
+		/* Set the hash state in the IVDIG regs. */
+		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
+			writel_relaxed(creq->state[i], engine->regs +
+				       CESA_IVDIG(i));
+	}
+
+	mv_cesa_dma_step(base);
+}
+
+static void mv_cesa_ahash_step(struct crypto_async_request *req)
+{
+	struct ahash_request *ahashreq = ahash_request_cast(req);
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
+
+	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+		mv_cesa_ahash_dma_step(ahashreq);
+	else
+		mv_cesa_ahash_std_step(ahashreq);
+}
+
+static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
+{
+	struct ahash_request *ahashreq = ahash_request_cast(req);
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
+
+	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+		return mv_cesa_dma_process(&creq->base, status);
+
+	return mv_cesa_ahash_std_process(ahashreq, status);
+}
+
+static void mv_cesa_ahash_complete(struct crypto_async_request *req)
+{
+	struct ahash_request *ahashreq = ahash_request_cast(req);
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
+	struct mv_cesa_engine *engine = creq->base.engine;
+	unsigned int digsize;
+	int i;
+
+	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
+
+	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
+	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
+		__le32 *data = NULL;
+
+		/*
+		 * Result is already in the correct endianness when the SA is
+		 * used
+		 */
+		data = creq->base.chain.last->op->ctx.hash.hash;
+		for (i = 0; i < digsize / 4; i++)
+			creq->state[i] = cpu_to_le32(data[i]);
+
+		memcpy(ahashreq->result, data, digsize);
+	} else {
+		for (i = 0; i < digsize / 4; i++)
+			creq->state[i] = readl_relaxed(engine->regs +
+						       CESA_IVDIG(i));
+		if (creq->last_req) {
+			/*
+			 * The hardware's MD5 digest is in little endian
+			 * format, while its SHA digests are in big endian
+			 * format.
+			 */
+			if (creq->algo_le) {
+				__le32 *result = (void *)ahashreq->result;
+
+				for (i = 0; i < digsize / 4; i++)
+					result[i] = cpu_to_le32(creq->state[i]);
+			} else {
+				__be32 *result = (void *)ahashreq->result;
+
+				for (i = 0; i < digsize / 4; i++)
+					result[i] = cpu_to_be32(creq->state[i]);
+			}
+		}
+	}
+
+	atomic_sub(ahashreq->nbytes, &engine->load);
+}
+
+static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
+				  struct mv_cesa_engine *engine)
+{
+	struct ahash_request *ahashreq = ahash_request_cast(req);
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
+
+	creq->base.engine = engine;
+
+	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+		mv_cesa_ahash_dma_prepare(ahashreq);
+	else
+		mv_cesa_ahash_std_prepare(ahashreq);
+}
+
+static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
+{
+	struct ahash_request *ahashreq = ahash_request_cast(req);
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
+
+	if (creq->last_req)
+		mv_cesa_ahash_last_cleanup(ahashreq);
+
+	mv_cesa_ahash_cleanup(ahashreq);
+
+	if (creq->cache_ptr)
+		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
+				   creq->cache,
+				   creq->cache_ptr,
+				   ahashreq->nbytes - creq->cache_ptr);
+}
+
+static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
+	.step = mv_cesa_ahash_step,
+	.process = mv_cesa_ahash_process,
+	.cleanup = mv_cesa_ahash_req_cleanup,
+	.complete = mv_cesa_ahash_complete,
+};
+
+static void mv_cesa_ahash_init(struct ahash_request *req,
+			      struct mv_cesa_op_ctx *tmpl, bool algo_le)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+
+	memset(creq, 0, sizeof(*creq));
+	mv_cesa_update_op_cfg(tmpl,
+			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
+			      CESA_SA_DESC_CFG_FIRST_FRAG,
+			      CESA_SA_DESC_CFG_OP_MSK |
+			      CESA_SA_DESC_CFG_FRAG_MSK);
+	mv_cesa_set_mac_op_total_len(tmpl, 0);
+	mv_cesa_set_mac_op_frag_len(tmpl, 0);
+	creq->op_tmpl = *tmpl;
+	creq->len = 0;
+	creq->algo_le = algo_le;
+}
+
+static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
+{
+	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->base.ops = &mv_cesa_ahash_req_ops;
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct mv_cesa_ahash_req));
+	return 0;
+}
+
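+/*
+ * Buffer the request data in the software cache when it does not fill a
+ * complete hash block and this is not the final request. Returns true if the
+ * data was cached and no hardware operation is needed yet.
+ */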
+static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	bool cached = false;
+
+	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
+	    !creq->last_req) {
+		cached = true;
+
+		if (!req->nbytes)
+			return cached;
+
+		sg_pcopy_to_buffer(req->src, creq->src_nents,
+				   creq->cache + creq->cache_ptr,
+				   req->nbytes, 0);
+
+		creq->cache_ptr += req->nbytes;
+	}
+
+	return cached;
+}
+
+static struct mv_cesa_op_ctx *
+mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
+		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
+		     gfp_t flags)
+{
+	struct mv_cesa_op_ctx *op;
+	int ret;
+
+	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
+	if (IS_ERR(op))
+		return op;
+
+	/* Set the operation block fragment length. */
+	mv_cesa_set_mac_op_frag_len(op, frag_len);
+
+	/* Append dummy desc to launch operation */
+	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (mv_cesa_mac_op_is_first_frag(tmpl))
+		mv_cesa_update_op_cfg(tmpl,
+				      CESA_SA_DESC_CFG_MID_FRAG,
+				      CESA_SA_DESC_CFG_FRAG_MSK);
+
+	return op;
+}
+
+static int
+mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
+			    struct mv_cesa_ahash_req *creq,
+			    gfp_t flags)
+{
+	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
+	int ret;
+
+	if (!creq->cache_ptr)
+		return 0;
+
+	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
+	if (ret)
+		return ret;
+
+	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);
+
+	return mv_cesa_dma_add_data_transfer(chain,
+					     CESA_SA_DATA_SRAM_OFFSET,
+					     ahashdreq->cache_dma,
+					     creq->cache_ptr,
+					     CESA_TDMA_DST_IN_SRAM,
+					     flags);
+}
+
+static struct mv_cesa_op_ctx *
+mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
+			   struct mv_cesa_ahash_dma_iter *dma_iter,
+			   struct mv_cesa_ahash_req *creq,
+			   unsigned int frag_len, gfp_t flags)
+{
+	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
+	unsigned int len, trailerlen, padoff = 0;
+	struct mv_cesa_op_ctx *op;
+	int ret;
+
+	/*
+	 * If the transfer is smaller than our maximum length, and we have
+	 * some data outstanding, we can ask the engine to finish the hash.
+	 */
+	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
+		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
+					  flags);
+		if (IS_ERR(op))
+			return op;
+
+		mv_cesa_set_mac_op_total_len(op, creq->len);
+		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
+						CESA_SA_DESC_CFG_NOT_FRAG :
+						CESA_SA_DESC_CFG_LAST_FRAG,
+				      CESA_SA_DESC_CFG_FRAG_MSK);
+
+		ret = mv_cesa_dma_add_result_op(chain,
+						CESA_SA_CFG_SRAM_OFFSET,
+						CESA_SA_DATA_SRAM_OFFSET,
+						CESA_TDMA_SRC_IN_SRAM, flags);
+		if (ret)
+			return ERR_PTR(-ENOMEM);
+		return op;
+	}
+
+	/*
+	 * The request is longer than the engine can handle, or we have
+	 * no data outstanding. Manually generate the padding, adding it
+	 * as a "mid" fragment.
+	 */
+	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
+	if (ret)
+		return ERR_PTR(ret);
+
+	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);
+
+	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
+	if (len) {
+		ret = mv_cesa_dma_add_data_transfer(chain,
+						CESA_SA_DATA_SRAM_OFFSET +
+						frag_len,
+						ahashdreq->padding_dma,
+						len, CESA_TDMA_DST_IN_SRAM,
+						flags);
+		if (ret)
+			return ERR_PTR(ret);
+
+		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
+					  flags);
+		if (IS_ERR(op))
+			return op;
+
+		if (len == trailerlen)
+			return op;
+
+		padoff += len;
+	}
+
+	ret = mv_cesa_dma_add_data_transfer(chain,
+					    CESA_SA_DATA_SRAM_OFFSET,
+					    ahashdreq->padding_dma +
+					    padoff,
+					    trailerlen - padoff,
+					    CESA_TDMA_DST_IN_SRAM,
+					    flags);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
+				    flags);
+}
+
+static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		      GFP_KERNEL : GFP_ATOMIC;
+	struct mv_cesa_req *basereq = &creq->base;
+	struct mv_cesa_ahash_dma_iter iter;
+	struct mv_cesa_op_ctx *op = NULL;
+	unsigned int frag_len;
+	bool set_state = false;
+	int ret;
+	u32 type;
+
+	basereq->chain.first = NULL;
+	basereq->chain.last = NULL;
+
+	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
+		set_state = true;
+
+	if (creq->src_nents) {
+		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
+				 DMA_TO_DEVICE);
+		if (!ret) {
+			ret = -ENOMEM;
+			goto err;
+		}
+	}
+
+	mv_cesa_tdma_desc_iter_init(&basereq->chain);
+	mv_cesa_ahash_req_iter_init(&iter, req);
+
+	/*
+	 * Add the cache (left-over data from a previous block) first.
+	 * This will never overflow the SRAM size.
+	 */
+	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
+	if (ret)
+		goto err_free_tdma;
+
+	if (iter.src.sg) {
+		/*
+		 * Add all the new data, inserting an operation block and
+		 * launch command between each full SRAM block-worth of
+		 * data. We intentionally do not add the final op block.
+		 */
+		while (true) {
+			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
+							   &iter.base,
+							   &iter.src, flags);
+			if (ret)
+				goto err_free_tdma;
+
+			frag_len = iter.base.op_len;
+
+			if (!mv_cesa_ahash_req_iter_next_op(&iter))
+				break;
+
+			op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
+						  frag_len, flags);
+			if (IS_ERR(op)) {
+				ret = PTR_ERR(op);
+				goto err_free_tdma;
+			}
+		}
+	} else {
+		/* Account for the data that was in the cache. */
+		frag_len = iter.base.op_len;
+	}
+
+	/*
+	 * At this point, frag_len indicates whether we have any data
+	 * outstanding which needs an operation.  Queue up the final
+	 * operation, which depends whether this is the final request.
+	 */
+	if (creq->last_req)
+		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
+						frag_len, flags);
+	else if (frag_len)
+		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
+					  frag_len, flags);
+
+	if (IS_ERR(op)) {
+		ret = PTR_ERR(op);
+		goto err_free_tdma;
+	}
+
+	/*
+	 * If results are copied via DMA, this means that this
+	 * request can be directly processed by the engine,
+	 * without partial updates. So we can chain it at the
+	 * DMA level with other requests.
+	 */
+	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;
+
+	if (op && type != CESA_TDMA_RESULT) {
+		/* Add dummy desc to wait for crypto operation end */
+		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
+		if (ret)
+			goto err_free_tdma;
+	}
+
+	if (!creq->last_req)
+		creq->cache_ptr = req->nbytes + creq->cache_ptr -
+				  iter.base.len;
+	else
+		creq->cache_ptr = 0;
+
+	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
+
+	if (type != CESA_TDMA_RESULT)
+		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
+
+	if (set_state) {
+		/*
+		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
+		 * let the step logic know that the IVDIG registers should be
+		 * explicitly set before launching a TDMA chain.
+		 */
+		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
+	}
+
+	return 0;
+
+err_free_tdma:
+	mv_cesa_dma_cleanup(basereq);
+	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
+
+err:
+	mv_cesa_ahash_last_cleanup(req);
+
+	return ret;
+}
+
+static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+
+	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (creq->src_nents < 0) {
+		dev_err(cesa_dev->dev, "Invalid number of src SG\n");
+		return creq->src_nents;
+	}
+
+	*cached = mv_cesa_ahash_cache_req(req);
+
+	if (*cached)
+		return 0;
+
+	if (cesa_dev->caps->has_tdma)
+		return mv_cesa_ahash_dma_req_init(req);
+	else
+		return 0;
+}
+
+static int mv_cesa_ahash_queue_req(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_engine *engine;
+	bool cached = false;
+	int ret;
+
+	ret = mv_cesa_ahash_req_init(req, &cached);
+	if (ret)
+		return ret;
+
+	if (cached)
+		return 0;
+
+	engine = mv_cesa_select_engine(req->nbytes);
+	mv_cesa_ahash_prepare(&req->base, engine);
+
+	ret = mv_cesa_queue_req(&req->base, &creq->base);
+
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
+		mv_cesa_ahash_cleanup(req);
+
+	return ret;
+}
+
+static int mv_cesa_ahash_update(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+
+	creq->len += req->nbytes;
+
+	return mv_cesa_ahash_queue_req(req);
+}
+
+static int mv_cesa_ahash_final(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
+
+	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
+	creq->last_req = true;
+	req->nbytes = 0;
+
+	return mv_cesa_ahash_queue_req(req);
+}
+
+static int mv_cesa_ahash_finup(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
+
+	creq->len += req->nbytes;
+	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
+	creq->last_req = true;
+
+	return mv_cesa_ahash_queue_req(req);
+}
+
+static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
+				u64 *len, void *cache)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	unsigned int digsize = crypto_ahash_digestsize(ahash);
+	unsigned int blocksize;
+
+	blocksize = crypto_ahash_blocksize(ahash);
+
+	*len = creq->len;
+	memcpy(hash, creq->state, digsize);
+	memset(cache, 0, blocksize);
+	memcpy(cache, creq->cache, creq->cache_ptr);
+
+	return 0;
+}
+
+static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
+				u64 len, const void *cache)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	unsigned int digsize = crypto_ahash_digestsize(ahash);
+	unsigned int blocksize;
+	unsigned int cache_ptr;
+	int ret;
+
+	ret = crypto_ahash_init(req);
+	if (ret)
+		return ret;
+
+	blocksize = crypto_ahash_blocksize(ahash);
+	if (len >= blocksize)
+		mv_cesa_update_op_cfg(&creq->op_tmpl,
+				      CESA_SA_DESC_CFG_MID_FRAG,
+				      CESA_SA_DESC_CFG_FRAG_MSK);
+
+	creq->len = len;
+	memcpy(creq->state, hash, digsize);
+	creq->cache_ptr = 0;
+
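+	/*
+	 * do_div() returns the remainder: the number of imported bytes that
+	 * do not form a complete block and must be placed back in the cache.
+	 */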
+	cache_ptr = do_div(len, blocksize);
+	if (!cache_ptr)
+		return 0;
+
+	memcpy(creq->cache, cache, cache_ptr);
+	creq->cache_ptr = cache_ptr;
+
+	return 0;
+}
+
+static int mv_cesa_md5_init(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_op_ctx tmpl = { };
+
+	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
+
+	mv_cesa_ahash_init(req, &tmpl, true);
+
+	creq->state[0] = MD5_H0;
+	creq->state[1] = MD5_H1;
+	creq->state[2] = MD5_H2;
+	creq->state[3] = MD5_H3;
+
+	return 0;
+}
+
+static int mv_cesa_md5_export(struct ahash_request *req, void *out)
+{
+	struct md5_state *out_state = out;
+
+	return mv_cesa_ahash_export(req, out_state->hash,
+				    &out_state->byte_count, out_state->block);
+}
+
+static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
+{
+	const struct md5_state *in_state = in;
+
+	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
+				    in_state->block);
+}
+
+static int mv_cesa_md5_digest(struct ahash_request *req)
+{
+	int ret;
+
+	ret = mv_cesa_md5_init(req);
+	if (ret)
+		return ret;
+
+	return mv_cesa_ahash_finup(req);
+}
+
+struct ahash_alg mv_md5_alg = {
+	.init = mv_cesa_md5_init,
+	.update = mv_cesa_ahash_update,
+	.final = mv_cesa_ahash_final,
+	.finup = mv_cesa_ahash_finup,
+	.digest = mv_cesa_md5_digest,
+	.export = mv_cesa_md5_export,
+	.import = mv_cesa_md5_import,
+	.halg = {
+		.digestsize = MD5_DIGEST_SIZE,
+		.statesize = sizeof(struct md5_state),
+		.base = {
+			.cra_name = "md5",
+			.cra_driver_name = "mv-md5",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
+			.cra_init = mv_cesa_ahash_cra_init,
+			.cra_module = THIS_MODULE,
+		 }
+	}
+};
+
+static int mv_cesa_sha1_init(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_op_ctx tmpl = { };
+
+	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
+
+	mv_cesa_ahash_init(req, &tmpl, false);
+
+	creq->state[0] = SHA1_H0;
+	creq->state[1] = SHA1_H1;
+	creq->state[2] = SHA1_H2;
+	creq->state[3] = SHA1_H3;
+	creq->state[4] = SHA1_H4;
+
+	return 0;
+}
+
+static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
+{
+	struct sha1_state *out_state = out;
+
+	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
+				    out_state->buffer);
+}
+
+static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
+{
+	const struct sha1_state *in_state = in;
+
+	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
+				    in_state->buffer);
+}
+
+static int mv_cesa_sha1_digest(struct ahash_request *req)
+{
+	int ret;
+
+	ret = mv_cesa_sha1_init(req);
+	if (ret)
+		return ret;
+
+	return mv_cesa_ahash_finup(req);
+}
+
+struct ahash_alg mv_sha1_alg = {
+	.init = mv_cesa_sha1_init,
+	.update = mv_cesa_ahash_update,
+	.final = mv_cesa_ahash_final,
+	.finup = mv_cesa_ahash_finup,
+	.digest = mv_cesa_sha1_digest,
+	.export = mv_cesa_sha1_export,
+	.import = mv_cesa_sha1_import,
+	.halg = {
+		.digestsize = SHA1_DIGEST_SIZE,
+		.statesize = sizeof(struct sha1_state),
+		.base = {
+			.cra_name = "sha1",
+			.cra_driver_name = "mv-sha1",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = SHA1_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
+			.cra_init = mv_cesa_ahash_cra_init,
+			.cra_module = THIS_MODULE,
+		 }
+	}
+};
+
+static int mv_cesa_sha256_init(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_op_ctx tmpl = { };
+
+	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
+
+	mv_cesa_ahash_init(req, &tmpl, false);
+
+	creq->state[0] = SHA256_H0;
+	creq->state[1] = SHA256_H1;
+	creq->state[2] = SHA256_H2;
+	creq->state[3] = SHA256_H3;
+	creq->state[4] = SHA256_H4;
+	creq->state[5] = SHA256_H5;
+	creq->state[6] = SHA256_H6;
+	creq->state[7] = SHA256_H7;
+
+	return 0;
+}
+
+static int mv_cesa_sha256_digest(struct ahash_request *req)
+{
+	int ret;
+
+	ret = mv_cesa_sha256_init(req);
+	if (ret)
+		return ret;
+
+	return mv_cesa_ahash_finup(req);
+}
+
+static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
+{
+	struct sha256_state *out_state = out;
+
+	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
+				    out_state->buf);
+}
+
+static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
+{
+	const struct sha256_state *in_state = in;
+
+	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
+				    in_state->buf);
+}
+
+struct ahash_alg mv_sha256_alg = {
+	.init = mv_cesa_sha256_init,
+	.update = mv_cesa_ahash_update,
+	.final = mv_cesa_ahash_final,
+	.finup = mv_cesa_ahash_finup,
+	.digest = mv_cesa_sha256_digest,
+	.export = mv_cesa_sha256_export,
+	.import = mv_cesa_sha256_import,
+	.halg = {
+		.digestsize = SHA256_DIGEST_SIZE,
+		.statesize = sizeof(struct sha256_state),
+		.base = {
+			.cra_name = "sha256",
+			.cra_driver_name = "mv-sha256",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = SHA256_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
+			.cra_init = mv_cesa_ahash_cra_init,
+			.cra_module = THIS_MODULE,
+		 }
+	}
+};
+
+struct mv_cesa_ahash_result {
+	struct completion completion;
+	int error;
+};
+
+static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
+					int error)
+{
+	struct mv_cesa_ahash_result *result = req->data;
+
+	if (error == -EINPROGRESS)
+		return;
+
+	result->error = error;
+	complete(&result->completion);
+}
+
+static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
+				       void *state, unsigned int blocksize)
+{
+	struct mv_cesa_ahash_result result;
+	struct scatterlist sg;
+	int ret;
+
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   mv_cesa_hmac_ahash_complete, &result);
+	sg_init_one(&sg, pad, blocksize);
+	ahash_request_set_crypt(req, &sg, pad, blocksize);
+	init_completion(&result.completion);
+
+	ret = crypto_ahash_init(req);
+	if (ret)
+		return ret;
+
+	ret = crypto_ahash_update(req);
+	if (ret && ret != -EINPROGRESS)
+		return ret;
+
+	wait_for_completion_interruptible(&result.completion);
+	if (result.error)
+		return result.error;
+
+	ret = crypto_ahash_export(req, state);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
+				  const u8 *key, unsigned int keylen,
+				  u8 *ipad, u8 *opad,
+				  unsigned int blocksize)
+{
+	struct mv_cesa_ahash_result result;
+	struct scatterlist sg;
+	int ret;
+	int i;
+
+	if (keylen <= blocksize) {
+		memcpy(ipad, key, keylen);
+	} else {
+		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);
+
+		if (!keydup)
+			return -ENOMEM;
+
+		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					   mv_cesa_hmac_ahash_complete,
+					   &result);
+		sg_init_one(&sg, keydup, keylen);
+		ahash_request_set_crypt(req, &sg, ipad, keylen);
+		init_completion(&result.completion);
+
+		ret = crypto_ahash_digest(req);
+		if (ret == -EINPROGRESS) {
+			wait_for_completion_interruptible(&result.completion);
+			ret = result.error;
+		}
+
+		/* Set the memory region to 0 to avoid any leak. */
+		memset(keydup, 0, keylen);
+		kfree(keydup);
+
+		if (ret)
+			return ret;
+
+		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+	}
+
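+	/*
+	 * Standard HMAC key preparation (RFC 2104): the (possibly hashed)
+	 * key is zero-padded to a full block and XORed with the ipad and
+	 * opad constants to build the inner and outer pad blocks.
+	 */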
+	memset(ipad + keylen, 0, blocksize - keylen);
+	memcpy(opad, ipad, blocksize);
+
+	for (i = 0; i < blocksize; i++) {
+		ipad[i] ^= HMAC_IPAD_VALUE;
+		opad[i] ^= HMAC_OPAD_VALUE;
+	}
+
+	return 0;
+}
+
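+/*
+ * Precompute the HMAC inner and outer hash states, istate = H(key ^ ipad)
+ * and ostate = H(key ^ opad), so that the hardware can resume both hashes
+ * from these partial digests instead of rehashing the key pads for every
+ * request.
+ */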
+static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
+				const u8 *key, unsigned int keylen,
+				void *istate, void *ostate)
+{
+	struct ahash_request *req;
+	struct crypto_ahash *tfm;
+	unsigned int blocksize;
+	u8 *ipad = NULL;
+	u8 *opad;
+	int ret;
+
+	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		ret = -ENOMEM;
+		goto free_ahash;
+	}
+
+	crypto_ahash_clear_flags(tfm, ~0);
+
+	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+	ipad = kcalloc(2, blocksize, GFP_KERNEL);
+	if (!ipad) {
+		ret = -ENOMEM;
+		goto free_req;
+	}
+
+	opad = ipad + blocksize;
+
+	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
+	if (ret)
+		goto free_ipad;
+
+	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
+	if (ret)
+		goto free_ipad;
+
+	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);
+
+free_ipad:
+	kfree(ipad);
+free_req:
+	ahash_request_free(req);
+free_ahash:
+	crypto_free_ahash(tfm);
+
+	return ret;
+}
+
+static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
+{
+	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->base.ops = &mv_cesa_ahash_req_ops;
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct mv_cesa_ahash_req));
+	return 0;
+}
+
+static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
+{
+	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_cesa_op_ctx tmpl = { };
+
+	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
+	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
+
+	mv_cesa_ahash_init(req, &tmpl, true);
+
+	return 0;
+}
+
+static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
+				    unsigned int keylen)
+{
+	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+	struct md5_state istate, ostate;
+	int ret, i;
+
+	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
+		ctx->iv[i] = be32_to_cpu(istate.hash[i]);
+
+	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
+		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);
+
+	return 0;
+}
+
+static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
+{
+	int ret;
+
+	ret = mv_cesa_ahmac_md5_init(req);
+	if (ret)
+		return ret;
+
+	return mv_cesa_ahash_finup(req);
+}
+
+struct ahash_alg mv_ahmac_md5_alg = {
+	.init = mv_cesa_ahmac_md5_init,
+	.update = mv_cesa_ahash_update,
+	.final = mv_cesa_ahash_final,
+	.finup = mv_cesa_ahash_finup,
+	.digest = mv_cesa_ahmac_md5_digest,
+	.setkey = mv_cesa_ahmac_md5_setkey,
+	.export = mv_cesa_md5_export,
+	.import = mv_cesa_md5_import,
+	.halg = {
+		.digestsize = MD5_DIGEST_SIZE,
+		.statesize = sizeof(struct md5_state),
+		.base = {
+			.cra_name = "hmac(md5)",
+			.cra_driver_name = "mv-hmac-md5",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
+			.cra_init = mv_cesa_ahmac_cra_init,
+			.cra_module = THIS_MODULE,
+		 }
+	}
+};
+
+static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
+{
+	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_cesa_op_ctx tmpl = { };
+
+	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
+	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
+
+	mv_cesa_ahash_init(req, &tmpl, false);
+
+	return 0;
+}
+
+static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
+				     unsigned int keylen)
+{
+	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+	struct sha1_state istate, ostate;
+	int ret, i;
+
+	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
+		ctx->iv[i] = be32_to_cpu(istate.state[i]);
+
+	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
+		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);
+
+	return 0;
+}
+
+static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
+{
+	int ret;
+
+	ret = mv_cesa_ahmac_sha1_init(req);
+	if (ret)
+		return ret;
+
+	return mv_cesa_ahash_finup(req);
+}
+
+struct ahash_alg mv_ahmac_sha1_alg = {
+	.init = mv_cesa_ahmac_sha1_init,
+	.update = mv_cesa_ahash_update,
+	.final = mv_cesa_ahash_final,
+	.finup = mv_cesa_ahash_finup,
+	.digest = mv_cesa_ahmac_sha1_digest,
+	.setkey = mv_cesa_ahmac_sha1_setkey,
+	.export = mv_cesa_sha1_export,
+	.import = mv_cesa_sha1_import,
+	.halg = {
+		.digestsize = SHA1_DIGEST_SIZE,
+		.statesize = sizeof(struct sha1_state),
+		.base = {
+			.cra_name = "hmac(sha1)",
+			.cra_driver_name = "mv-hmac-sha1",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = SHA1_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
+			.cra_init = mv_cesa_ahmac_cra_init,
+			.cra_module = THIS_MODULE,
+		 }
+	}
+};
+
+static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
+				       unsigned int keylen)
+{
+	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+	struct sha256_state istate, ostate;
+	int ret, i;
+
+	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
+		ctx->iv[i] = be32_to_cpu(istate.state[i]);
+
+	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
+		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);
+
+	return 0;
+}
+
+static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
+{
+	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_cesa_op_ctx tmpl = { };
+
+	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
+	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
+
+	mv_cesa_ahash_init(req, &tmpl, false);
+
+	return 0;
+}
+
+static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
+{
+	int ret;
+
+	ret = mv_cesa_ahmac_sha256_init(req);
+	if (ret)
+		return ret;
+
+	return mv_cesa_ahash_finup(req);
+}
+
+struct ahash_alg mv_ahmac_sha256_alg = {
+	.init = mv_cesa_ahmac_sha256_init,
+	.update = mv_cesa_ahash_update,
+	.final = mv_cesa_ahash_final,
+	.finup = mv_cesa_ahash_finup,
+	.digest = mv_cesa_ahmac_sha256_digest,
+	.setkey = mv_cesa_ahmac_sha256_setkey,
+	.export = mv_cesa_sha256_export,
+	.import = mv_cesa_sha256_import,
+	.halg = {
+		.digestsize = SHA256_DIGEST_SIZE,
+		.statesize = sizeof(struct sha256_state),
+		.base = {
+			.cra_name = "hmac(sha256)",
+			.cra_driver_name = "mv-hmac-sha256",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = SHA256_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
+			.cra_init = mv_cesa_ahmac_cra_init,
+			.cra_module = THIS_MODULE,
+		 }
+	}
+};
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
new file mode 100644
index 0000000..d0ef171
--- /dev/null
+++ b/drivers/crypto/marvell/tdma.c
@@ -0,0 +1,353 @@
+/*
+ * Provide TDMA helper functions used by cipher and hash algorithm
+ * implementations.
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Author: Arnaud Ebalard <arno@natisbad.org>
+ *
+ * This work is based on an initial version written by
+ * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include "cesa.h"
+
+bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
+					struct mv_cesa_sg_dma_iter *sgiter,
+					unsigned int len)
+{
+	if (!sgiter->sg)
+		return false;
+
+	sgiter->op_offset += len;
+	sgiter->offset += len;
+	if (sgiter->offset == sg_dma_len(sgiter->sg)) {
+		if (sg_is_last(sgiter->sg))
+			return false;
+		sgiter->offset = 0;
+		sgiter->sg = sg_next(sgiter->sg);
+	}
+
+	if (sgiter->op_offset == iter->op_len)
+		return false;
+
+	return true;
+}
+
+void mv_cesa_dma_step(struct mv_cesa_req *dreq)
+{
+	struct mv_cesa_engine *engine = dreq->engine;
+
+	writel_relaxed(0, engine->regs + CESA_SA_CFG);
+
+	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
+	writel_relaxed(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B |
+		       CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN,
+		       engine->regs + CESA_TDMA_CONTROL);
+
+	writel_relaxed(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT |
+		       CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS,
+		       engine->regs + CESA_SA_CFG);
+	writel_relaxed(dreq->chain.first->cur_dma,
+		       engine->regs + CESA_TDMA_NEXT_ADDR);
+	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
+	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
+	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
+}
+
+void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
+{
+	struct mv_cesa_tdma_desc *tdma;
+
+	for (tdma = dreq->chain.first; tdma;) {
+		struct mv_cesa_tdma_desc *old_tdma = tdma;
+		u32 type = tdma->flags & CESA_TDMA_TYPE_MSK;
+
+		if (type == CESA_TDMA_OP)
+			dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
+				      le32_to_cpu(tdma->src));
+
+		tdma = tdma->next;
+		dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
+			      old_tdma->cur_dma);
+	}
+
+	dreq->chain.first = NULL;
+	dreq->chain.last = NULL;
+}
+
+void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
+			 struct mv_cesa_engine *engine)
+{
+	struct mv_cesa_tdma_desc *tdma;
+
+	for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {
+		if (tdma->flags & CESA_TDMA_DST_IN_SRAM)
+			tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma);
+
+		if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
+			tdma->src = cpu_to_le32(tdma->src + engine->sram_dma);
+
+		if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP)
+			mv_cesa_adjust_op(engine, tdma->op);
+	}
+}
+
+void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
+			struct mv_cesa_req *dreq)
+{
+	if (engine->chain.first == NULL && engine->chain.last == NULL) {
+		engine->chain.first = dreq->chain.first;
+		engine->chain.last  = dreq->chain.last;
+	} else {
+		struct mv_cesa_tdma_desc *last;
+
+		last = engine->chain.last;
+		last->next = dreq->chain.first;
+		engine->chain.last = dreq->chain.last;
+
+		/*
+		 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN flag is
+		 * set on the last element of the current chain, or if the
+		 * request being queued needs the IV regs to be set before
+		 * launching the request.
+		 */
+		if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
+		    !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
+			last->next_dma = dreq->chain.first->cur_dma;
+	}
+}
+
+int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
+{
+	struct crypto_async_request *req = NULL;
+	struct mv_cesa_tdma_desc *tdma = NULL, *next = NULL;
+	dma_addr_t tdma_cur;
+	int res = 0;
+
+	tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
+
+	for (tdma = engine->chain.first; tdma; tdma = next) {
+		spin_lock_bh(&engine->lock);
+		next = tdma->next;
+		spin_unlock_bh(&engine->lock);
+
+		if (tdma->flags & CESA_TDMA_END_OF_REQ) {
+			struct crypto_async_request *backlog = NULL;
+			struct mv_cesa_ctx *ctx;
+			u32 current_status;
+
+			spin_lock_bh(&engine->lock);
+			/*
+			 * If req is NULL, the request being completed is the
+			 * one stored in engine->req.
+			 */
+			if (!req)
+				req = engine->req;
+			else
+				req = mv_cesa_dequeue_req_locked(engine,
+								 &backlog);
+
+			/* Re-chain to the next request */
+			engine->chain.first = tdma->next;
+			tdma->next = NULL;
+
+			/* If this is the last request, clear the chain */
+			if (engine->chain.first == NULL)
+				engine->chain.last  = NULL;
+			spin_unlock_bh(&engine->lock);
+
+			ctx = crypto_tfm_ctx(req->tfm);
+			current_status = (tdma->cur_dma == tdma_cur) ?
+					  status : CESA_SA_INT_ACC0_IDMA_DONE;
+			res = ctx->ops->process(req, current_status);
+			ctx->ops->complete(req);
+
+			if (res == 0)
+				mv_cesa_engine_enqueue_complete_request(engine,
+									req);
+
+			if (backlog)
+				backlog->complete(backlog, -EINPROGRESS);
+		}
+
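+		/*
+		 * Stop once we hit an error or reach the descriptor the
+		 * engine is currently processing: the descriptors after it
+		 * have not completed yet.
+		 */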
+		if (res || tdma->cur_dma == tdma_cur)
+			break;
+	}
+
+	/* Save the last failed request in engine->req, so that the core
+	 * knows which request was faulty.
+	 */
+	if (res) {
+		spin_lock_bh(&engine->lock);
+		engine->req = req;
+		spin_unlock_bh(&engine->lock);
+	}
+
+	return res;
+}
+
+static struct mv_cesa_tdma_desc *
+mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
+{
+	struct mv_cesa_tdma_desc *new_tdma = NULL;
+	dma_addr_t dma_handle;
+
+	new_tdma = dma_pool_zalloc(cesa_dev->dma->tdma_desc_pool, flags,
+				   &dma_handle);
+	if (!new_tdma)
+		return ERR_PTR(-ENOMEM);
+
+	new_tdma->cur_dma = dma_handle;
+	if (chain->last) {
+		chain->last->next_dma = cpu_to_le32(dma_handle);
+		chain->last->next = new_tdma;
+	} else {
+		chain->first = new_tdma;
+	}
+
+	chain->last = new_tdma;
+
+	return new_tdma;
+}
+
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+			  u32 size, u32 flags, gfp_t gfp_flags)
+{
+	struct mv_cesa_tdma_desc *tdma, *op_desc;
+
+	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
+	if (IS_ERR(tdma))
+		return PTR_ERR(tdma);
+
+	/* We re-use an existing op_desc object to retrieve the context
+	 * and result instead of allocating a new one.
+	 * There is at least one object of this type in a CESA crypto
+	 * req, so just pick the first one in the chain.
+	 */
+	for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
+		u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;
+
+		if (type == CESA_TDMA_OP)
+			break;
+	}
+
+	if (!op_desc)
+		return -EIO;
+
+	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
+	tdma->src = src;
+	tdma->dst = op_desc->src;
+	tdma->op = op_desc->op;
+
+	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
+	tdma->flags = flags | CESA_TDMA_RESULT;
+	return 0;
+}
+
+struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
+					const struct mv_cesa_op_ctx *op_templ,
+					bool skip_ctx,
+					gfp_t flags)
+{
+	struct mv_cesa_tdma_desc *tdma;
+	struct mv_cesa_op_ctx *op;
+	dma_addr_t dma_handle;
+	unsigned int size;
+
+	tdma = mv_cesa_dma_add_desc(chain, flags);
+	if (IS_ERR(tdma))
+		return ERR_CAST(tdma);
+
+	op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle);
+	if (!op)
+		return ERR_PTR(-ENOMEM);
+
+	*op = *op_templ;
+
+	size = skip_ctx ? sizeof(op->desc) : sizeof(*op);
+
+	tdma = chain->last;
+	tdma->op = op;
+	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
+	tdma->src = cpu_to_le32(dma_handle);
+	tdma->dst = CESA_SA_CFG_SRAM_OFFSET;
+	tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;
+
+	return op;
+}
+
+int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
+				  dma_addr_t dst, dma_addr_t src, u32 size,
+				  u32 flags, gfp_t gfp_flags)
+{
+	struct mv_cesa_tdma_desc *tdma;
+
+	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
+	if (IS_ERR(tdma))
+		return PTR_ERR(tdma);
+
+	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
+	tdma->src = src;
+	tdma->dst = dst;
+
+	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
+	tdma->flags = flags | CESA_TDMA_DATA;
+
+	return 0;
+}
+
+int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
+{
+	struct mv_cesa_tdma_desc *tdma;
+
+	tdma = mv_cesa_dma_add_desc(chain, flags);
+	return PTR_ERR_OR_ZERO(tdma);
+}
+
+int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
+{
+	struct mv_cesa_tdma_desc *tdma;
+
+	tdma = mv_cesa_dma_add_desc(chain, flags);
+	if (IS_ERR(tdma))
+		return PTR_ERR(tdma);
+
+	tdma->byte_cnt = cpu_to_le32(BIT(31));
+
+	return 0;
+}
+
+int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
+				 struct mv_cesa_dma_iter *dma_iter,
+				 struct mv_cesa_sg_dma_iter *sgiter,
+				 gfp_t gfp_flags)
+{
+	u32 flags = sgiter->dir == DMA_TO_DEVICE ?
+		    CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM;
+	unsigned int len;
+
+	do {
+		dma_addr_t dst, src;
+		int ret;
+
+		len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter);
+		if (sgiter->dir == DMA_TO_DEVICE) {
+			dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
+			src = sg_dma_address(sgiter->sg) + sgiter->offset;
+		} else {
+			dst = sg_dma_address(sgiter->sg) + sgiter->offset;
+			src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
+		}
+
+		ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len,
+						    flags, gfp_flags);
+		if (ret)
+			return ret;
+
+	} while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len));
+
+	return 0;
+}
diff --git a/drivers/crypto/mediatek/Makefile b/drivers/crypto/mediatek/Makefile
new file mode 100644
index 0000000..187be79
--- /dev/null
+++ b/drivers/crypto/mediatek/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mtk-crypto.o
+mtk-crypto-objs:= mtk-platform.o mtk-aes.o mtk-sha.o
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
new file mode 100644
index 0000000..c2058cf
--- /dev/null
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -0,0 +1,1289 @@
+/*
+ * Cryptographic API.
+ *
+ * Driver for EIP97 AES acceleration.
+ *
+ * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Some ideas are from atmel-aes.c drivers.
+ */
+
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+#include "mtk-platform.h"
+
+#define AES_QUEUE_SIZE		512
+#define AES_BUF_ORDER		2
+#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
+				& ~(AES_BLOCK_SIZE - 1))
+#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
+				AES_BLOCK_SIZE * 2)
+#define AES_MAX_CT_SIZE		6
+
+#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)
+
+/* AES-CBC/ECB/CTR command token */
+#define AES_CMD0		cpu_to_le32(0x05000000)
+#define AES_CMD1		cpu_to_le32(0x2d060000)
+#define AES_CMD2		cpu_to_le32(0xe4a63806)
+/* AES-GCM command token */
+#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
+#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
+#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
+#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
+#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
+#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
+#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)
+
+/* AES transform information word 0 fields */
+#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
+#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
+#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
+#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
+#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
+#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
+#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
+#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
+#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
+#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
+/* AES transform information word 1 fields */
+#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
+#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
+#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
+#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
+#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
+#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
+#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
+#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)
+
+/* AES flags */
+#define AES_FLAGS_CIPHER_MSK	GENMASK(2, 0)
+#define AES_FLAGS_ECB		BIT(0)
+#define AES_FLAGS_CBC		BIT(1)
+#define AES_FLAGS_CTR		BIT(2)
+#define AES_FLAGS_GCM		BIT(3)
+#define AES_FLAGS_ENCRYPT	BIT(4)
+#define AES_FLAGS_BUSY		BIT(5)
+
+#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))
+
+/**
+ * mtk_aes_info - hardware information of AES
+ * @cmd:	command token, hardware instruction
+ * @tfm:	transform state of cipher algorithm.
+ * @state:	contains keys and initial vectors.
+ *
+ * Memory layout of GCM buffer:
+ * /-----------\
+ * |  AES KEY  | 128/192/256 bits
+ * |-----------|
+ * |  HASH KEY | a string of 128 zero bits encrypted using the block cipher
+ * |-----------|
+ * |    IVs    | 4 * 4 bytes
+ * \-----------/
+ *
+ * The engine requires all of this information to:
+ * - Decode commands and control the engine's data path.
+ * - Coordinate hardware data fetch and store operations.
+ * - Construct and output the result token.
+ */
+struct mtk_aes_info {
+	__le32 cmd[AES_MAX_CT_SIZE];
+	__le32 tfm[2];
+	__le32 state[AES_MAX_STATE_BUF_SIZE];
+};
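+
+/*
+ * Illustrative state[] layout for GCM with a 256-bit key (word offsets,
+ * as derived from the SIZE_IN_WORDS() arithmetic used below):
+ *   state[0..7]   AES key
+ *   state[8..11]  hash key H (all-zero block encrypted with the AES key)
+ *   state[12..14] 96-bit IV / initial counter
+ */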
+
+struct mtk_aes_reqctx {
+	u64 mode;
+};
+
+struct mtk_aes_base_ctx {
+	struct mtk_cryp *cryp;
+	u32 keylen;
+	__le32 keymode;
+
+	mtk_aes_fn start;
+
+	struct mtk_aes_info info;
+	dma_addr_t ct_dma;
+	dma_addr_t tfm_dma;
+
+	__le32 ct_hdr;
+	u32 ct_size;
+};
+
+struct mtk_aes_ctx {
+	struct mtk_aes_base_ctx	base;
+};
+
+struct mtk_aes_ctr_ctx {
+	struct mtk_aes_base_ctx base;
+
+	u32	iv[AES_BLOCK_SIZE / sizeof(u32)];
+	size_t offset;
+	struct scatterlist src[2];
+	struct scatterlist dst[2];
+};
+
+struct mtk_aes_gcm_ctx {
+	struct mtk_aes_base_ctx base;
+
+	u32 authsize;
+	size_t textlen;
+
+	struct crypto_skcipher *ctr;
+};
+
+struct mtk_aes_drv {
+	struct list_head dev_list;
+	/* Device list lock */
+	spinlock_t lock;
+};
+
+static struct mtk_aes_drv mtk_aes = {
+	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
+};
+
+static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
+{
+	return readl_relaxed(cryp->base + offset);
+}
+
+static inline void mtk_aes_write(struct mtk_cryp *cryp,
+				 u32 offset, u32 value)
+{
+	writel_relaxed(value, cryp->base + offset);
+}
+
+static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
+{
+	struct mtk_cryp *cryp = NULL;
+	struct mtk_cryp *tmp;
+
+	spin_lock_bh(&mtk_aes.lock);
+	if (!ctx->cryp) {
+		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
+			cryp = tmp;
+			break;
+		}
+		ctx->cryp = cryp;
+	} else {
+		cryp = ctx->cryp;
+	}
+	spin_unlock_bh(&mtk_aes.lock);
+
+	return cryp;
+}
+
+static inline size_t mtk_aes_padlen(size_t len)
+{
+	len &= AES_BLOCK_SIZE - 1;
+	return len ? AES_BLOCK_SIZE - len : 0;
+}
+
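+/*
+ * Check whether the scatterlist can be fed to the engine directly: 'len'
+ * must be a multiple of the AES block size and every entry must start on
+ * a 32-bit boundary and cover whole blocks. On success the list is
+ * trimmed to 'len' bytes and the cut-off remainder of the last entry is
+ * recorded so that mtk_aes_restore_sg() can undo the trim.
+ */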
+static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
+				  struct mtk_aes_dma *dma)
+{
+	int nents;
+
+	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
+		return false;
+
+	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
+		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+			return false;
+
+		if (len <= sg->length) {
+			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
+				return false;
+
+			dma->nents = nents + 1;
+			dma->remainder = sg->length - len;
+			sg->length = len;
+			return true;
+		}
+
+		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+			return false;
+
+		len -= sg->length;
+	}
+
+	return false;
+}
+
+static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
+				    const struct mtk_aes_reqctx *rctx)
+{
+	/* Clear all but persistent flags and set request flags. */
+	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
+}
+
+static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
+{
+	struct scatterlist *sg = dma->sg;
+	int nents = dma->nents;
+
+	if (!dma->remainder)
+		return;
+
+	while (--nents > 0 && sg)
+		sg = sg_next(sg);
+
+	if (!sg)
+		return;
+
+	sg->length += dma->remainder;
+}
+
+static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
+{
+	int i;
+
+	for (i = 0; i < SIZE_IN_WORDS(size); i++)
+		dst[i] = cpu_to_le32(src[i]);
+}
+
+static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
+{
+	int i;
+
+	for (i = 0; i < SIZE_IN_WORDS(size); i++)
+		dst[i] = cpu_to_be32(src[i]);
+}
+
+static inline int mtk_aes_complete(struct mtk_cryp *cryp,
+				   struct mtk_aes_rec *aes,
+				   int err)
+{
+	aes->flags &= ~AES_FLAGS_BUSY;
+	aes->areq->complete(aes->areq, err);
+	/* Handle new request */
+	tasklet_schedule(&aes->queue_task);
+	return err;
+}
+
+/*
+ * Write descriptors for processing. This will configure the engine, load
+ * the transform information and then start the packet processing.
+ */
+static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
+{
+	struct mtk_ring *ring = cryp->ring[aes->id];
+	struct mtk_desc *cmd = NULL, *res = NULL;
+	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
+	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
+	int nents;
+
+	/* Write command descriptors */
+	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
+		cmd = ring->cmd_next;
+		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
+		cmd->buf = cpu_to_le32(sg_dma_address(ssg));
+
+		if (nents == 0) {
+			cmd->hdr |= MTK_DESC_FIRST |
+				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
+			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
+			cmd->ct_hdr = aes->ctx->ct_hdr;
+			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
+		}
+
+		/* Shift ring buffer and check boundary */
+		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
+			ring->cmd_next = ring->cmd_base;
+	}
+	cmd->hdr |= MTK_DESC_LAST;
+
+	/* Prepare result descriptors */
+	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
+		res = ring->res_next;
+		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
+		res->buf = cpu_to_le32(sg_dma_address(dsg));
+
+		if (nents == 0)
+			res->hdr |= MTK_DESC_FIRST;
+
+		/* Shift ring buffer and check boundary */
+		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
+			ring->res_next = ring->res_base;
+	}
+	res->hdr |= MTK_DESC_LAST;
+
+	/* Pointer to current result descriptor */
+	ring->res_prev = res;
+
+	/* Prepare enough space for authenticated tag */
+	if (aes->flags & AES_FLAGS_GCM)
+		res->hdr += AES_BLOCK_SIZE;
+
+	/*
+	 * Make sure that all changes to the DMA ring are done before we
+	 * start the engine.
+	 */
+	wmb();
+	/* Start DMA transfer */
+	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
+	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));
+
+	return -EINPROGRESS;
+}
+
+static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
+{
+	struct mtk_aes_base_ctx *ctx = aes->ctx;
+
+	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
+			 DMA_TO_DEVICE);
+
+	if (aes->src.sg == aes->dst.sg) {
+		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
+			     DMA_BIDIRECTIONAL);
+
+		if (aes->src.sg != &aes->aligned_sg)
+			mtk_aes_restore_sg(&aes->src);
+	} else {
+		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
+			     DMA_FROM_DEVICE);
+
+		if (aes->dst.sg != &aes->aligned_sg)
+			mtk_aes_restore_sg(&aes->dst);
+
+		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
+			     DMA_TO_DEVICE);
+
+		if (aes->src.sg != &aes->aligned_sg)
+			mtk_aes_restore_sg(&aes->src);
+	}
+
+	if (aes->dst.sg == &aes->aligned_sg)
+		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
+				    aes->buf, aes->total);
+}
+
+static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
+{
+	struct mtk_aes_base_ctx *ctx = aes->ctx;
+	struct mtk_aes_info *info = &ctx->info;
+
+	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
+				     DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
+		goto exit;
+
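+	/*
+	 * The transform record lives right after the command tokens in the
+	 * same mtk_aes_info buffer, so its DMA address is simply an offset
+	 * into the mapping created above.
+	 */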
+	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);
+
+	if (aes->src.sg == aes->dst.sg) {
+		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
+					     aes->src.nents,
+					     DMA_BIDIRECTIONAL);
+		aes->dst.sg_len = aes->src.sg_len;
+		if (unlikely(!aes->src.sg_len))
+			goto sg_map_err;
+	} else {
+		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
+					     aes->src.nents, DMA_TO_DEVICE);
+		if (unlikely(!aes->src.sg_len))
+			goto sg_map_err;
+
+		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
+					     aes->dst.nents, DMA_FROM_DEVICE);
+		if (unlikely(!aes->dst.sg_len)) {
+			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
+				     DMA_TO_DEVICE);
+			goto sg_map_err;
+		}
+	}
+
+	return mtk_aes_xmit(cryp, aes);
+
+sg_map_err:
+	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
+exit:
+	return mtk_aes_complete(cryp, aes, -EINVAL);
+}
+
+/* Initialize transform information of CBC/ECB/CTR mode */
+static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
+			      size_t len)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
+	struct mtk_aes_base_ctx *ctx = aes->ctx;
+	struct mtk_aes_info *info = &ctx->info;
+	u32 cnt = 0;
+
+	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
+	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
+	info->cmd[cnt++] = AES_CMD1;
+
+	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
+	if (aes->flags & AES_FLAGS_ENCRYPT)
+		info->tfm[0] |= AES_TFM_BASIC_OUT;
+	else
+		info->tfm[0] |= AES_TFM_BASIC_IN;
+
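+	/*
+	 * CBC writes the request IV below and then shares the IV-related
+	 * setup with CTR (the ctr label); CTR skips the IV copy here since
+	 * mtk_aes_ctr_transfer() writes it per fragment; ECB jumps past
+	 * both (the ecb label).
+	 */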
+	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
+	case AES_FLAGS_CBC:
+		info->tfm[1] = AES_TFM_CBC;
+		break;
+	case AES_FLAGS_ECB:
+		info->tfm[1] = AES_TFM_ECB;
+		goto ecb;
+	case AES_FLAGS_CTR:
+		info->tfm[1] = AES_TFM_CTR_LOAD;
+		goto ctr;
+
+	default:
+		/* Should not happen... */
+		return;
+	}
+
+	mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
+			       AES_BLOCK_SIZE);
+ctr:
+	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
+	info->tfm[1] |= AES_TFM_FULL_IV;
+	info->cmd[cnt++] = AES_CMD2;
+ecb:
+	ctx->ct_size = cnt;
+}
+
+static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
+		       struct scatterlist *src, struct scatterlist *dst,
+		       size_t len)
+{
+	size_t padlen = 0;
+	bool src_aligned, dst_aligned;
+
+	aes->total = len;
+	aes->src.sg = src;
+	aes->dst.sg = dst;
+	aes->real_dst = dst;
+
+	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
+	if (src == dst)
+		dst_aligned = src_aligned;
+	else
+		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);
+
+	if (!src_aligned || !dst_aligned) {
+		padlen = mtk_aes_padlen(len);
+
+		if (len + padlen > AES_BUF_SIZE)
+			return mtk_aes_complete(cryp, aes, -ENOMEM);
+
+		if (!src_aligned) {
+			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
+			aes->src.sg = &aes->aligned_sg;
+			aes->src.nents = 1;
+			aes->src.remainder = 0;
+		}
+
+		if (!dst_aligned) {
+			aes->dst.sg = &aes->aligned_sg;
+			aes->dst.nents = 1;
+			aes->dst.remainder = 0;
+		}
+
+		sg_init_table(&aes->aligned_sg, 1);
+		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
+	}
+
+	mtk_aes_info_init(cryp, aes, len + padlen);
+
+	return mtk_aes_map(cryp, aes);
+}
+
+static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
+				struct crypto_async_request *new_areq)
+{
+	struct mtk_aes_rec *aes = cryp->aes[id];
+	struct crypto_async_request *areq, *backlog;
+	struct mtk_aes_base_ctx *ctx;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&aes->lock, flags);
+	if (new_areq)
+		ret = crypto_enqueue_request(&aes->queue, new_areq);
+	if (aes->flags & AES_FLAGS_BUSY) {
+		spin_unlock_irqrestore(&aes->lock, flags);
+		return ret;
+	}
+	backlog = crypto_get_backlog(&aes->queue);
+	areq = crypto_dequeue_request(&aes->queue);
+	if (areq)
+		aes->flags |= AES_FLAGS_BUSY;
+	spin_unlock_irqrestore(&aes->lock, flags);
+
+	if (!areq)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	ctx = crypto_tfm_ctx(areq->tfm);
+
+	aes->areq = areq;
+	aes->ctx = ctx;
+
+	return ctx->start(cryp, aes);
+}
+
+static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
+				     struct mtk_aes_rec *aes)
+{
+	return mtk_aes_complete(cryp, aes, 0);
+}
+
+static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
+	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+
+	mtk_aes_set_mode(aes, rctx);
+	aes->resume = mtk_aes_transfer_complete;
+
+	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
+}
+
+static inline struct mtk_aes_ctr_ctx *
+mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
+{
+	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
+}
+
+static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
+{
+	struct mtk_aes_base_ctx *ctx = aes->ctx;
+	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
+	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
+	struct scatterlist *src, *dst;
+	u32 start, end, ctr, blocks;
+	size_t datalen;
+	bool fragmented = false;
+
+	/* Check for transfer completion. */
+	cctx->offset += aes->total;
+	if (cctx->offset >= req->nbytes)
+		return mtk_aes_transfer_complete(cryp, aes);
+
+	/* Compute data length. */
+	datalen = req->nbytes - cctx->offset;
+	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
+	ctr = be32_to_cpu(cctx->iv[3]);
+
+	/* Check 32bit counter overflow. */
+	start = ctr;
+	end = start + blocks - 1;
+	if (end < start) {
+		ctr |= 0xffffffff;
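+		/*
+		 * In 32-bit arithmetic -start equals 2^32 - start, i.e. the
+		 * number of blocks left before the counter wraps, so this
+		 * fragment is limited to that amount of data.
+		 */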
+		datalen = AES_BLOCK_SIZE * -start;
+		fragmented = true;
+	}
+
+	/* Jump to offset. */
+	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
+	dst = ((req->src == req->dst) ? src :
+	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));
+
+	/* Write IVs into transform state buffer. */
+	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
+			       AES_BLOCK_SIZE);
+
+	if (unlikely(fragmented)) {
+		/*
+		 * Increment the counter manually to cope with the hardware
+		 * counter overflow.
+		 */
+		cctx->iv[3] = cpu_to_be32(ctr);
+		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
+	}
+
+	return mtk_aes_dma(cryp, aes, src, dst, datalen);
+}
+
+static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
+{
+	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
+	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
+	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+
+	mtk_aes_set_mode(aes, rctx);
+
+	memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
+	cctx->offset = 0;
+	aes->total = 0;
+	aes->resume = mtk_aes_ctr_transfer;
+
+	return mtk_aes_ctr_transfer(cryp, aes);
+}
+
+/* Check the key length and write the AES key into the transform state buffer */
+static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
+			  const u8 *key, u32 keylen)
+{
+	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		ctx->keymode = AES_TFM_128BITS;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->keymode = AES_TFM_192BITS;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->keymode = AES_TFM_256BITS;
+		break;
+
+	default:
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx->keylen = SIZE_IN_WORDS(keylen);
+	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
+
+	return 0;
+}
+
+static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
+{
+	struct mtk_aes_base_ctx *ctx;
+	struct mtk_aes_reqctx *rctx;
+
+	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+	rctx = ablkcipher_request_ctx(req);
+	rctx->mode = mode;
+
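+	/*
+	 * Dispatch encryption and decryption to different records (rings)
+	 * so both directions can be processed in parallel.
+	 */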
+	return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT),
+				    &req->base);
+}
+
+static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
+}
+
+static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return mtk_aes_crypt(req, AES_FLAGS_ECB);
+}
+
+static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
+}
+
+static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return mtk_aes_crypt(req, AES_FLAGS_CBC);
+}
+
+static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
+}
+
+static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+	return mtk_aes_crypt(req, AES_FLAGS_CTR);
+}
+
+static int mtk_aes_cra_init(struct crypto_tfm *tfm)
+{
+	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct mtk_cryp *cryp = NULL;
+
+	cryp = mtk_aes_find_dev(&ctx->base);
+	if (!cryp) {
+		pr_err("can't find crypto device\n");
+		return -ENODEV;
+	}
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
+	ctx->base.start = mtk_aes_start;
+	return 0;
+}
+
+static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
+{
+	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct mtk_cryp *cryp = NULL;
+
+	cryp = mtk_aes_find_dev(&ctx->base);
+	if (!cryp) {
+		pr_err("can't find crypto device\n");
+		return -ENODEV;
+	}
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
+	ctx->base.start = mtk_aes_ctr_start;
+	return 0;
+}
+
+static struct crypto_alg aes_algs[] = {
+{
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "cbc-aes-mtk",
+	.cra_priority		= 400,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC,
+	.cra_init		= mtk_aes_cra_init,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.setkey		= mtk_aes_setkey,
+		.encrypt	= mtk_aes_cbc_encrypt,
+		.decrypt	= mtk_aes_cbc_decrypt,
+		.ivsize		= AES_BLOCK_SIZE,
+	}
+},
+{
+	.cra_name		= "ecb(aes)",
+	.cra_driver_name	= "ecb-aes-mtk",
+	.cra_priority		= 400,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC,
+	.cra_init		= mtk_aes_cra_init,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.setkey		= mtk_aes_setkey,
+		.encrypt	= mtk_aes_ecb_encrypt,
+		.decrypt	= mtk_aes_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "ctr(aes)",
+	.cra_driver_name	= "ctr-aes-mtk",
+	.cra_priority		= 400,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC,
+	.cra_init		= mtk_aes_ctr_cra_init,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct mtk_aes_ctr_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= mtk_aes_setkey,
+		.encrypt	= mtk_aes_ctr_encrypt,
+		.decrypt	= mtk_aes_ctr_decrypt,
+	}
+},
+};
+
+static inline struct mtk_aes_gcm_ctx *
+mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
+{
+	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
+}
+
+/*
+ * The engine verifies and compares the tag automatically, so we only need
+ * to check the returned status stored in the result descriptor.
+ */
+static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
+				  struct mtk_aes_rec *aes)
+{
+	u32 status = cryp->ring[aes->id]->res_prev->ct;
+
+	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
+				-EBADMSG : 0);
+}
+
+/* Initialize transform information of GCM mode */
+static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
+				  struct mtk_aes_rec *aes,
+				  size_t len)
+{
+	struct aead_request *req = aead_request_cast(aes->areq);
+	struct mtk_aes_base_ctx *ctx = aes->ctx;
+	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
+	struct mtk_aes_info *info = &ctx->info;
+	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
+	u32 cnt = 0;
+
+	ctx->ct_hdr = AES_CT_CTRL_HDR | len;
+
+	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
+	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
+	info->cmd[cnt++] = AES_GCM_CMD2;
+	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);
+
+	if (aes->flags & AES_FLAGS_ENCRYPT) {
+		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
+		info->tfm[0] = AES_TFM_GCM_OUT;
+	} else {
+		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
+		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
+		info->tfm[0] = AES_TFM_GCM_IN;
+	}
+	ctx->ct_size = cnt;
+
+	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
+			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
+			ctx->keymode;
+	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
+		       AES_TFM_ENC_HASH;
+
+	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
+			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
+}
+
+static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
+			   struct scatterlist *src, struct scatterlist *dst,
+			   size_t len)
+{
+	bool src_aligned, dst_aligned;
+
+	aes->src.sg = src;
+	aes->dst.sg = dst;
+	aes->real_dst = dst;
+
+	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
+	if (src == dst)
+		dst_aligned = src_aligned;
+	else
+		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);
+
+	if (!src_aligned || !dst_aligned) {
+		if (aes->total > AES_BUF_SIZE)
+			return mtk_aes_complete(cryp, aes, -ENOMEM);
+
+		if (!src_aligned) {
+			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
+			aes->src.sg = &aes->aligned_sg;
+			aes->src.nents = 1;
+			aes->src.remainder = 0;
+		}
+
+		if (!dst_aligned) {
+			aes->dst.sg = &aes->aligned_sg;
+			aes->dst.nents = 1;
+			aes->dst.remainder = 0;
+		}
+
+		sg_init_table(&aes->aligned_sg, 1);
+		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
+	}
+
+	mtk_aes_gcm_info_init(cryp, aes, len);
+
+	return mtk_aes_map(cryp, aes);
+}
+
+/* Todo: GMAC */
+static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
+{
+	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
+	struct aead_request *req = aead_request_cast(aes->areq);
+	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
+	u32 len = req->assoclen + req->cryptlen;
+
+	mtk_aes_set_mode(aes, rctx);
+
+	if (aes->flags & AES_FLAGS_ENCRYPT) {
+		u32 tag[4];
+
+		aes->resume = mtk_aes_transfer_complete;
+		/* Compute total process length. */
+		aes->total = len + gctx->authsize;
+		/* Compute text length. */
+		gctx->textlen = req->cryptlen;
+		/* Hardware will append authenticated tag to output buffer */
+		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
+	} else {
+		aes->resume = mtk_aes_gcm_tag_verify;
+		aes->total = len;
+		gctx->textlen = req->cryptlen - gctx->authsize;
+	}
+
+	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
+}
+
+static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
+{
+	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
+	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
+
+	/* Empty messages are not supported yet */
+	if (!gctx->textlen && !req->assoclen)
+		return -EINVAL;
+
+	rctx->mode = AES_FLAGS_GCM | mode;
+
+	return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
+				    &req->base);
+}
+
+/*
+ * Because of a hardware limitation, we need to pre-calculate the hash
+ * key H (the encryption of an all-zero block) for the GHASH operation.
+ * The result of this encryption needs to be stored in the transform
+ * state buffer.
+ */
+static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+			      u32 keylen)
+{
+	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
+	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
+	struct crypto_skcipher *ctr = gctx->ctr;
+	struct {
+		u32 hash[4];
+		u8 iv[8];
+
+		struct crypto_wait wait;
+
+		struct scatterlist sg[1];
+		struct skcipher_request req;
+	} *data;
+	int err;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		ctx->keymode = AES_TFM_128BITS;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->keymode = AES_TFM_192BITS;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->keymode = AES_TFM_256BITS;
+		break;
+
+	default:
+		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx->keylen = SIZE_IN_WORDS(keylen);
+
+	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
+	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
+				  CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(ctr, key, keylen);
+	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
+			      CRYPTO_TFM_RES_MASK);
+	if (err)
+		return err;
+
+	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
+		       GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	crypto_init_wait(&data->wait);
+	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
+	skcipher_request_set_tfm(&data->req, ctr);
+	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+				      CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      crypto_req_done, &data->wait);
+	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
+				   AES_BLOCK_SIZE, data->iv);
+
+	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
+			      &data->wait);
+	if (err)
+		goto out;
+
+	/* Write key into state buffer */
+	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
+	/* Write key(H) into state buffer */
+	mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
+			       AES_BLOCK_SIZE);
+out:
+	kzfree(data);
+	return err;
+}
+
+static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
+				   u32 authsize)
+{
+	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
+	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
+
+	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	gctx->authsize = authsize;
+	return 0;
+}
+
+static int mtk_aes_gcm_encrypt(struct aead_request *req)
+{
+	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
+}
+
+static int mtk_aes_gcm_decrypt(struct aead_request *req)
+{
+	return mtk_aes_gcm_crypt(req, 0);
+}
+
+static int mtk_aes_gcm_init(struct crypto_aead *aead)
+{
+	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
+	struct mtk_cryp *cryp = NULL;
+
+	cryp = mtk_aes_find_dev(&ctx->base);
+	if (!cryp) {
+		pr_err("can't find crypto device\n");
+		return -ENODEV;
+	}
+
+	ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
+					 CRYPTO_ALG_ASYNC);
+	if (IS_ERR(ctx->ctr)) {
+		pr_err("Error allocating ctr(aes)\n");
+		return PTR_ERR(ctx->ctr);
+	}
+
+	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
+	ctx->base.start = mtk_aes_gcm_start;
+	return 0;
+}
+
+static void mtk_aes_gcm_exit(struct crypto_aead *aead)
+{
+	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
+
+	crypto_free_skcipher(ctx->ctr);
+}
+
+static struct aead_alg aes_gcm_alg = {
+	.setkey		= mtk_aes_gcm_setkey,
+	.setauthsize	= mtk_aes_gcm_setauthsize,
+	.encrypt	= mtk_aes_gcm_encrypt,
+	.decrypt	= mtk_aes_gcm_decrypt,
+	.init		= mtk_aes_gcm_init,
+	.exit		= mtk_aes_gcm_exit,
+	.ivsize		= GCM_AES_IV_SIZE,
+	.maxauthsize	= AES_BLOCK_SIZE,
+
+	.base = {
+		.cra_name		= "gcm(aes)",
+		.cra_driver_name	= "gcm-aes-mtk",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
+		.cra_alignmask		= 0xf,
+		.cra_module		= THIS_MODULE,
+	},
+};
+
+static void mtk_aes_queue_task(unsigned long data)
+{
+	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
+
+	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
+}
+
+static void mtk_aes_done_task(unsigned long data)
+{
+	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
+	struct mtk_cryp *cryp = aes->cryp;
+
+	mtk_aes_unmap(cryp, aes);
+	aes->resume(cryp, aes);
+}
+
+static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
+{
+	struct mtk_aes_rec *aes  = (struct mtk_aes_rec *)dev_id;
+	struct mtk_cryp *cryp = aes->cryp;
+	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));
+
+	mtk_aes_write(cryp, RDR_STAT(aes->id), val);
+
+	if (likely(AES_FLAGS_BUSY & aes->flags)) {
+		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
+		mtk_aes_write(cryp, RDR_THRESH(aes->id),
+			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
+
+		tasklet_schedule(&aes->done_task);
+	} else {
+		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
+	}
+	return IRQ_HANDLED;
+}
+
+/*
+ * The purpose of creating separate encryption and decryption records is
+ * to process outbound and inbound data in parallel, which can improve
+ * performance in most use cases, such as IPsec VPN, especially under
+ * heavy network traffic.
+ */
+static int mtk_aes_record_init(struct mtk_cryp *cryp)
+{
+	struct mtk_aes_rec **aes = cryp->aes;
+	int i, err = -ENOMEM;
+
+	for (i = 0; i < MTK_REC_NUM; i++) {
+		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
+		if (!aes[i])
+			goto err_cleanup;
+
+		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
+						AES_BUF_ORDER);
+		if (!aes[i]->buf)
+			goto err_cleanup;
+
+		aes[i]->cryp = cryp;
+
+		spin_lock_init(&aes[i]->lock);
+		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
+
+		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
+			     (unsigned long)aes[i]);
+		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
+			     (unsigned long)aes[i]);
+	}
+
+	/* Link to ring0 and ring1 respectively */
+	aes[0]->id = MTK_RING0;
+	aes[1]->id = MTK_RING1;
+
+	return 0;
+
+err_cleanup:
+	for (; i--; ) {
+		free_page((unsigned long)aes[i]->buf);
+		kfree(aes[i]);
+	}
+
+	return err;
+}
+
+static void mtk_aes_record_free(struct mtk_cryp *cryp)
+{
+	int i;
+
+	for (i = 0; i < MTK_REC_NUM; i++) {
+		tasklet_kill(&cryp->aes[i]->done_task);
+		tasklet_kill(&cryp->aes[i]->queue_task);
+
+		free_page((unsigned long)cryp->aes[i]->buf);
+		kfree(cryp->aes[i]);
+	}
+}
+
+static void mtk_aes_unregister_algs(void)
+{
+	int i;
+
+	crypto_unregister_aead(&aes_gcm_alg);
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
+		crypto_unregister_alg(&aes_algs[i]);
+}
+
+static int mtk_aes_register_algs(void)
+{
+	int err, i;
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+		err = crypto_register_alg(&aes_algs[i]);
+		if (err)
+			goto err_aes_algs;
+	}
+
+	err = crypto_register_aead(&aes_gcm_alg);
+	if (err)
+		goto err_aes_algs;
+
+	return 0;
+
+err_aes_algs:
+	for (; i--; )
+		crypto_unregister_alg(&aes_algs[i]);
+
+	return err;
+}
+
+int mtk_cipher_alg_register(struct mtk_cryp *cryp)
+{
+	int ret;
+
+	INIT_LIST_HEAD(&cryp->aes_list);
+
+	/* Initialize two cipher records */
+	ret = mtk_aes_record_init(cryp);
+	if (ret)
+		goto err_record;
+
+	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
+			       0, "mtk-aes", cryp->aes[0]);
+	if (ret) {
+		dev_err(cryp->dev, "unable to request AES irq.\n");
+		goto err_res;
+	}
+
+	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
+			       0, "mtk-aes", cryp->aes[1]);
+	if (ret) {
+		dev_err(cryp->dev, "unable to request AES irq.\n");
+		goto err_res;
+	}
+
+	/* Enable ring0 and ring1 interrupt */
+	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
+	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);
+
+	spin_lock(&mtk_aes.lock);
+	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
+	spin_unlock(&mtk_aes.lock);
+
+	ret = mtk_aes_register_algs();
+	if (ret)
+		goto err_algs;
+
+	return 0;
+
+err_algs:
+	spin_lock(&mtk_aes.lock);
+	list_del(&cryp->aes_list);
+	spin_unlock(&mtk_aes.lock);
+err_res:
+	mtk_aes_record_free(cryp);
+err_record:
+	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
+	return ret;
+}
+
+void mtk_cipher_alg_release(struct mtk_cryp *cryp)
+{
+	spin_lock(&mtk_aes.lock);
+	list_del(&cryp->aes_list);
+	spin_unlock(&mtk_aes.lock);
+
+	mtk_aes_unregister_algs();
+	mtk_aes_record_free(cryp);
+}
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
new file mode 100644
index 0000000..ee0404e
--- /dev/null
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -0,0 +1,599 @@
+/*
+ * Driver for EIP97 cryptographic accelerator.
+ *
+ * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include "mtk-platform.h"
+
+#define MTK_BURST_SIZE_MSK		GENMASK(7, 4)
+#define MTK_BURST_SIZE(x)		((x) << 4)
+#define MTK_DESC_SIZE(x)		((x) << 0)
+#define MTK_DESC_OFFSET(x)		((x) << 16)
+#define MTK_DESC_FETCH_SIZE(x)		((x) << 0)
+#define MTK_DESC_FETCH_THRESH(x)	((x) << 16)
+#define MTK_DESC_OVL_IRQ_EN		BIT(25)
+#define MTK_DESC_ATP_PRESENT		BIT(30)
+
+#define MTK_DFSE_IDLE			GENMASK(3, 0)
+#define MTK_DFSE_THR_CTRL_EN		BIT(30)
+#define MTK_DFSE_THR_CTRL_RESET		BIT(31)
+#define MTK_DFSE_RING_ID(x)		(((x) >> 12) & GENMASK(3, 0))
+#define MTK_DFSE_MIN_DATA(x)		((x) << 0)
+#define MTK_DFSE_MAX_DATA(x)		((x) << 8)
+#define MTK_DFE_MIN_CTRL(x)		((x) << 16)
+#define MTK_DFE_MAX_CTRL(x)		((x) << 24)
+
+#define MTK_IN_BUF_MIN_THRESH(x)	((x) << 8)
+#define MTK_IN_BUF_MAX_THRESH(x)	((x) << 12)
+#define MTK_OUT_BUF_MIN_THRESH(x)	((x) << 0)
+#define MTK_OUT_BUF_MAX_THRESH(x)	((x) << 4)
+#define MTK_IN_TBUF_SIZE(x)		(((x) >> 4) & GENMASK(3, 0))
+#define MTK_IN_DBUF_SIZE(x)		(((x) >> 8) & GENMASK(3, 0))
+#define MTK_OUT_DBUF_SIZE(x)		(((x) >> 16) & GENMASK(3, 0))
+#define MTK_CMD_FIFO_SIZE(x)		(((x) >> 8) & GENMASK(3, 0))
+#define MTK_RES_FIFO_SIZE(x)		(((x) >> 12) & GENMASK(3, 0))
+
+#define MTK_PE_TK_LOC_AVL		BIT(2)
+#define MTK_PE_PROC_HELD		BIT(14)
+#define MTK_PE_TK_TIMEOUT_EN		BIT(22)
+#define MTK_PE_INPUT_DMA_ERR		BIT(0)
+#define MTK_PE_OUTPUT_DMA_ERR		BIT(1)
+#define MTK_PE_PKT_PORC_ERR		BIT(2)
+#define MTK_PE_PKT_TIMEOUT		BIT(3)
+#define MTK_PE_FATAL_ERR		BIT(14)
+#define MTK_PE_INPUT_DMA_ERR_EN		BIT(16)
+#define MTK_PE_OUTPUT_DMA_ERR_EN	BIT(17)
+#define MTK_PE_PKT_PORC_ERR_EN		BIT(18)
+#define MTK_PE_PKT_TIMEOUT_EN		BIT(19)
+#define MTK_PE_FATAL_ERR_EN		BIT(30)
+#define MTK_PE_INT_OUT_EN		BIT(31)
+
+#define MTK_HIA_SIGNATURE		((u16)0x35ca)
+#define MTK_HIA_DATA_WIDTH(x)		(((x) >> 25) & GENMASK(1, 0))
+#define MTK_HIA_DMA_LENGTH(x)		(((x) >> 20) & GENMASK(4, 0))
+#define MTK_CDR_STAT_CLR		GENMASK(4, 0)
+#define MTK_RDR_STAT_CLR		GENMASK(7, 0)
+
+#define MTK_AIC_INT_MSK			GENMASK(5, 0)
+#define MTK_AIC_VER_MSK			(GENMASK(15, 0) | GENMASK(27, 20))
+#define MTK_AIC_VER11			0x011036c9
+#define MTK_AIC_VER12			0x012036c9
+#define MTK_AIC_G_CLR			GENMASK(30, 20)
+
+/*
+ * EIP97 is an integrated security subsystem that accelerates cryptographic
+ * functions and protocols in order to offload the host processor.
+ * Its most important hardware modules are briefly introduced below:
+ *
+ * Host Interface Adapter (HIA) - the main interface between the host
+ * system and the hardware subsystem. It is responsible for attaching
+ * the processing engine to the specific host bus interface and provides
+ * a standardized software view for offloading tasks to the engine.
+ *
+ * Command Descriptor Ring Manager (CDR Manager) - keeps track of how many
+ * CDs the host has prepared in the CDR. It monitors the fill level of its
+ * CD-FIFO and, if there is sufficient space for the next block of
+ * descriptors, fires off a DMA request to fetch a block of CDs.
+ *
+ * Data Fetch Engine (DFE) - responsible for parsing the CD and setting
+ * up the required control and packet data DMA transfers from system
+ * memory to the processing engine.
+ *
+ * Result Descriptor Ring Manager (RDR Manager) - same as the CDR Manager,
+ * but its target is result descriptors. Moreover, it also handles the RD
+ * updates under control of the DSE. For each packet data segment
+ * processed, the DSE triggers the RDR Manager to write the updated RD.
+ * If triggered to update, the RDR Manager sets up a DMA operation to
+ * copy the RD from the DSE to the correct location in the RDR.
+ *
+ * Data Store Engine (DSE) - responsible for parsing the prepared RD and
+ * setting up the required control and packet data DMA transfers from
+ * the processing engine to system memory.
+ *
+ * Advanced Interrupt Controllers (AICs) - receive interrupt request
+ * signals from various sources and combine them into one interrupt
+ * output. One AIC serves the HIA global and processing engine
+ * interrupts; the others serve the descriptor ring interrupts.
+ */
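+
+/*
+ * Putting the modules above together, the per-request flow is roughly
+ * the following (a simplified sketch, not an exact hardware trace):
+ *
+ *   1. The driver fills command/result descriptors in a ring and bumps
+ *      the ring's CDR/RDR PREP_COUNT registers.
+ *   2. The CDR Manager and DFE fetch the CDs and DMA the input data to
+ *      the packet engine, which processes it.
+ *   3. The DSE DMAs the results back and the RDR Manager writes the
+ *      updated RDs.
+ *   4. The ring's AIC raises an interrupt and the driver's done tasklet
+ *      resumes the request.
+ */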
+
+/* Cryptographic engine capabilities */
+struct mtk_sys_cap {
+	/* host interface adapter */
+	u32 hia_ver;
+	u32 hia_opt;
+	/* packet engine */
+	u32 pkt_eng_opt;
+	/* global hardware */
+	u32 hw_opt;
+};
+
+static void mtk_desc_ring_link(struct mtk_cryp *cryp, u32 mask)
+{
+	/* Assign rings to DFE/DSE thread and enable it */
+	writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DFE_THR_CTRL);
+	writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DSE_THR_CTRL);
+}
+
+static void mtk_dfe_dse_buf_setup(struct mtk_cryp *cryp,
+				  struct mtk_sys_cap *cap)
+{
+	u32 width = MTK_HIA_DATA_WIDTH(cap->hia_opt) + 2;
+	u32 len = MTK_HIA_DMA_LENGTH(cap->hia_opt) - 1;
+	u32 ipbuf = min((u32)MTK_IN_DBUF_SIZE(cap->hw_opt) + width, len);
+	u32 opbuf = min((u32)MTK_OUT_DBUF_SIZE(cap->hw_opt) + width, len);
+	u32 itbuf = min((u32)MTK_IN_TBUF_SIZE(cap->hw_opt) + width, len);
+
+	writel(MTK_DFSE_MIN_DATA(ipbuf - 1) |
+	       MTK_DFSE_MAX_DATA(ipbuf) |
+	       MTK_DFE_MIN_CTRL(itbuf - 1) |
+	       MTK_DFE_MAX_CTRL(itbuf),
+	       cryp->base + DFE_CFG);
+
+	writel(MTK_DFSE_MIN_DATA(opbuf - 1) |
+	       MTK_DFSE_MAX_DATA(opbuf),
+	       cryp->base + DSE_CFG);
+
+	writel(MTK_IN_BUF_MIN_THRESH(ipbuf - 1) |
+	       MTK_IN_BUF_MAX_THRESH(ipbuf),
+	       cryp->base + PE_IN_DBUF_THRESH);
+
+	writel(MTK_IN_BUF_MIN_THRESH(itbuf - 1) |
+	       MTK_IN_BUF_MAX_THRESH(itbuf),
+	       cryp->base + PE_IN_TBUF_THRESH);
+
+	writel(MTK_OUT_BUF_MIN_THRESH(opbuf - 1) |
+	       MTK_OUT_BUF_MAX_THRESH(opbuf),
+	       cryp->base + PE_OUT_DBUF_THRESH);
+
+	writel(0, cryp->base + PE_OUT_TBUF_THRESH);
+	writel(0, cryp->base + PE_OUT_BUF_CTRL);
+}
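+
+/*
+ * A purely arithmetic illustration of the computation above, using
+ * made-up option-register fields (real values are reported by the
+ * hardware in HIA_OPTIONS/EIP97_OPTIONS): with a HIA data width field
+ * of 2 and a DMA length field of 9, width = 4 and len = 8; an input
+ * data buffer size field of 5 then gives ipbuf = min(5 + 4, 8) = 8,
+ * so DFE_CFG is programmed with min/max data thresholds of 7 and 8.
+ */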
+
+static int mtk_dfe_dse_state_check(struct mtk_cryp *cryp)
+{
+	int ret = -EINVAL;
+	u32 val;
+
+	/* Check for completion of all DMA transfers */
+	val = readl(cryp->base + DFE_THR_STAT);
+	if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE) {
+		val = readl(cryp->base + DSE_THR_STAT);
+		if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE)
+			ret = 0;
+	}
+
+	if (ret)
+		return -EBUSY;
+
+	/* Take DFE/DSE thread out of reset */
+	writel(0, cryp->base + DFE_THR_CTRL);
+	writel(0, cryp->base + DSE_THR_CTRL);
+
+	return 0;
+}
+
+static int mtk_dfe_dse_reset(struct mtk_cryp *cryp)
+{
+	int err;
+
+	/* Reset DSE/DFE and correct system priorities for all rings. */
+	writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DFE_THR_CTRL);
+	writel(0, cryp->base + DFE_PRIO_0);
+	writel(0, cryp->base + DFE_PRIO_1);
+	writel(0, cryp->base + DFE_PRIO_2);
+	writel(0, cryp->base + DFE_PRIO_3);
+
+	writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DSE_THR_CTRL);
+	writel(0, cryp->base + DSE_PRIO_0);
+	writel(0, cryp->base + DSE_PRIO_1);
+	writel(0, cryp->base + DSE_PRIO_2);
+	writel(0, cryp->base + DSE_PRIO_3);
+
+	err = mtk_dfe_dse_state_check(cryp);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static void mtk_cmd_desc_ring_setup(struct mtk_cryp *cryp,
+				    int i, struct mtk_sys_cap *cap)
+{
+	/* Number of full descriptors that fit in the FIFO, minus one */
+	u32 count =
+		((1 << MTK_CMD_FIFO_SIZE(cap->hia_opt)) / MTK_DESC_SZ) - 1;
+
+	/* Temporarily disable external triggering */
+	writel(0, cryp->base + CDR_CFG(i));
+
+	/* Clear CDR count */
+	writel(MTK_CNT_RST, cryp->base + CDR_PREP_COUNT(i));
+	writel(MTK_CNT_RST, cryp->base + CDR_PROC_COUNT(i));
+
+	writel(0, cryp->base + CDR_PREP_PNTR(i));
+	writel(0, cryp->base + CDR_PROC_PNTR(i));
+	writel(0, cryp->base + CDR_DMA_CFG(i));
+
+	/* Configure CDR host address space */
+	writel(0, cryp->base + CDR_BASE_ADDR_HI(i));
+	writel(cryp->ring[i]->cmd_dma, cryp->base + CDR_BASE_ADDR_LO(i));
+
+	writel(MTK_DESC_RING_SZ, cryp->base + CDR_RING_SIZE(i));
+
+	/* Clear and disable all CDR interrupts */
+	writel(MTK_CDR_STAT_CLR, cryp->base + CDR_STAT(i));
+
+	/*
+	 * Set the command descriptor offset and enable the additional
+	 * token present in the descriptor.
+	 */
+	writel(MTK_DESC_SIZE(MTK_DESC_SZ) |
+	       MTK_DESC_OFFSET(MTK_DESC_OFF) |
+	       MTK_DESC_ATP_PRESENT,
+	       cryp->base + CDR_DESC_SIZE(i));
+
+	writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
+	       MTK_DESC_FETCH_THRESH(count * MTK_DESC_SZ),
+	       cryp->base + CDR_CFG(i));
+}
+
+static void mtk_res_desc_ring_setup(struct mtk_cryp *cryp,
+				    int i, struct mtk_sys_cap *cap)
+{
+	u32 rndup = 2;
+	u32 count = ((1 << MTK_RES_FIFO_SIZE(cap->hia_opt)) / rndup) - 1;
+
+	/* Temporarily disable external triggering */
+	writel(0, cryp->base + RDR_CFG(i));
+
+	/* Clear RDR count */
+	writel(MTK_CNT_RST, cryp->base + RDR_PREP_COUNT(i));
+	writel(MTK_CNT_RST, cryp->base + RDR_PROC_COUNT(i));
+
+	writel(0, cryp->base + RDR_PREP_PNTR(i));
+	writel(0, cryp->base + RDR_PROC_PNTR(i));
+	writel(0, cryp->base + RDR_DMA_CFG(i));
+
+	/* Configure RDR host address space */
+	writel(0, cryp->base + RDR_BASE_ADDR_HI(i));
+	writel(cryp->ring[i]->res_dma, cryp->base + RDR_BASE_ADDR_LO(i));
+
+	writel(MTK_DESC_RING_SZ, cryp->base + RDR_RING_SIZE(i));
+	writel(MTK_RDR_STAT_CLR, cryp->base + RDR_STAT(i));
+
+	/*
+	 * The RDR manager generates update interrupts on a per-completed-packet
+	 * basis; the rd_proc_thresh_irq interrupt fires once proc_pkt_count
+	 * for the RDR reaches the configured packet threshold.
+	 */
+	writel(MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE,
+	       cryp->base + RDR_THRESH(i));
+
+	/*
+	 * Configure a threshold and time-out value for the processed
+	 * result descriptors (or complete packets) that are written to
+	 * the RDR.
+	 */
+	writel(MTK_DESC_SIZE(MTK_DESC_SZ) | MTK_DESC_OFFSET(MTK_DESC_OFF),
+	       cryp->base + RDR_DESC_SIZE(i));
+
+	/*
+	 * Configure HIA fetch size and fetch threshold that are used to
+	 * fetch blocks of multiple descriptors.
+	 */
+	writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
+	       MTK_DESC_FETCH_THRESH(count * rndup) |
+	       MTK_DESC_OVL_IRQ_EN,
+	       cryp->base + RDR_CFG(i));
+}
+
+static int mtk_packet_engine_setup(struct mtk_cryp *cryp)
+{
+	struct mtk_sys_cap cap;
+	int i, err;
+	u32 val;
+
+	cap.hia_ver = readl(cryp->base + HIA_VERSION);
+	cap.hia_opt = readl(cryp->base + HIA_OPTIONS);
+	cap.hw_opt = readl(cryp->base + EIP97_OPTIONS);
+
+	if ((u16)cap.hia_ver != MTK_HIA_SIGNATURE)
+		return -EINVAL;
+
+	/* Configure endianness conversion method for master (DMA) interface */
+	writel(0, cryp->base + EIP97_MST_CTRL);
+
+	/* Set HIA burst size */
+	val = readl(cryp->base + HIA_MST_CTRL);
+	val &= ~MTK_BURST_SIZE_MSK;
+	val |= MTK_BURST_SIZE(5);
+	writel(val, cryp->base + HIA_MST_CTRL);
+
+	err = mtk_dfe_dse_reset(cryp);
+	if (err) {
+		dev_err(cryp->dev, "Failed to reset DFE and DSE.\n");
+		return err;
+	}
+
+	mtk_dfe_dse_buf_setup(cryp, &cap);
+
+	/* Enable the 4 rings for the packet engines. */
+	mtk_desc_ring_link(cryp, 0xf);
+
+	for (i = 0; i < MTK_RING_MAX; i++) {
+		mtk_cmd_desc_ring_setup(cryp, i, &cap);
+		mtk_res_desc_ring_setup(cryp, i, &cap);
+	}
+
+	writel(MTK_PE_TK_LOC_AVL | MTK_PE_PROC_HELD | MTK_PE_TK_TIMEOUT_EN,
+	       cryp->base + PE_TOKEN_CTRL_STAT);
+
+	/*
+	 * Clear all pending interrupts and enable the packet engine
+	 * error interrupts.
+	 */
+	writel(MTK_AIC_G_CLR, cryp->base + AIC_G_ACK);
+	writel(MTK_PE_INPUT_DMA_ERR | MTK_PE_OUTPUT_DMA_ERR |
+	       MTK_PE_PKT_PORC_ERR | MTK_PE_PKT_TIMEOUT |
+	       MTK_PE_FATAL_ERR | MTK_PE_INPUT_DMA_ERR_EN |
+	       MTK_PE_OUTPUT_DMA_ERR_EN | MTK_PE_PKT_PORC_ERR_EN |
+	       MTK_PE_PKT_TIMEOUT_EN | MTK_PE_FATAL_ERR_EN |
+	       MTK_PE_INT_OUT_EN,
+	       cryp->base + PE_INTERRUPT_CTRL_STAT);
+
+	return 0;
+}
+
+static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw)
+{
+	u32 val;
+
+	if (hw == MTK_RING_MAX)
+		val = readl(cryp->base + AIC_G_VERSION);
+	else
+		val = readl(cryp->base + AIC_VERSION(hw));
+
+	val &= MTK_AIC_VER_MSK;
+	if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12)
+		return -ENXIO;
+
+	if (hw == MTK_RING_MAX)
+		val = readl(cryp->base + AIC_G_OPTIONS);
+	else
+		val = readl(cryp->base + AIC_OPTIONS(hw));
+
+	val &= MTK_AIC_INT_MSK;
+	if (!val || val > 32)
+		return -ENXIO;
+
+	return 0;
+}
+
+static int mtk_aic_init(struct mtk_cryp *cryp, int hw)
+{
+	int err;
+
+	err = mtk_aic_cap_check(cryp, hw);
+	if (err)
+		return err;
+
+	/* Disable all interrupts and set initial configuration */
+	if (hw == MTK_RING_MAX) {
+		writel(0, cryp->base + AIC_G_ENABLE_CTRL);
+		writel(0, cryp->base + AIC_G_POL_CTRL);
+		writel(0, cryp->base + AIC_G_TYPE_CTRL);
+		writel(0, cryp->base + AIC_G_ENABLE_SET);
+	} else {
+		writel(0, cryp->base + AIC_ENABLE_CTRL(hw));
+		writel(0, cryp->base + AIC_POL_CTRL(hw));
+		writel(0, cryp->base + AIC_TYPE_CTRL(hw));
+		writel(0, cryp->base + AIC_ENABLE_SET(hw));
+	}
+
+	return 0;
+}
+
+static int mtk_accelerator_init(struct mtk_cryp *cryp)
+{
+	int i, err;
+
+	/* Initialize the advanced interrupt controllers (AICs) */
+	for (i = 0; i < MTK_IRQ_NUM; i++) {
+		err = mtk_aic_init(cryp, i);
+		if (err) {
+			dev_err(cryp->dev, "Failed to initialize AIC.\n");
+			return err;
+		}
+	}
+
+	/* Initialize packet engine */
+	err = mtk_packet_engine_setup(cryp);
+	if (err) {
+		dev_err(cryp->dev, "Failed to configure packet engine.\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static void mtk_desc_dma_free(struct mtk_cryp *cryp)
+{
+	int i;
+
+	for (i = 0; i < MTK_RING_MAX; i++) {
+		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
+				  cryp->ring[i]->res_base,
+				  cryp->ring[i]->res_dma);
+		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
+				  cryp->ring[i]->cmd_base,
+				  cryp->ring[i]->cmd_dma);
+		kfree(cryp->ring[i]);
+	}
+}
+
+static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
+{
+	struct mtk_ring **ring = cryp->ring;
+	int i, err = -ENOMEM;
+
+	for (i = 0; i < MTK_RING_MAX; i++) {
+		ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
+		if (!ring[i])
+			goto err_cleanup;
+
+		ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev,
+					   MTK_DESC_RING_SZ,
+					   &ring[i]->cmd_dma,
+					   GFP_KERNEL);
+		if (!ring[i]->cmd_base)
+			goto err_cleanup;
+
+		ring[i]->res_base = dma_zalloc_coherent(cryp->dev,
+					   MTK_DESC_RING_SZ,
+					   &ring[i]->res_dma,
+					   GFP_KERNEL);
+		if (!ring[i]->res_base)
+			goto err_cleanup;
+
+		ring[i]->cmd_next = ring[i]->cmd_base;
+		ring[i]->res_next = ring[i]->res_base;
+	}
+	return 0;
+
+err_cleanup:
+	for (; i--; ) {
+		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
+				  ring[i]->res_base, ring[i]->res_dma);
+		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
+				  ring[i]->cmd_base, ring[i]->cmd_dma);
+		kfree(ring[i]);
+	}
+	return err;
+}
+
+static int mtk_crypto_probe(struct platform_device *pdev)
+{
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	struct mtk_cryp *cryp;
+	int i, err;
+
+	cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL);
+	if (!cryp)
+		return -ENOMEM;
+
+	cryp->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(cryp->base))
+		return PTR_ERR(cryp->base);
+
+	for (i = 0; i < MTK_IRQ_NUM; i++) {
+		cryp->irq[i] = platform_get_irq(pdev, i);
+		if (cryp->irq[i] < 0) {
+			dev_err(&pdev->dev, "no IRQ:%d resource info\n", i);
+			return cryp->irq[i];
+		}
+	}
+
+	cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp");
+	if (IS_ERR(cryp->clk_cryp))
+		return -EPROBE_DEFER;
+
+	cryp->dev = &pdev->dev;
+	pm_runtime_enable(cryp->dev);
+	pm_runtime_get_sync(cryp->dev);
+
+	err = clk_prepare_enable(cryp->clk_cryp);
+	if (err)
+		goto err_clk_cryp;
+
+	/* Allocate four command/result descriptor rings */
+	err = mtk_desc_ring_alloc(cryp);
+	if (err) {
+		dev_err(cryp->dev, "Unable to allocate descriptor rings.\n");
+		goto err_resource;
+	}
+
+	/* Initialize hardware modules */
+	err = mtk_accelerator_init(cryp);
+	if (err) {
+		dev_err(cryp->dev, "Failed to initialize cryptographic engine.\n");
+		goto err_engine;
+	}
+
+	err = mtk_cipher_alg_register(cryp);
+	if (err) {
+		dev_err(cryp->dev, "Unable to register cipher algorithm.\n");
+		goto err_cipher;
+	}
+
+	err = mtk_hash_alg_register(cryp);
+	if (err) {
+		dev_err(cryp->dev, "Unable to register hash algorithm.\n");
+		goto err_hash;
+	}
+
+	platform_set_drvdata(pdev, cryp);
+	return 0;
+
+err_hash:
+	mtk_cipher_alg_release(cryp);
+err_cipher:
+	mtk_dfe_dse_reset(cryp);
+err_engine:
+	mtk_desc_dma_free(cryp);
+err_resource:
+	clk_disable_unprepare(cryp->clk_cryp);
+err_clk_cryp:
+	pm_runtime_put_sync(cryp->dev);
+	pm_runtime_disable(cryp->dev);
+
+	return err;
+}
+
+static int mtk_crypto_remove(struct platform_device *pdev)
+{
+	struct mtk_cryp *cryp = platform_get_drvdata(pdev);
+
+	mtk_hash_alg_release(cryp);
+	mtk_cipher_alg_release(cryp);
+	mtk_desc_dma_free(cryp);
+
+	clk_disable_unprepare(cryp->clk_cryp);
+
+	pm_runtime_put_sync(cryp->dev);
+	pm_runtime_disable(cryp->dev);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id of_crypto_id[] = {
+	{ .compatible = "mediatek,eip97-crypto" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_crypto_id);
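+
+/*
+ * Note: judging from the probe routine above, the matching device tree
+ * node is expected to provide one register window, MTK_IRQ_NUM (five)
+ * interrupts (the four ring AICs plus the global AIC) and a clock named
+ * "cryp"; see the device tree binding document for the authoritative
+ * format.
+ */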
+
+static struct platform_driver mtk_crypto_driver = {
+	.probe = mtk_crypto_probe,
+	.remove = mtk_crypto_remove,
+	.driver = {
+		   .name = "mtk-crypto",
+		   .of_match_table = of_crypto_id,
+	},
+};
+module_platform_driver(mtk_crypto_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ryder Lee <ryder.lee@mediatek.com>");
+MODULE_DESCRIPTION("Cryptographic accelerator driver for EIP97");
diff --git a/drivers/crypto/mediatek/mtk-platform.h b/drivers/crypto/mediatek/mtk-platform.h
new file mode 100644
index 0000000..f0831f1
--- /dev/null
+++ b/drivers/crypto/mediatek/mtk-platform.h
@@ -0,0 +1,235 @@
+/*
+ * Driver for EIP97 cryptographic accelerator.
+ *
+ * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __MTK_PLATFORM_H_
+#define __MTK_PLATFORM_H_
+
+#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include "mtk-regs.h"
+
+#define MTK_RDR_PROC_THRESH	BIT(0)
+#define MTK_RDR_PROC_MODE	BIT(23)
+#define MTK_CNT_RST		BIT(31)
+#define MTK_IRQ_RDR0		BIT(1)
+#define MTK_IRQ_RDR1		BIT(3)
+#define MTK_IRQ_RDR2		BIT(5)
+#define MTK_IRQ_RDR3		BIT(7)
+
+#define SIZE_IN_WORDS(x)	((x) >> 2)
+
+/**
+ * Ring 0/1 are used by AES encrypt and decrypt.
+ * Ring 2/3 are used by SHA.
+ */
+enum {
+	MTK_RING0,
+	MTK_RING1,
+	MTK_RING2,
+	MTK_RING3,
+	MTK_RING_MAX
+};
+
+#define MTK_REC_NUM		(MTK_RING_MAX / 2)
+#define MTK_IRQ_NUM		5
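+
+/*
+ * In other words: MTK_REC_NUM is two, so the driver keeps two AES
+ * records (bound to ring 0/1) and two SHA records (bound to ring 2/3),
+ * while MTK_IRQ_NUM covers the four ring AICs plus the global AIC.
+ */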
+
+/**
+ * struct mtk_desc - DMA descriptor
+ * @hdr:	the descriptor control header
+ * @buf:	DMA address of input buffer segment
+ * @ct:		DMA address of command token that controls operation flow
+ * @ct_hdr:	the command token control header
+ * @tag:	the user-defined field
+ * @tfm:	DMA address of transform state
+ * @bound:	padding used to align descriptors to the offset boundary
+ *
+ * Structure passed to the crypto engine to describe where source
+ * data needs to be fetched and how it needs to be processed.
+ */
+struct mtk_desc {
+	__le32 hdr;
+	__le32 buf;
+	__le32 ct;
+	__le32 ct_hdr;
+	__le32 tag;
+	__le32 tfm;
+	__le32 bound[2];
+};
+
+#define MTK_DESC_NUM		512
+#define MTK_DESC_OFF		SIZE_IN_WORDS(sizeof(struct mtk_desc))
+#define MTK_DESC_SZ		(MTK_DESC_OFF - 2)
+#define MTK_DESC_RING_SZ	((sizeof(struct mtk_desc) * MTK_DESC_NUM))
+#define MTK_DESC_CNT(x)		((MTK_DESC_OFF * (x)) << 2)
+#define MTK_DESC_LAST		cpu_to_le32(BIT(22))
+#define MTK_DESC_FIRST		cpu_to_le32(BIT(23))
+#define MTK_DESC_BUF_LEN(x)	cpu_to_le32(x)
+#define MTK_DESC_CT_LEN(x)	cpu_to_le32((x) << 24)
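+
+/*
+ * A minimal sketch of how a single-segment transfer is described with
+ * the fields and macros above (it mirrors the descriptor setup done in
+ * the AES/SHA transmit paths; src_dma, ct_dma, tfm_dma, ct_size and len
+ * are placeholders):
+ *
+ *	cmd->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len) |
+ *		   MTK_DESC_CT_LEN(ct_size);
+ *	cmd->buf = cpu_to_le32(src_dma);
+ *	cmd->ct = cpu_to_le32(ct_dma);
+ *	cmd->tfm = cpu_to_le32(tfm_dma);
+ *
+ *	res->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len);
+ */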
+
+/**
+ * struct mtk_ring - Descriptor ring
+ * @cmd_base:	pointer to command descriptor ring base
+ * @cmd_next:	pointer to the next command descriptor
+ * @cmd_dma:	DMA address of command descriptor ring
+ * @res_base:	pointer to result descriptor ring base
+ * @res_next:	pointer to the next result descriptor
+ * @res_prev:	pointer to the previous result descriptor
+ * @res_dma:	DMA address of result descriptor ring
+ *
+ * A descriptor ring is a circular buffer that is used to manage
+ * one or more descriptors. There are two types of descriptor rings:
+ * the command descriptor ring and the result descriptor ring.
+ */
+struct mtk_ring {
+	struct mtk_desc *cmd_base;
+	struct mtk_desc *cmd_next;
+	dma_addr_t cmd_dma;
+	struct mtk_desc *res_base;
+	struct mtk_desc *res_next;
+	struct mtk_desc *res_prev;
+	dma_addr_t res_dma;
+};
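+
+/*
+ * The command and result rings are advanced in lockstep: when cmd_next
+ * walks past the end of the command ring, both cmd_next and res_next
+ * wrap back to their ring bases (see mtk_sha_ring_shift()).
+ */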
+
+/**
+ * struct mtk_aes_dma - Structure that holds sg list info
+ * @sg:		pointer to scatter-gather list
+ * @nents:	number of entries in the sg list
+ * @remainder:	remainder of sg list
+ * @sg_len:	number of entries in the sg mapped list
+ */
+struct mtk_aes_dma {
+	struct scatterlist *sg;
+	int nents;
+	u32 remainder;
+	u32 sg_len;
+};
+
+struct mtk_aes_base_ctx;
+struct mtk_aes_rec;
+struct mtk_cryp;
+
+typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
+
+/**
+ * struct mtk_aes_rec - AES operation record
+ * @cryp:	pointer to Cryptographic device
+ * @queue:	crypto request queue
+ * @areq:	pointer to async request
+ * @done_task:	tasklet used to handle the AES completion interrupt
+ * @queue_task:	tasklet used to dequeue the next queued request
+ * @ctx:	pointer to current context
+ * @src:	the structure that holds source sg list info
+ * @dst:	the structure that holds destination sg list info
+ * @aligned_sg:	scatterlist used for buffer alignment
+ * @real_dst:	pointer to the destination sg list
+ * @resume:	pointer to resume function
+ * @total:	request buffer length
+ * @buf:	pointer to page buffer
+ * @id:		id of the descriptor ring currently in use
+ * @flags:	AES operation state flags
+ * @lock:	the async queue lock
+ *
+ * Structure used to record AES execution state.
+ */
+struct mtk_aes_rec {
+	struct mtk_cryp *cryp;
+	struct crypto_queue queue;
+	struct crypto_async_request *areq;
+	struct tasklet_struct done_task;
+	struct tasklet_struct queue_task;
+	struct mtk_aes_base_ctx *ctx;
+	struct mtk_aes_dma src;
+	struct mtk_aes_dma dst;
+
+	struct scatterlist aligned_sg;
+	struct scatterlist *real_dst;
+
+	mtk_aes_fn resume;
+
+	size_t total;
+	void *buf;
+
+	u8 id;
+	unsigned long flags;
+	/* queue lock */
+	spinlock_t lock;
+};
+
+/**
+ * struct mtk_sha_rec - SHA operation record
+ * @cryp:	pointer to Cryptographic device
+ * @queue:	crypto request queue
+ * @req:	pointer to ahash request
+ * @done_task:	tasklet used to handle the SHA completion interrupt
+ * @queue_task:	tasklet used to dequeue the next queued request
+ * @id:		id of the descriptor ring currently in use
+ * @flags:	SHA operation state flags
+ * @lock:	the async queue lock
+ *
+ * Structure used to record SHA execution state.
+ */
+struct mtk_sha_rec {
+	struct mtk_cryp *cryp;
+	struct crypto_queue queue;
+	struct ahash_request *req;
+	struct tasklet_struct done_task;
+	struct tasklet_struct queue_task;
+
+	u8 id;
+	unsigned long flags;
+	/* queue lock */
+	spinlock_t lock;
+};
+
+/**
+ * struct mtk_cryp - Cryptographic device
+ * @base:	pointer to mapped register I/O base
+ * @dev:	pointer to device
+ * @clk_cryp:	pointer to crypto clock
+ * @irq:	global system and rings IRQ
+ * @ring:	pointer to descriptor rings
+ * @aes:	pointer to operation record of AES
+ * @sha:	pointer to operation record of SHA
+ * @aes_list:	device list of AES
+ * @sha_list:	device list of SHA
+ * @rec:	used to select the SHA record assigned to a tfm
+ *
+ * Structure storing cryptographic device information.
+ */
+struct mtk_cryp {
+	void __iomem *base;
+	struct device *dev;
+	struct clk *clk_cryp;
+	int irq[MTK_IRQ_NUM];
+
+	struct mtk_ring *ring[MTK_RING_MAX];
+	struct mtk_aes_rec *aes[MTK_REC_NUM];
+	struct mtk_sha_rec *sha[MTK_REC_NUM];
+
+	struct list_head aes_list;
+	struct list_head sha_list;
+
+	bool rec;
+};
+
+int mtk_cipher_alg_register(struct mtk_cryp *cryp);
+void mtk_cipher_alg_release(struct mtk_cryp *cryp);
+int mtk_hash_alg_register(struct mtk_cryp *cryp);
+void mtk_hash_alg_release(struct mtk_cryp *cryp);
+
+#endif /* __MTK_PLATFORM_H_ */
diff --git a/drivers/crypto/mediatek/mtk-regs.h b/drivers/crypto/mediatek/mtk-regs.h
new file mode 100644
index 0000000..94f4eb8
--- /dev/null
+++ b/drivers/crypto/mediatek/mtk-regs.h
@@ -0,0 +1,194 @@
+/*
+ * Support for MediaTek cryptographic accelerator.
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ */
+
+#ifndef __MTK_REGS_H__
+#define __MTK_REGS_H__
+
+/* HIA, Command Descriptor Ring Manager */
+#define CDR_BASE_ADDR_LO(x)		(0x0 + ((x) << 12))
+#define CDR_BASE_ADDR_HI(x)		(0x4 + ((x) << 12))
+#define CDR_DATA_BASE_ADDR_LO(x)	(0x8 + ((x) << 12))
+#define CDR_DATA_BASE_ADDR_HI(x)	(0xC + ((x) << 12))
+#define CDR_ACD_BASE_ADDR_LO(x)		(0x10 + ((x) << 12))
+#define CDR_ACD_BASE_ADDR_HI(x)		(0x14 + ((x) << 12))
+#define CDR_RING_SIZE(x)		(0x18 + ((x) << 12))
+#define CDR_DESC_SIZE(x)		(0x1C + ((x) << 12))
+#define CDR_CFG(x)			(0x20 + ((x) << 12))
+#define CDR_DMA_CFG(x)			(0x24 + ((x) << 12))
+#define CDR_THRESH(x)			(0x28 + ((x) << 12))
+#define CDR_PREP_COUNT(x)		(0x2C + ((x) << 12))
+#define CDR_PROC_COUNT(x)		(0x30 + ((x) << 12))
+#define CDR_PREP_PNTR(x)		(0x34 + ((x) << 12))
+#define CDR_PROC_PNTR(x)		(0x38 + ((x) << 12))
+#define CDR_STAT(x)			(0x3C + ((x) << 12))
+
+/* HIA, Result Descriptor Ring Manager */
+#define RDR_BASE_ADDR_LO(x)		(0x800 + ((x) << 12))
+#define RDR_BASE_ADDR_HI(x)		(0x804 + ((x) << 12))
+#define RDR_DATA_BASE_ADDR_LO(x)	(0x808 + ((x) << 12))
+#define RDR_DATA_BASE_ADDR_HI(x)	(0x80C + ((x) << 12))
+#define RDR_ACD_BASE_ADDR_LO(x)		(0x810 + ((x) << 12))
+#define RDR_ACD_BASE_ADDR_HI(x)		(0x814 + ((x) << 12))
+#define RDR_RING_SIZE(x)		(0x818 + ((x) << 12))
+#define RDR_DESC_SIZE(x)		(0x81C + ((x) << 12))
+#define RDR_CFG(x)			(0x820 + ((x) << 12))
+#define RDR_DMA_CFG(x)			(0x824 + ((x) << 12))
+#define RDR_THRESH(x)			(0x828 + ((x) << 12))
+#define RDR_PREP_COUNT(x)		(0x82C + ((x) << 12))
+#define RDR_PROC_COUNT(x)		(0x830 + ((x) << 12))
+#define RDR_PREP_PNTR(x)		(0x834 + ((x) << 12))
+#define RDR_PROC_PNTR(x)		(0x838 + ((x) << 12))
+#define RDR_STAT(x)			(0x83C + ((x) << 12))
+
+/* HIA, Ring AIC */
+#define AIC_POL_CTRL(x)			(0xE000 - ((x) << 12))
+#define	AIC_TYPE_CTRL(x)		(0xE004 - ((x) << 12))
+#define	AIC_ENABLE_CTRL(x)		(0xE008 - ((x) << 12))
+#define	AIC_RAW_STAT(x)			(0xE00C - ((x) << 12))
+#define	AIC_ENABLE_SET(x)		(0xE00C - ((x) << 12))
+#define	AIC_ENABLED_STAT(x)		(0xE010 - ((x) << 12))
+#define	AIC_ACK(x)			(0xE010 - ((x) << 12))
+#define	AIC_ENABLE_CLR(x)		(0xE014 - ((x) << 12))
+#define	AIC_OPTIONS(x)			(0xE018 - ((x) << 12))
+#define	AIC_VERSION(x)			(0xE01C - ((x) << 12))
+
+/* HIA, Global AIC */
+#define AIC_G_POL_CTRL			0xF800
+#define AIC_G_TYPE_CTRL			0xF804
+#define AIC_G_ENABLE_CTRL		0xF808
+#define AIC_G_RAW_STAT			0xF80C
+#define AIC_G_ENABLE_SET		0xF80C
+#define AIC_G_ENABLED_STAT		0xF810
+#define AIC_G_ACK			0xF810
+#define AIC_G_ENABLE_CLR		0xF814
+#define AIC_G_OPTIONS			0xF818
+#define AIC_G_VERSION			0xF81C
+
+/* HIA, Data Fetch Engine */
+#define DFE_CFG				0xF000
+#define DFE_PRIO_0			0xF010
+#define DFE_PRIO_1			0xF014
+#define DFE_PRIO_2			0xF018
+#define DFE_PRIO_3			0xF01C
+
+/* HIA, Data Fetch Engine access monitoring for CDR */
+#define DFE_RING_REGION_LO(x)		(0xF080 + ((x) << 3))
+#define DFE_RING_REGION_HI(x)		(0xF084 + ((x) << 3))
+
+/* HIA, Data Fetch Engine thread control and status for thread */
+#define DFE_THR_CTRL			0xF200
+#define DFE_THR_STAT			0xF204
+#define DFE_THR_DESC_CTRL		0xF208
+#define DFE_THR_DESC_DPTR_LO		0xF210
+#define DFE_THR_DESC_DPTR_HI		0xF214
+#define DFE_THR_DESC_ACDPTR_LO		0xF218
+#define DFE_THR_DESC_ACDPTR_HI		0xF21C
+
+/* HIA, Data Store Engine */
+#define DSE_CFG				0xF400
+#define DSE_PRIO_0			0xF410
+#define DSE_PRIO_1			0xF414
+#define DSE_PRIO_2			0xF418
+#define DSE_PRIO_3			0xF41C
+
+/* HIA, Data Store Engine access monitoring for RDR */
+#define DSE_RING_REGION_LO(x)		(0xF480 + ((x) << 3))
+#define DSE_RING_REGION_HI(x)		(0xF484 + ((x) << 3))
+
+/* HIA, Data Store Engine thread control and status for thread */
+#define DSE_THR_CTRL			0xF600
+#define DSE_THR_STAT			0xF604
+#define DSE_THR_DESC_CTRL		0xF608
+#define DSE_THR_DESC_DPTR_LO		0xF610
+#define DSE_THR_DESC_DPTR_HI		0xF614
+#define DSE_THR_DESC_S_DPTR_LO		0xF618
+#define DSE_THR_DESC_S_DPTR_HI		0xF61C
+#define DSE_THR_ERROR_STAT		0xF620
+
+/* HIA Global */
+#define HIA_MST_CTRL			0xFFF4
+#define HIA_OPTIONS			0xFFF8
+#define HIA_VERSION			0xFFFC
+
+/* Processing Engine Input Side, Processing Engine */
+#define PE_IN_DBUF_THRESH		0x10000
+#define PE_IN_TBUF_THRESH		0x10100
+
+/* Packet Engine Configuration / Status Registers */
+#define PE_TOKEN_CTRL_STAT		0x11000
+#define PE_FUNCTION_EN			0x11004
+#define PE_CONTEXT_CTRL			0x11008
+#define PE_INTERRUPT_CTRL_STAT		0x11010
+#define PE_CONTEXT_STAT			0x1100C
+#define PE_OUT_TRANS_CTRL_STAT		0x11018
+#define PE_OUT_BUF_CTRL			0x1101C
+
+/* Packet Engine PRNG Registers */
+#define PE_PRNG_STAT			0x11040
+#define PE_PRNG_CTRL			0x11044
+#define PE_PRNG_SEED_L			0x11048
+#define PE_PRNG_SEED_H			0x1104C
+#define PE_PRNG_KEY_0_L			0x11050
+#define PE_PRNG_KEY_0_H			0x11054
+#define PE_PRNG_KEY_1_L			0x11058
+#define PE_PRNG_KEY_1_H			0x1105C
+#define PE_PRNG_RES_0			0x11060
+#define PE_PRNG_RES_1			0x11064
+#define PE_PRNG_RES_2			0x11068
+#define PE_PRNG_RES_3			0x1106C
+#define PE_PRNG_LFSR_L			0x11070
+#define PE_PRNG_LFSR_H			0x11074
+
+/* Packet Engine AIC */
+#define PE_EIP96_AIC_POL_CTRL		0x113C0
+#define PE_EIP96_AIC_TYPE_CTRL		0x113C4
+#define PE_EIP96_AIC_ENABLE_CTRL	0x113C8
+#define PE_EIP96_AIC_RAW_STAT		0x113CC
+#define PE_EIP96_AIC_ENABLE_SET		0x113CC
+#define PE_EIP96_AIC_ENABLED_STAT	0x113D0
+#define PE_EIP96_AIC_ACK		0x113D0
+#define PE_EIP96_AIC_ENABLE_CLR		0x113D4
+#define PE_EIP96_AIC_OPTIONS		0x113D8
+#define PE_EIP96_AIC_VERSION		0x113DC
+
+/* Packet Engine Options & Version Registers */
+#define PE_EIP96_OPTIONS		0x113F8
+#define PE_EIP96_VERSION		0x113FC
+
+/* Processing Engine Output Side */
+#define PE_OUT_DBUF_THRESH		0x11C00
+#define PE_OUT_TBUF_THRESH		0x11D00
+
+/* Processing Engine Local AIC */
+#define PE_AIC_POL_CTRL			0x11F00
+#define PE_AIC_TYPE_CTRL		0x11F04
+#define PE_AIC_ENABLE_CTRL		0x11F08
+#define PE_AIC_RAW_STAT			0x11F0C
+#define PE_AIC_ENABLE_SET		0x11F0C
+#define PE_AIC_ENABLED_STAT		0x11F10
+#define PE_AIC_ENABLE_CLR		0x11F14
+#define PE_AIC_OPTIONS			0x11F18
+#define PE_AIC_VERSION			0x11F1C
+
+/* Processing Engine General Configuration and Version */
+#define PE_IN_FLIGHT			0x11FF0
+#define PE_OPTIONS			0x11FF8
+#define PE_VERSION			0x11FFC
+
+/* EIP-97 - Global */
+#define EIP97_CLOCK_STATE		0x1FFE4
+#define EIP97_FORCE_CLOCK_ON		0x1FFE8
+#define EIP97_FORCE_CLOCK_OFF		0x1FFEC
+#define EIP97_MST_CTRL			0x1FFF4
+#define EIP97_OPTIONS			0x1FFF8
+#define EIP97_VERSION			0x1FFFC
+#endif /* __MTK_REGS_H__ */
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
new file mode 100644
index 0000000..5f4f845
--- /dev/null
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -0,0 +1,1359 @@
+/*
+ * Cryptographic API.
+ *
+ * Driver for EIP97 SHA1/SHA2(HMAC) acceleration.
+ *
+ * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Some ideas are from atmel-sha.c and omap-sham.c drivers.
+ */
+
+#include <crypto/hmac.h>
+#include <crypto/sha.h>
+#include "mtk-platform.h"
+
+#define SHA_ALIGN_MSK		(sizeof(u32) - 1)
+#define SHA_QUEUE_SIZE		512
+#define SHA_BUF_SIZE		((u32)PAGE_SIZE)
+
+#define SHA_OP_UPDATE		1
+#define SHA_OP_FINAL		2
+
+#define SHA_DATA_LEN_MSK	cpu_to_le32(GENMASK(16, 0))
+#define SHA_MAX_DIGEST_BUF_SIZE	32
+
+/* SHA command token */
+#define SHA_CT_SIZE		5
+#define SHA_CT_CTRL_HDR		cpu_to_le32(0x02220000)
+#define SHA_CMD0		cpu_to_le32(0x03020000)
+#define SHA_CMD1		cpu_to_le32(0x21060000)
+#define SHA_CMD2		cpu_to_le32(0xe0e63802)
+
+/* SHA transform information */
+#define SHA_TFM_HASH		cpu_to_le32(0x2 << 0)
+#define SHA_TFM_SIZE(x)		cpu_to_le32((x) << 8)
+#define SHA_TFM_START		cpu_to_le32(0x1 << 4)
+#define SHA_TFM_CONTINUE	cpu_to_le32(0x1 << 5)
+#define SHA_TFM_HASH_STORE	cpu_to_le32(0x1 << 19)
+#define SHA_TFM_SHA1		cpu_to_le32(0x2 << 23)
+#define SHA_TFM_SHA256		cpu_to_le32(0x3 << 23)
+#define SHA_TFM_SHA224		cpu_to_le32(0x4 << 23)
+#define SHA_TFM_SHA512		cpu_to_le32(0x5 << 23)
+#define SHA_TFM_SHA384		cpu_to_le32(0x6 << 23)
+#define SHA_TFM_DIGEST(x)	cpu_to_le32(((x) & GENMASK(3, 0)) << 24)
+
+/* SHA flags */
+#define SHA_FLAGS_BUSY		BIT(0)
+#define	SHA_FLAGS_FINAL		BIT(1)
+#define SHA_FLAGS_FINUP		BIT(2)
+#define SHA_FLAGS_SG		BIT(3)
+#define SHA_FLAGS_ALGO_MSK	GENMASK(8, 4)
+#define SHA_FLAGS_SHA1		BIT(4)
+#define SHA_FLAGS_SHA224	BIT(5)
+#define SHA_FLAGS_SHA256	BIT(6)
+#define SHA_FLAGS_SHA384	BIT(7)
+#define SHA_FLAGS_SHA512	BIT(8)
+#define SHA_FLAGS_HMAC		BIT(9)
+#define SHA_FLAGS_PAD		BIT(10)
+
+/**
+ * struct mtk_sha_info - hardware information of SHA
+ * @ctrl:	transform control words
+ * @cmd:	command token, hardware instructions
+ * @tfm:	transform state of the hash algorithm
+ * @digest:	buffer holding the (intermediate) hash values
+ */
+struct mtk_sha_info {
+	__le32 ctrl[2];
+	__le32 cmd[3];
+	__le32 tfm[2];
+	__le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
+};
+
+struct mtk_sha_reqctx {
+	struct mtk_sha_info info;
+	unsigned long flags;
+	unsigned long op;
+
+	u64 digcnt;
+	size_t bufcnt;
+	dma_addr_t dma_addr;
+
+	__le32 ct_hdr;
+	u32 ct_size;
+	dma_addr_t ct_dma;
+	dma_addr_t tfm_dma;
+
+	/* Walk state */
+	struct scatterlist *sg;
+	u32 offset;	/* Offset in current sg */
+	u32 total;	/* Total request */
+	size_t ds;
+	size_t bs;
+
+	u8 *buffer;
+};
+
+struct mtk_sha_hmac_ctx {
+	struct crypto_shash	*shash;
+	u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+	u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+};
+
+struct mtk_sha_ctx {
+	struct mtk_cryp *cryp;
+	unsigned long flags;
+	u8 id;
+	u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));
+
+	struct mtk_sha_hmac_ctx	base[0];
+};
+
+struct mtk_sha_drv {
+	struct list_head dev_list;
+	/* Device list lock */
+	spinlock_t lock;
+};
+
+static struct mtk_sha_drv mtk_sha = {
+	.dev_list = LIST_HEAD_INIT(mtk_sha.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock),
+};
+
+static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
+				struct ahash_request *req);
+
+static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
+{
+	return readl_relaxed(cryp->base + offset);
+}
+
+static inline void mtk_sha_write(struct mtk_cryp *cryp,
+				 u32 offset, u32 value)
+{
+	writel_relaxed(value, cryp->base + offset);
+}
+
+static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
+				      struct mtk_desc **cmd_curr,
+				      struct mtk_desc **res_curr,
+				      int *count)
+{
+	*cmd_curr = ring->cmd_next++;
+	*res_curr = ring->res_next++;
+	(*count)++;
+
+	if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
+		ring->cmd_next = ring->cmd_base;
+		ring->res_next = ring->res_base;
+	}
+}
+
+static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
+{
+	struct mtk_cryp *cryp = NULL;
+	struct mtk_cryp *tmp;
+
+	spin_lock_bh(&mtk_sha.lock);
+	if (!tctx->cryp) {
+		list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {
+			cryp = tmp;
+			break;
+		}
+		tctx->cryp = cryp;
+	} else {
+		cryp = tctx->cryp;
+	}
+
+	/*
+	 * Assign a record id to the tfm in round-robin fashion; this
+	 * binds the tfm to the corresponding descriptor ring.
+	 */
+	tctx->id = cryp->rec;
+	cryp->rec = !cryp->rec;
+
+	spin_unlock_bh(&mtk_sha.lock);
+
+	return cryp;
+}
+
+static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
+{
+	size_t count;
+
+	while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) {
+		count = min(ctx->sg->length - ctx->offset, ctx->total);
+		count = min(count, SHA_BUF_SIZE - ctx->bufcnt);
+
+		if (count <= 0) {
+			/*
+			 * Check if count <= 0 because the buffer is full or
+			 * because the sg length is 0. In the latter case,
+			 * check if there is another sg in the list, a 0 length
+			 * sg doesn't necessarily mean the end of the sg list.
+			 */
+			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
+				ctx->sg = sg_next(ctx->sg);
+				continue;
+			} else {
+				break;
+			}
+		}
+
+		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
+					 ctx->offset, count, 0);
+
+		ctx->bufcnt += count;
+		ctx->offset += count;
+		ctx->total -= count;
+
+		if (ctx->offset == ctx->sg->length) {
+			ctx->sg = sg_next(ctx->sg);
+			if (ctx->sg)
+				ctx->offset = 0;
+			else
+				ctx->total = 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * The purpose of this padding is to ensure that the padded message is a
+ * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
+ * The bit "1" is appended at the end of the message, followed by
+ * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
+ * 128-bit block (SHA384/SHA512) containing the message length in bits
+ * is appended.
+ *
+ * For SHA1/SHA224/SHA256, padlen is calculated as follows:
+ *  - if message length < 56 bytes then padlen = 56 - message length
+ *  - else padlen = 64 + 56 - message length
+ *
+ * For SHA384/SHA512, padlen is calculated as follows:
+ *  - if message length < 112 bytes then padlen = 112 - message length
+ *  - else padlen = 128 + 112 - message length
+ */
+static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
+{
+	u32 index, padlen;
+	u64 bits[2];
+	u64 size = ctx->digcnt;
+
+	size += ctx->bufcnt;
+	size += len;
+
+	bits[1] = cpu_to_be64(size << 3);
+	bits[0] = cpu_to_be64(size >> 61);
+
+	switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
+	case SHA_FLAGS_SHA384:
+	case SHA_FLAGS_SHA512:
+		index = ctx->bufcnt & 0x7f;
+		padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
+		*(ctx->buffer + ctx->bufcnt) = 0x80;
+		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
+		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
+		ctx->bufcnt += padlen + 16;
+		ctx->flags |= SHA_FLAGS_PAD;
+		break;
+
+	default:
+		index = ctx->bufcnt & 0x3f;
+		padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+		*(ctx->buffer + ctx->bufcnt) = 0x80;
+		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
+		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
+		ctx->bufcnt += padlen + 8;
+		ctx->flags |= SHA_FLAGS_PAD;
+		break;
+	}
+}
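+
+/*
+ * Two worked examples of the padding rule documented above, for SHA-256
+ * (64-byte blocks): a 3-byte message gives index = 3 and padlen = 53,
+ * so 3 + 53 + 8 = 64 bytes are hashed; a 60-byte message gives
+ * padlen = 60, so 60 + 60 + 8 = 128 bytes (two blocks) are hashed.
+ */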
+
+/* Initialize basic transform information of SHA */
+static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
+{
+	struct mtk_sha_info *info = &ctx->info;
+
+	ctx->ct_hdr = SHA_CT_CTRL_HDR;
+	ctx->ct_size = SHA_CT_SIZE;
+
+	info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));
+
+	switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
+	case SHA_FLAGS_SHA1:
+		info->tfm[0] |= SHA_TFM_SHA1;
+		break;
+	case SHA_FLAGS_SHA224:
+		info->tfm[0] |= SHA_TFM_SHA224;
+		break;
+	case SHA_FLAGS_SHA256:
+		info->tfm[0] |= SHA_TFM_SHA256;
+		break;
+	case SHA_FLAGS_SHA384:
+		info->tfm[0] |= SHA_TFM_SHA384;
+		break;
+	case SHA_FLAGS_SHA512:
+		info->tfm[0] |= SHA_TFM_SHA512;
+		break;
+
+	default:
+		/* Should not happen... */
+		return;
+	}
+
+	info->tfm[1] = SHA_TFM_HASH_STORE;
+	info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
+	info->ctrl[1] = info->tfm[1];
+
+	info->cmd[0] = SHA_CMD0;
+	info->cmd[1] = SHA_CMD1;
+	info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
+}
+
+/*
+ * Update input data length field of transform information and
+ * map it to DMA region.
+ */
+static int mtk_sha_info_update(struct mtk_cryp *cryp,
+			       struct mtk_sha_rec *sha,
+			       size_t len1, size_t len2)
+{
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
+	struct mtk_sha_info *info = &ctx->info;
+
+	ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
+	ctx->ct_hdr |= cpu_to_le32(len1 + len2);
+	info->cmd[0] &= ~SHA_DATA_LEN_MSK;
+	info->cmd[0] |= cpu_to_le32(len1 + len2);
+
+	/* Setting SHA_TFM_START only for the first iteration */
+	if (ctx->digcnt)
+		info->ctrl[0] &= ~SHA_TFM_START;
+
+	ctx->digcnt += len1;
+
+	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
+				     DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
+		dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
+		return -EINVAL;
+	}
+
+	ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);
+
+	return 0;
+}
+
+/*
+ * Because of a hardware limitation, the engine cannot chain the outer
+ * HMAC hash to the inner one, i.e. HMAC(K, msg) =
+ * H((K ^ opad) || H((K ^ ipad) || msg)) cannot be computed in a single
+ * pass. The engine only produces the inner digest, and this helper
+ * finishes the outer hash with a software (shash) fallback, which
+ * limits HMAC performance.
+ */
+static int mtk_sha_finish_hmac(struct ahash_request *req)
+{
+	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct mtk_sha_hmac_ctx *bctx = tctx->base;
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	SHASH_DESC_ON_STACK(shash, bctx->shash);
+
+	shash->tfm = bctx->shash;
+	shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
+
+	return crypto_shash_init(shash) ?:
+	       crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
+	       crypto_shash_finup(shash, req->result, ctx->ds, req->result);
+}
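+
+/*
+ * For reference, a minimal sketch (illustrative only, not part of the
+ * driver, error handling omitted) of how a caller reaches these hashes
+ * through the generic ahash API; whether this driver or another
+ * provider serves the request depends on algorithm priorities:
+ *
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
+ *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *
+ *	crypto_ahash_setkey(tfm, key, keylen);
+ *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *				   crypto_req_done, &wait);
+ *	ahash_request_set_crypt(req, sg, digest, nbytes);
+ *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ *
+ *	ahash_request_free(req);
+ *	crypto_free_ahash(tfm);
+ */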
+
+/* Initialize request context */
+static int mtk_sha_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	ctx->flags = 0;
+	ctx->ds = crypto_ahash_digestsize(tfm);
+
+	switch (ctx->ds) {
+	case SHA1_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA1;
+		ctx->bs = SHA1_BLOCK_SIZE;
+		break;
+	case SHA224_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA224;
+		ctx->bs = SHA224_BLOCK_SIZE;
+		break;
+	case SHA256_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA256;
+		ctx->bs = SHA256_BLOCK_SIZE;
+		break;
+	case SHA384_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA384;
+		ctx->bs = SHA384_BLOCK_SIZE;
+		break;
+	case SHA512_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA512;
+		ctx->bs = SHA512_BLOCK_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ctx->bufcnt = 0;
+	ctx->digcnt = 0;
+	ctx->buffer = tctx->buf;
+
+	if (tctx->flags & SHA_FLAGS_HMAC) {
+		struct mtk_sha_hmac_ctx *bctx = tctx->base;
+
+		memcpy(ctx->buffer, bctx->ipad, ctx->bs);
+		ctx->bufcnt = ctx->bs;
+		ctx->flags |= SHA_FLAGS_HMAC;
+	}
+
+	return 0;
+}
+
+static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
+			dma_addr_t addr1, size_t len1,
+			dma_addr_t addr2, size_t len2)
+{
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
+	struct mtk_ring *ring = cryp->ring[sha->id];
+	struct mtk_desc *cmd, *res;
+	int err, count = 0;
+
+	err = mtk_sha_info_update(cryp, sha, len1, len2);
+	if (err)
+		return err;
+
+	/* Fill in the command/result descriptors */
+	mtk_sha_ring_shift(ring, &cmd, &res, &count);
+
+	res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
+	cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
+		   MTK_DESC_CT_LEN(ctx->ct_size);
+	cmd->buf = cpu_to_le32(addr1);
+	cmd->ct = cpu_to_le32(ctx->ct_dma);
+	cmd->ct_hdr = ctx->ct_hdr;
+	cmd->tfm = cpu_to_le32(ctx->tfm_dma);
+
+	if (len2) {
+		mtk_sha_ring_shift(ring, &cmd, &res, &count);
+
+		res->hdr = MTK_DESC_BUF_LEN(len2);
+		cmd->hdr = MTK_DESC_BUF_LEN(len2);
+		cmd->buf = cpu_to_le32(addr2);
+	}
+
+	cmd->hdr |= MTK_DESC_LAST;
+	res->hdr |= MTK_DESC_LAST;
+
+	/*
+	 * Make sure that all changes to the DMA ring are done before we
+	 * start engine.
+	 */
+	wmb();
+	/* Start DMA transfer */
+	mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
+	mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
+
+	return -EINPROGRESS;
+}
+
+static int mtk_sha_dma_map(struct mtk_cryp *cryp,
+			   struct mtk_sha_rec *sha,
+			   struct mtk_sha_reqctx *ctx,
+			   size_t count)
+{
+	ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
+				       SHA_BUF_SIZE, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
+		dev_err(cryp->dev, "dma map error\n");
+		return -EINVAL;
+	}
+
+	ctx->flags &= ~SHA_FLAGS_SG;
+
+	return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
+}
+
+static int mtk_sha_update_slow(struct mtk_cryp *cryp,
+			       struct mtk_sha_rec *sha)
+{
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
+	size_t count;
+	u32 final;
+
+	mtk_sha_append_sg(ctx);
+
+	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
+
+	dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt);
+
+	if (final) {
+		sha->flags |= SHA_FLAGS_FINAL;
+		mtk_sha_fill_padding(ctx, 0);
+	}
+
+	if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) {
+		count = ctx->bufcnt;
+		ctx->bufcnt = 0;
+
+		return mtk_sha_dma_map(cryp, sha, ctx, count);
+	}
+	return 0;
+}
+
+static int mtk_sha_update_start(struct mtk_cryp *cryp,
+				struct mtk_sha_rec *sha)
+{
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
+	u32 len, final, tail;
+	struct scatterlist *sg;
+
+	if (!ctx->total)
+		return 0;
+
+	if (ctx->bufcnt || ctx->offset)
+		return mtk_sha_update_slow(cryp, sha);
+
+	sg = ctx->sg;
+
+	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+		return mtk_sha_update_slow(cryp, sha);
+
+	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs))
+		/* size is not ctx->bs aligned */
+		return mtk_sha_update_slow(cryp, sha);
+
+	len = min(ctx->total, sg->length);
+
+	if (sg_is_last(sg)) {
+		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
+			/* no final yet: only process a ctx->bs aligned length */
+			tail = len & (ctx->bs - 1);
+			len -= tail;
+		}
+	}
+
+	ctx->total -= len;
+	ctx->offset = len; /* offset where to start slow */
+
+	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
+
+	/* Add padding */
+	if (final) {
+		size_t count;
+
+		tail = len & (ctx->bs - 1);
+		len -= tail;
+		ctx->total += tail;
+		ctx->offset = len; /* offset where to start slow */
+
+		sg = ctx->sg;
+		mtk_sha_append_sg(ctx);
+		mtk_sha_fill_padding(ctx, len);
+
+		ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
+					       SHA_BUF_SIZE, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
+			dev_err(cryp->dev, "dma map bytes error\n");
+			return -EINVAL;
+		}
+
+		sha->flags |= SHA_FLAGS_FINAL;
+		count = ctx->bufcnt;
+		ctx->bufcnt = 0;
+
+		if (len == 0) {
+			ctx->flags &= ~SHA_FLAGS_SG;
+			return mtk_sha_xmit(cryp, sha, ctx->dma_addr,
+					    count, 0, 0);
+
+		} else {
+			ctx->sg = sg;
+			if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
+				dev_err(cryp->dev, "dma_map_sg error\n");
+				return -EINVAL;
+			}
+
+			ctx->flags |= SHA_FLAGS_SG;
+			return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
+					    len, ctx->dma_addr, count);
+		}
+	}
+
+	if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
+		dev_err(cryp->dev, "dma_map_sg error\n");
+		return -EINVAL;
+	}
+
+	ctx->flags |= SHA_FLAGS_SG;
+
+	return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
+			    len, 0, 0);
+}
+
+static int mtk_sha_final_req(struct mtk_cryp *cryp,
+			     struct mtk_sha_rec *sha)
+{
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
+	size_t count;
+
+	mtk_sha_fill_padding(ctx, 0);
+
+	sha->flags |= SHA_FLAGS_FINAL;
+	count = ctx->bufcnt;
+	ctx->bufcnt = 0;
+
+	return mtk_sha_dma_map(cryp, sha, ctx, count);
+}
+
+/* Copy ready hash (+ finalize hmac) */
+static int mtk_sha_finish(struct ahash_request *req)
+{
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
+	__le32 *digest = ctx->info.digest;
+	u32 *result = (u32 *)req->result;
+	int i;
+
+	/* Get the hash from the digest buffer */
+	for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++)
+		result[i] = le32_to_cpu(digest[i]);
+
+	if (ctx->flags & SHA_FLAGS_HMAC)
+		return mtk_sha_finish_hmac(req);
+
+	return 0;
+}
+
+static void mtk_sha_finish_req(struct mtk_cryp *cryp,
+			       struct mtk_sha_rec *sha,
+			       int err)
+{
+	if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
+		err = mtk_sha_finish(sha->req);
+
+	sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL);
+
+	sha->req->base.complete(&sha->req->base, err);
+
+	/* Handle new request */
+	tasklet_schedule(&sha->queue_task);
+}
+
+static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
+				struct ahash_request *req)
+{
+	struct mtk_sha_rec *sha = cryp->sha[id];
+	struct crypto_async_request *async_req, *backlog;
+	struct mtk_sha_reqctx *ctx;
+	unsigned long flags;
+	int err = 0, ret = 0;
+
+	spin_lock_irqsave(&sha->lock, flags);
+	if (req)
+		ret = ahash_enqueue_request(&sha->queue, req);
+
+	if (SHA_FLAGS_BUSY & sha->flags) {
+		spin_unlock_irqrestore(&sha->lock, flags);
+		return ret;
+	}
+
+	backlog = crypto_get_backlog(&sha->queue);
+	async_req = crypto_dequeue_request(&sha->queue);
+	if (async_req)
+		sha->flags |= SHA_FLAGS_BUSY;
+	spin_unlock_irqrestore(&sha->lock, flags);
+
+	if (!async_req)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	req = ahash_request_cast(async_req);
+	ctx = ahash_request_ctx(req);
+
+	sha->req = req;
+
+	mtk_sha_info_init(ctx);
+
+	if (ctx->op == SHA_OP_UPDATE) {
+		err = mtk_sha_update_start(cryp, sha);
+		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
+			/* No final() after finup() */
+			err = mtk_sha_final_req(cryp, sha);
+	} else if (ctx->op == SHA_OP_FINAL) {
+		err = mtk_sha_final_req(cryp, sha);
+	}
+
+	if (unlikely(err != -EINPROGRESS))
+		/* Task will not finish it, so do it here */
+		mtk_sha_finish_req(cryp, sha, err);
+
+	return ret;
+}
+
+static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
+{
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->op = op;
+
+	return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
+}
+
+static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
+{
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
+
+	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
+			 DMA_BIDIRECTIONAL);
+
+	if (ctx->flags & SHA_FLAGS_SG) {
+		dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
+		if (ctx->sg->length == ctx->offset) {
+			ctx->sg = sg_next(ctx->sg);
+			if (ctx->sg)
+				ctx->offset = 0;
+		}
+		if (ctx->flags & SHA_FLAGS_PAD) {
+			dma_unmap_single(cryp->dev, ctx->dma_addr,
+					 SHA_BUF_SIZE, DMA_TO_DEVICE);
+		}
+	} else {
+		dma_unmap_single(cryp->dev, ctx->dma_addr,
+				 SHA_BUF_SIZE, DMA_TO_DEVICE);
+	}
+}
+
+static void mtk_sha_complete(struct mtk_cryp *cryp,
+			     struct mtk_sha_rec *sha)
+{
+	int err = 0;
+
+	err = mtk_sha_update_start(cryp, sha);
+	if (err != -EINPROGRESS)
+		mtk_sha_finish_req(cryp, sha, err);
+}
+
+static int mtk_sha_update(struct ahash_request *req)
+{
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	ctx->total = req->nbytes;
+	ctx->sg = req->src;
+	ctx->offset = 0;
+
+	if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) &&
+	    !(ctx->flags & SHA_FLAGS_FINUP))
+		return mtk_sha_append_sg(ctx);
+
+	return mtk_sha_enqueue(req, SHA_OP_UPDATE);
+}
+
+static int mtk_sha_final(struct ahash_request *req)
+{
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	ctx->flags |= SHA_FLAGS_FINUP;
+
+	if (ctx->flags & SHA_FLAGS_PAD)
+		return mtk_sha_finish(req);
+
+	return mtk_sha_enqueue(req, SHA_OP_FINAL);
+}
+
+static int mtk_sha_finup(struct ahash_request *req)
+{
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
+	int err1, err2;
+
+	ctx->flags |= SHA_FLAGS_FINUP;
+
+	err1 = mtk_sha_update(req);
+	if (err1 == -EINPROGRESS || err1 == -EBUSY)
+		return err1;
+	/*
+	 * final() always has to be called to clean up resources,
+	 * even if update() failed.
+	 */
+	err2 = mtk_sha_final(req);
+
+	return err1 ?: err2;
+}
+
+static int mtk_sha_digest(struct ahash_request *req)
+{
+	return mtk_sha_init(req) ?: mtk_sha_finup(req);
+}
+
+static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
+			  u32 keylen)
+{
+	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct mtk_sha_hmac_ctx *bctx = tctx->base;
+	size_t bs = crypto_shash_blocksize(bctx->shash);
+	size_t ds = crypto_shash_digestsize(bctx->shash);
+	int err, i;
+
+	SHASH_DESC_ON_STACK(shash, bctx->shash);
+
+	shash->tfm = bctx->shash;
+	shash->flags = crypto_shash_get_flags(bctx->shash) &
+		       CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	if (keylen > bs) {
+		err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
+		if (err)
+			return err;
+		keylen = ds;
+	} else {
+		memcpy(bctx->ipad, key, keylen);
+	}
+
+	memset(bctx->ipad + keylen, 0, bs - keylen);
+	memcpy(bctx->opad, bctx->ipad, bs);
+
+	for (i = 0; i < bs; i++) {
+		bctx->ipad[i] ^= HMAC_IPAD_VALUE;
+		bctx->opad[i] ^= HMAC_OPAD_VALUE;
+	}
+
+	return 0;
+}
+
+static int mtk_sha_export(struct ahash_request *req, void *out)
+{
+	const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	memcpy(out, ctx, sizeof(*ctx));
+	return 0;
+}
+
+static int mtk_sha_import(struct ahash_request *req, const void *in)
+{
+	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	memcpy(ctx, in, sizeof(*ctx));
+	return 0;
+}
+
+static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm,
+				const char *alg_base)
+{
+	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
+	struct mtk_cryp *cryp = NULL;
+
+	cryp = mtk_sha_find_dev(tctx);
+	if (!cryp)
+		return -ENODEV;
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct mtk_sha_reqctx));
+
+	if (alg_base) {
+		struct mtk_sha_hmac_ctx *bctx = tctx->base;
+
+		tctx->flags |= SHA_FLAGS_HMAC;
+		bctx->shash = crypto_alloc_shash(alg_base, 0,
+					CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(bctx->shash)) {
+			pr_err("base driver %s could not be loaded.\n",
+			       alg_base);
+
+			return PTR_ERR(bctx->shash);
+		}
+	}
+	return 0;
+}
+
+static int mtk_sha_cra_init(struct crypto_tfm *tfm)
+{
+	return mtk_sha_cra_init_alg(tfm, NULL);
+}
+
+static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm)
+{
+	return mtk_sha_cra_init_alg(tfm, "sha1");
+}
+
+static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm)
+{
+	return mtk_sha_cra_init_alg(tfm, "sha224");
+}
+
+static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm)
+{
+	return mtk_sha_cra_init_alg(tfm, "sha256");
+}
+
+static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm)
+{
+	return mtk_sha_cra_init_alg(tfm, "sha384");
+}
+
+static int mtk_sha_cra_sha512_init(struct crypto_tfm *tfm)
+{
+	return mtk_sha_cra_init_alg(tfm, "sha512");
+}
+
+static void mtk_sha_cra_exit(struct crypto_tfm *tfm)
+{
+	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
+
+	if (tctx->flags & SHA_FLAGS_HMAC) {
+		struct mtk_sha_hmac_ctx *bctx = tctx->base;
+
+		crypto_free_shash(bctx->shash);
+	}
+}
+
+static struct ahash_alg algs_sha1_sha224_sha256[] = {
+{
+	.init		= mtk_sha_init,
+	.update		= mtk_sha_update,
+	.final		= mtk_sha_final,
+	.finup		= mtk_sha_finup,
+	.digest		= mtk_sha_digest,
+	.export		= mtk_sha_export,
+	.import		= mtk_sha_import,
+	.halg.digestsize	= SHA1_DIGEST_SIZE,
+	.halg.statesize = sizeof(struct mtk_sha_reqctx),
+	.halg.base	= {
+		.cra_name		= "sha1",
+		.cra_driver_name	= "mtk-sha1",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct mtk_sha_ctx),
+		.cra_alignmask		= SHA_ALIGN_MSK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= mtk_sha_cra_init,
+		.cra_exit		= mtk_sha_cra_exit,
+	}
+},
+{
+	.init		= mtk_sha_init,
+	.update		= mtk_sha_update,
+	.final		= mtk_sha_final,
+	.finup		= mtk_sha_finup,
+	.digest		= mtk_sha_digest,
+	.export		= mtk_sha_export,
+	.import		= mtk_sha_import,
+	.halg.digestsize	= SHA224_DIGEST_SIZE,
+	.halg.statesize = sizeof(struct mtk_sha_reqctx),
+	.halg.base	= {
+		.cra_name		= "sha224",
+		.cra_driver_name	= "mtk-sha224",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= SHA224_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct mtk_sha_ctx),
+		.cra_alignmask		= SHA_ALIGN_MSK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= mtk_sha_cra_init,
+		.cra_exit		= mtk_sha_cra_exit,
+	}
+},
+{
+	.init		= mtk_sha_init,
+	.update		= mtk_sha_update,
+	.final		= mtk_sha_final,
+	.finup		= mtk_sha_finup,
+	.digest		= mtk_sha_digest,
+	.export		= mtk_sha_export,
+	.import		= mtk_sha_import,
+	.halg.digestsize	= SHA256_DIGEST_SIZE,
+	.halg.statesize = sizeof(struct mtk_sha_reqctx),
+	.halg.base	= {
+		.cra_name		= "sha256",
+		.cra_driver_name	= "mtk-sha256",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= SHA256_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct mtk_sha_ctx),
+		.cra_alignmask		= SHA_ALIGN_MSK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= mtk_sha_cra_init,
+		.cra_exit		= mtk_sha_cra_exit,
+	}
+},
+{
+	.init		= mtk_sha_init,
+	.update		= mtk_sha_update,
+	.final		= mtk_sha_final,
+	.finup		= mtk_sha_finup,
+	.digest		= mtk_sha_digest,
+	.export		= mtk_sha_export,
+	.import		= mtk_sha_import,
+	.setkey		= mtk_sha_setkey,
+	.halg.digestsize	= SHA1_DIGEST_SIZE,
+	.halg.statesize = sizeof(struct mtk_sha_reqctx),
+	.halg.base	= {
+		.cra_name		= "hmac(sha1)",
+		.cra_driver_name	= "mtk-hmac-sha1",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct mtk_sha_ctx) +
+					sizeof(struct mtk_sha_hmac_ctx),
+		.cra_alignmask		= SHA_ALIGN_MSK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= mtk_sha_cra_sha1_init,
+		.cra_exit		= mtk_sha_cra_exit,
+	}
+},
+{
+	.init		= mtk_sha_init,
+	.update		= mtk_sha_update,
+	.final		= mtk_sha_final,
+	.finup		= mtk_sha_finup,
+	.digest		= mtk_sha_digest,
+	.export		= mtk_sha_export,
+	.import		= mtk_sha_import,
+	.setkey		= mtk_sha_setkey,
+	.halg.digestsize	= SHA224_DIGEST_SIZE,
+	.halg.statesize = sizeof(struct mtk_sha_reqctx),
+	.halg.base	= {
+		.cra_name		= "hmac(sha224)",
+		.cra_driver_name	= "mtk-hmac-sha224",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA224_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct mtk_sha_ctx) +
+					sizeof(struct mtk_sha_hmac_ctx),
+		.cra_alignmask		= SHA_ALIGN_MSK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= mtk_sha_cra_sha224_init,
+		.cra_exit		= mtk_sha_cra_exit,
+	}
+},
+{
+	.init		= mtk_sha_init,
+	.update		= mtk_sha_update,
+	.final		= mtk_sha_final,
+	.finup		= mtk_sha_finup,
+	.digest		= mtk_sha_digest,
+	.export		= mtk_sha_export,
+	.import		= mtk_sha_import,
+	.setkey		= mtk_sha_setkey,
+	.halg.digestsize	= SHA256_DIGEST_SIZE,
+	.halg.statesize = sizeof(struct mtk_sha_reqctx),
+	.halg.base	= {
+		.cra_name		= "hmac(sha256)",
+		.cra_driver_name	= "mtk-hmac-sha256",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA256_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct mtk_sha_ctx) +
+					sizeof(struct mtk_sha_hmac_ctx),
+		.cra_alignmask		= SHA_ALIGN_MSK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= mtk_sha_cra_sha256_init,
+		.cra_exit		= mtk_sha_cra_exit,
+	}
+},
+};
+
+static struct ahash_alg algs_sha384_sha512[] = {
+{
+	.init		= mtk_sha_init,
+	.update		= mtk_sha_update,
+	.final		= mtk_sha_final,
+	.finup		= mtk_sha_finup,
+	.digest		= mtk_sha_digest,
+	.export		= mtk_sha_export,
+	.import		= mtk_sha_import,
+	.halg.digestsize	= SHA384_DIGEST_SIZE,
+	.halg.statesize = sizeof(struct mtk_sha_reqctx),
+	.halg.base	= {
+		.cra_name		= "sha384",
+		.cra_driver_name	= "mtk-sha384",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= SHA384_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct mtk_sha_ctx),
+		.cra_alignmask		= SHA_ALIGN_MSK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= mtk_sha_cra_init,
+		.cra_exit		= mtk_sha_cra_exit,
+	}
+},
+{
+	.init		= mtk_sha_init,
+	.update		= mtk_sha_update,
+	.final		= mtk_sha_final,
+	.finup		= mtk_sha_finup,
+	.digest		= mtk_sha_digest,
+	.export		= mtk_sha_export,
+	.import		= mtk_sha_import,
+	.halg.digestsize	= SHA512_DIGEST_SIZE,
+	.halg.statesize = sizeof(struct mtk_sha_reqctx),
+	.halg.base	= {
+		.cra_name		= "sha512",
+		.cra_driver_name	= "mtk-sha512",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= SHA512_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct mtk_sha_ctx),
+		.cra_alignmask		= SHA_ALIGN_MSK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= mtk_sha_cra_init,
+		.cra_exit		= mtk_sha_cra_exit,
+	}
+},
+{
+	.init		= mtk_sha_init,
+	.update		= mtk_sha_update,
+	.final		= mtk_sha_final,
+	.finup		= mtk_sha_finup,
+	.digest		= mtk_sha_digest,
+	.export		= mtk_sha_export,
+	.import		= mtk_sha_import,
+	.setkey		= mtk_sha_setkey,
+	.halg.digestsize	= SHA384_DIGEST_SIZE,
+	.halg.statesize = sizeof(struct mtk_sha_reqctx),
+	.halg.base	= {
+		.cra_name		= "hmac(sha384)",
+		.cra_driver_name	= "mtk-hmac-sha384",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA384_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct mtk_sha_ctx) +
+					sizeof(struct mtk_sha_hmac_ctx),
+		.cra_alignmask		= SHA_ALIGN_MSK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= mtk_sha_cra_sha384_init,
+		.cra_exit		= mtk_sha_cra_exit,
+	}
+},
+{
+	.init		= mtk_sha_init,
+	.update		= mtk_sha_update,
+	.final		= mtk_sha_final,
+	.finup		= mtk_sha_finup,
+	.digest		= mtk_sha_digest,
+	.export		= mtk_sha_export,
+	.import		= mtk_sha_import,
+	.setkey		= mtk_sha_setkey,
+	.halg.digestsize	= SHA512_DIGEST_SIZE,
+	.halg.statesize = sizeof(struct mtk_sha_reqctx),
+	.halg.base	= {
+		.cra_name		= "hmac(sha512)",
+		.cra_driver_name	= "mtk-hmac-sha512",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA512_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct mtk_sha_ctx) +
+					sizeof(struct mtk_sha_hmac_ctx),
+		.cra_alignmask		= SHA_ALIGN_MSK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= mtk_sha_cra_sha512_init,
+		.cra_exit		= mtk_sha_cra_exit,
+	}
+},
+};
+
+static void mtk_sha_queue_task(unsigned long data)
+{
+	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
+
+	mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
+}
+
+static void mtk_sha_done_task(unsigned long data)
+{
+	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
+	struct mtk_cryp *cryp = sha->cryp;
+
+	mtk_sha_unmap(cryp, sha);
+	mtk_sha_complete(cryp, sha);
+}
+
+static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
+{
+	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
+	struct mtk_cryp *cryp = sha->cryp;
+	u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));
+
+	mtk_sha_write(cryp, RDR_STAT(sha->id), val);
+
+	if (likely((SHA_FLAGS_BUSY & sha->flags))) {
+		mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
+		mtk_sha_write(cryp, RDR_THRESH(sha->id),
+			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
+
+		tasklet_schedule(&sha->done_task);
+	} else {
+		dev_warn(cryp->dev, "SHA interrupt with no active request\n");
+	}
+	return IRQ_HANDLED;
+}
+
+/*
+ * Two SHA records are used to get extra performance out of the engine.
+ * This mirrors mtk_aes_record_init().
+ */
+static int mtk_sha_record_init(struct mtk_cryp *cryp)
+{
+	struct mtk_sha_rec **sha = cryp->sha;
+	int i, err = -ENOMEM;
+
+	for (i = 0; i < MTK_REC_NUM; i++) {
+		sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL);
+		if (!sha[i])
+			goto err_cleanup;
+
+		sha[i]->cryp = cryp;
+
+		spin_lock_init(&sha[i]->lock);
+		crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);
+
+		tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
+			     (unsigned long)sha[i]);
+		tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
+			     (unsigned long)sha[i]);
+	}
+
+	/* Link to ring2 and ring3 respectively */
+	sha[0]->id = MTK_RING2;
+	sha[1]->id = MTK_RING3;
+
+	cryp->rec = 1;
+
+	return 0;
+
+err_cleanup:
+	for (; i--; )
+		kfree(sha[i]);
+	return err;
+}
+
+static void mtk_sha_record_free(struct mtk_cryp *cryp)
+{
+	int i;
+
+	for (i = 0; i < MTK_REC_NUM; i++) {
+		tasklet_kill(&cryp->sha[i]->done_task);
+		tasklet_kill(&cryp->sha[i]->queue_task);
+
+		kfree(cryp->sha[i]);
+	}
+}
+
+static void mtk_sha_unregister_algs(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++)
+		crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);
+
+	for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++)
+		crypto_unregister_ahash(&algs_sha384_sha512[i]);
+}
+
+static int mtk_sha_register_algs(void)
+{
+	int err, i;
+
+	for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) {
+		err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]);
+		if (err)
+			goto err_sha_224_256_algs;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) {
+		err = crypto_register_ahash(&algs_sha384_sha512[i]);
+		if (err)
+			goto err_sha_384_512_algs;
+	}
+
+	return 0;
+
+err_sha_384_512_algs:
+	for (; i--; )
+		crypto_unregister_ahash(&algs_sha384_sha512[i]);
+	i = ARRAY_SIZE(algs_sha1_sha224_sha256);
+err_sha_224_256_algs:
+	for (; i--; )
+		crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);
+
+	return err;
+}
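+
+/*
+ * Illustrative sketch, not driver code: once registered, these ahash
+ * algorithms are picked up through the generic crypto API (cra_priority
+ * 400 prefers them over the C implementations), for example:
+ *
+ *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
+ *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *	DECLARE_CRYPTO_WAIT(wait);
+ *
+ *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *				   crypto_req_done, &wait);
+ *	ahash_request_set_crypt(req, sgl, digest, len);
+ *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ *
+ * where sgl, digest and len come from the caller and error handling of
+ * the allocations is omitted.
+ */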
+
+int mtk_hash_alg_register(struct mtk_cryp *cryp)
+{
+	int err;
+
+	INIT_LIST_HEAD(&cryp->sha_list);
+
+	/* Initialize two hash records */
+	err = mtk_sha_record_init(cryp);
+	if (err)
+		goto err_record;
+
+	err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
+			       0, "mtk-sha", cryp->sha[0]);
+	if (err) {
+		dev_err(cryp->dev, "unable to request sha irq0.\n");
+		goto err_res;
+	}
+
+	err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
+			       0, "mtk-sha", cryp->sha[1]);
+	if (err) {
+		dev_err(cryp->dev, "unable to request sha irq1.\n");
+		goto err_res;
+	}
+
+	/* Enable ring2 and ring3 interrupt for hash */
+	mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
+	mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);
+
+	spin_lock(&mtk_sha.lock);
+	list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
+	spin_unlock(&mtk_sha.lock);
+
+	err = mtk_sha_register_algs();
+	if (err)
+		goto err_algs;
+
+	return 0;
+
+err_algs:
+	spin_lock(&mtk_sha.lock);
+	list_del(&cryp->sha_list);
+	spin_unlock(&mtk_sha.lock);
+err_res:
+	mtk_sha_record_free(cryp);
+err_record:
+
+	dev_err(cryp->dev, "mtk-sha initialization failed.\n");
+	return err;
+}
+
+void mtk_hash_alg_release(struct mtk_cryp *cryp)
+{
+	spin_lock(&mtk_sha.lock);
+	list_del(&cryp->sha_list);
+	spin_unlock(&mtk_sha.lock);
+
+	mtk_sha_unregister_algs();
+	mtk_sha_record_free(cryp);
+}
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
new file mode 100644
index 0000000..e01c463
--- /dev/null
+++ b/drivers/crypto/mxc-scc.c
@@ -0,0 +1,767 @@
+/*
+ * Copyright (C) 2016 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
+ *
+ * The driver is based on information gathered from
+ * drivers/mxc/security/mxc_scc.c which can be found in
+ * the Freescale linux-2.6-imx.git in the imx_2.6.35_maintain branch.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+
+/* Secure Memory (SCM) registers */
+#define SCC_SCM_RED_START		0x0000
+#define SCC_SCM_BLACK_START		0x0004
+#define SCC_SCM_LENGTH			0x0008
+#define SCC_SCM_CTRL			0x000C
+#define SCC_SCM_STATUS			0x0010
+#define SCC_SCM_ERROR_STATUS		0x0014
+#define SCC_SCM_INTR_CTRL		0x0018
+#define SCC_SCM_CFG			0x001C
+#define SCC_SCM_INIT_VECTOR_0		0x0020
+#define SCC_SCM_INIT_VECTOR_1		0x0024
+#define SCC_SCM_RED_MEMORY		0x0400
+#define SCC_SCM_BLACK_MEMORY		0x0800
+
+/* Security Monitor (SMN) Registers */
+#define SCC_SMN_STATUS			0x1000
+#define SCC_SMN_COMMAND		0x1004
+#define SCC_SMN_SEQ_START		0x1008
+#define SCC_SMN_SEQ_END		0x100C
+#define SCC_SMN_SEQ_CHECK		0x1010
+#define SCC_SMN_BIT_COUNT		0x1014
+#define SCC_SMN_BITBANK_INC_SIZE	0x1018
+#define SCC_SMN_BITBANK_DECREMENT	0x101C
+#define SCC_SMN_COMPARE_SIZE		0x1020
+#define SCC_SMN_PLAINTEXT_CHECK	0x1024
+#define SCC_SMN_CIPHERTEXT_CHECK	0x1028
+#define SCC_SMN_TIMER_IV		0x102C
+#define SCC_SMN_TIMER_CONTROL		0x1030
+#define SCC_SMN_DEBUG_DETECT_STAT	0x1034
+#define SCC_SMN_TIMER			0x1038
+
+#define SCC_SCM_CTRL_START_CIPHER	BIT(2)
+#define SCC_SCM_CTRL_CBC_MODE		BIT(1)
+#define SCC_SCM_CTRL_DECRYPT_MODE	BIT(0)
+
+#define SCC_SCM_STATUS_LEN_ERR		BIT(12)
+#define SCC_SCM_STATUS_SMN_UNBLOCKED	BIT(11)
+#define SCC_SCM_STATUS_CIPHERING_DONE	BIT(10)
+#define SCC_SCM_STATUS_ZEROIZING_DONE	BIT(9)
+#define SCC_SCM_STATUS_INTR_STATUS	BIT(8)
+#define SCC_SCM_STATUS_SEC_KEY		BIT(7)
+#define SCC_SCM_STATUS_INTERNAL_ERR	BIT(6)
+#define SCC_SCM_STATUS_BAD_SEC_KEY	BIT(5)
+#define SCC_SCM_STATUS_ZEROIZE_FAIL	BIT(4)
+#define SCC_SCM_STATUS_SMN_BLOCKED	BIT(3)
+#define SCC_SCM_STATUS_CIPHERING	BIT(2)
+#define SCC_SCM_STATUS_ZEROIZING	BIT(1)
+#define SCC_SCM_STATUS_BUSY		BIT(0)
+
+#define SCC_SMN_STATUS_STATE_MASK	0x0000001F
+#define SCC_SMN_STATE_START		0x0
+/* The SMN is zeroizing its RAM during reset */
+#define SCC_SMN_STATE_ZEROIZE_RAM	0x5
+/* SMN has passed internal checks */
+#define SCC_SMN_STATE_HEALTH_CHECK	0x6
+/* Fatal Security Violation. SMN is locked, SCM is inoperative. */
+#define SCC_SMN_STATE_FAIL		0x9
+/* SCC is in secure state. SCM is using secret key. */
+#define SCC_SMN_STATE_SECURE		0xA
+/* SCC is not secure. SCM is using default key. */
+#define SCC_SMN_STATE_NON_SECURE	0xC
+
+#define SCC_SCM_INTR_CTRL_ZEROIZE_MEM	BIT(2)
+#define SCC_SCM_INTR_CTRL_CLR_INTR	BIT(1)
+#define SCC_SCM_INTR_CTRL_MASK_INTR	BIT(0)
+
+/* Size, in blocks, of Black memory. */
+#define SCC_SCM_CFG_BLACK_SIZE_MASK	0x07fe0000
+#define SCC_SCM_CFG_BLACK_SIZE_SHIFT	17
+/* Size, in blocks, of Red memory. */
+#define SCC_SCM_CFG_RED_SIZE_MASK	0x0001ff80
+#define SCC_SCM_CFG_RED_SIZE_SHIFT	7
+/* Number of bytes per block. */
+#define SCC_SCM_CFG_BLOCK_SIZE_MASK	0x0000007f
+
+#define SCC_SMN_COMMAND_TAMPER_LOCK	BIT(4)
+#define SCC_SMN_COMMAND_CLR_INTR	BIT(3)
+#define SCC_SMN_COMMAND_CLR_BIT_BANK	BIT(2)
+#define SCC_SMN_COMMAND_EN_INTR	BIT(1)
+#define SCC_SMN_COMMAND_SET_SOFTWARE_ALARM  BIT(0)
+
+#define SCC_KEY_SLOTS			20
+#define SCC_MAX_KEY_SIZE		32
+#define SCC_KEY_SLOT_SIZE		32
+
+#define SCC_CRC_CCITT_START		0xFFFF
+
+/*
+ * Offset into each RAM of the start of the area that is not
+ * reserved for stored keys.
+ */
+#define SCC_NON_RESERVED_OFFSET	(SCC_KEY_SLOTS * SCC_KEY_SLOT_SIZE)
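+
+/*
+ * With the values above this reserves 20 * 32 = 640 bytes at the start
+ * of both the red and the black RAM for stored keys; the driver only
+ * ever touches the area behind that offset (see mxc_scc_hw_init()):
+ *
+ *	scc->red_memory   = scc->base + SCC_SCM_RED_MEMORY +
+ *			    SCC_NON_RESERVED_OFFSET;
+ *	scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
+ *			    SCC_NON_RESERVED_OFFSET;
+ */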
+
+/* Fixed padding for appending to plaintext to fill out a block */
+static char scc_block_padding[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };
+
+enum mxc_scc_state {
+	SCC_STATE_OK,
+	SCC_STATE_UNIMPLEMENTED,
+	SCC_STATE_FAILED
+};
+
+struct mxc_scc {
+	struct device		*dev;
+	void __iomem		*base;
+	struct clk		*clk;
+	bool			hw_busy;
+	spinlock_t		lock;
+	struct crypto_queue	queue;
+	struct crypto_async_request *req;
+	int			block_size_bytes;
+	int			black_ram_size_blocks;
+	int			memory_size_bytes;
+	int			bytes_remaining;
+
+	void __iomem		*red_memory;
+	void __iomem		*black_memory;
+};
+
+struct mxc_scc_ctx {
+	struct mxc_scc		*scc;
+	struct scatterlist	*sg_src;
+	size_t			src_nents;
+	struct scatterlist	*sg_dst;
+	size_t			dst_nents;
+	unsigned int		offset;
+	unsigned int		size;
+	unsigned int		ctrl;
+};
+
+struct mxc_scc_crypto_tmpl {
+	struct mxc_scc *scc;
+	struct crypto_alg alg;
+};
+
+static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
+			    struct crypto_async_request *req)
+{
+	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+	struct mxc_scc *scc = ctx->scc;
+	size_t len;
+	void __iomem *from;
+
+	if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
+		from = scc->red_memory;
+	else
+		from = scc->black_memory;
+
+	dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
+		ctx->dst_nents * 8);
+	len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
+				   from, ctx->size, ctx->offset);
+	if (!len) {
+		dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
+		return -EINVAL;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "red memory@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4,
+		       scc->red_memory, ctx->size, 1);
+	print_hex_dump(KERN_ERR,
+		       "black memory@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4,
+		       scc->black_memory, ctx->size, 1);
+#endif
+
+	ctx->offset += len;
+
+	if (ctx->offset < ablkreq->nbytes)
+		return -EINPROGRESS;
+
+	return 0;
+}
+
+static int mxc_scc_ablkcipher_req_init(struct ablkcipher_request *req,
+				       struct mxc_scc_ctx *ctx)
+{
+	struct mxc_scc *scc = ctx->scc;
+	int nents;
+
+	nents = sg_nents_for_len(req->src, req->nbytes);
+	if (nents < 0) {
+		dev_err(scc->dev, "Invalid number of src SG entries\n");
+		return nents;
+	}
+	ctx->src_nents = nents;
+
+	nents = sg_nents_for_len(req->dst, req->nbytes);
+	if (nents < 0) {
+		dev_err(scc->dev, "Invalid number of dst SG entries\n");
+		return nents;
+	}
+	ctx->dst_nents = nents;
+
+	ctx->size = 0;
+	ctx->offset = 0;
+
+	return 0;
+}
+
+static int mxc_scc_ablkcipher_req_complete(struct crypto_async_request *req,
+					   struct mxc_scc_ctx *ctx,
+					   int result)
+{
+	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+	struct mxc_scc *scc = ctx->scc;
+
+	scc->req = NULL;
+	scc->bytes_remaining = scc->memory_size_bytes;
+
+	if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE)
+		memcpy(ablkreq->info, scc->base + SCC_SCM_INIT_VECTOR_0,
+		       scc->block_size_bytes);
+
+	req->complete(req, result);
+	scc->hw_busy = false;
+
+	return 0;
+}
+
+static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
+			     struct ablkcipher_request *req)
+{
+	u8 padding_buffer[sizeof(u16) + sizeof(scc_block_padding)];
+	size_t len = min_t(size_t, req->nbytes - ctx->offset,
+			   ctx->scc->bytes_remaining);
+	unsigned int padding_byte_count = 0;
+	struct mxc_scc *scc = ctx->scc;
+	void __iomem *to;
+
+	if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
+		to = scc->black_memory;
+	else
+		to = scc->red_memory;
+
+	if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE && req->info)
+		memcpy(scc->base + SCC_SCM_INIT_VECTOR_0, req->info,
+		       scc->block_size_bytes);
+
+	len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
+				 to, len, ctx->offset);
+	if (!len) {
+		dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
+		return -EINVAL;
+	}
+
+	ctx->size = len;
+
+#ifdef DEBUG
+	dev_dbg(scc->dev, "copied %zu bytes to 0x%p\n", len, to);
+	print_hex_dump(KERN_ERR,
+		       "init vector0@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4,
+		       scc->base + SCC_SCM_INIT_VECTOR_0, scc->block_size_bytes,
+		       1);
+	print_hex_dump(KERN_ERR,
+		       "red memory@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4,
+		       scc->red_memory, ctx->size, 1);
+	print_hex_dump(KERN_ERR,
+		       "black memory@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4,
+		       scc->black_memory, ctx->size, 1);
+#endif
+
+	scc->bytes_remaining -= len;
+
+	padding_byte_count = len % scc->block_size_bytes;
+
+	if (padding_byte_count) {
+		memcpy(padding_buffer, scc_block_padding, padding_byte_count);
+		memcpy(to + len, padding_buffer, padding_byte_count);
+		ctx->size += padding_byte_count;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "data to encrypt@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4,
+		       to, ctx->size, 1);
+#endif
+
+	return 0;
+}
+
+static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
+				    struct crypto_async_request *req)
+{
+	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+	struct mxc_scc *scc = ctx->scc;
+	int err;
+
+	dev_dbg(scc->dev, "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
+		ablkreq->nbytes, ablkreq->src, ablkreq->dst);
+
+	writel(0, scc->base + SCC_SCM_ERROR_STATUS);
+
+	err = mxc_scc_put_data(ctx, ablkreq);
+	if (err) {
+		mxc_scc_ablkcipher_req_complete(req, ctx, err);
+		return;
+	}
+
+	dev_dbg(scc->dev, "Start encryption (0x%p/0x%p)\n",
+		(void *)readl(scc->base + SCC_SCM_RED_START),
+		(void *)readl(scc->base + SCC_SCM_BLACK_START));
+
+	/* clear interrupt control registers */
+	writel(SCC_SCM_INTR_CTRL_CLR_INTR,
+	       scc->base + SCC_SCM_INTR_CTRL);
+
+	writel((ctx->size / ctx->scc->block_size_bytes) - 1,
+	       scc->base + SCC_SCM_LENGTH);
+
+	dev_dbg(scc->dev, "Process %d block(s) in 0x%p\n",
+		ctx->size / ctx->scc->block_size_bytes,
+		(ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE) ? scc->black_memory :
+		scc->red_memory);
+
+	writel(ctx->ctrl, scc->base + SCC_SCM_CTRL);
+}
+
+static irqreturn_t mxc_scc_int(int irq, void *priv)
+{
+	struct crypto_async_request *req;
+	struct mxc_scc_ctx *ctx;
+	struct mxc_scc *scc = priv;
+	int status;
+	int ret;
+
+	status = readl(scc->base + SCC_SCM_STATUS);
+
+	/* clear interrupt control registers */
+	writel(SCC_SCM_INTR_CTRL_CLR_INTR, scc->base + SCC_SCM_INTR_CTRL);
+
+	if (status & SCC_SCM_STATUS_BUSY)
+		return IRQ_NONE;
+
+	req = scc->req;
+	if (req) {
+		ctx = crypto_tfm_ctx(req->tfm);
+		ret = mxc_scc_get_data(ctx, req);
+		if (ret != -EINPROGRESS)
+			mxc_scc_ablkcipher_req_complete(req, ctx, ret);
+		else
+			mxc_scc_ablkcipher_next(ctx, req);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int mxc_scc_cra_init(struct crypto_tfm *tfm)
+{
+	struct mxc_scc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct mxc_scc_crypto_tmpl *algt;
+
+	algt = container_of(alg, struct mxc_scc_crypto_tmpl, alg);
+
+	ctx->scc = algt->scc;
+	return 0;
+}
+
+static void mxc_scc_dequeue_req_unlocked(struct mxc_scc_ctx *ctx)
+{
+	struct crypto_async_request *req, *backlog;
+
+	if (ctx->scc->hw_busy)
+		return;
+
+	spin_lock_bh(&ctx->scc->lock);
+	backlog = crypto_get_backlog(&ctx->scc->queue);
+	req = crypto_dequeue_request(&ctx->scc->queue);
+	ctx->scc->req = req;
+	ctx->scc->hw_busy = true;
+	spin_unlock_bh(&ctx->scc->lock);
+
+	if (!req)
+		return;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	mxc_scc_ablkcipher_next(ctx, req);
+}
+
+static int mxc_scc_queue_req(struct mxc_scc_ctx *ctx,
+			     struct crypto_async_request *req)
+{
+	int ret;
+
+	spin_lock_bh(&ctx->scc->lock);
+	ret = crypto_enqueue_request(&ctx->scc->queue, req);
+	spin_unlock_bh(&ctx->scc->lock);
+
+	if (ret != -EINPROGRESS)
+		return ret;
+
+	mxc_scc_dequeue_req_unlocked(ctx);
+
+	return -EINPROGRESS;
+}
+
+static int mxc_scc_des3_op(struct mxc_scc_ctx *ctx,
+			   struct ablkcipher_request *req)
+{
+	int err;
+
+	err = mxc_scc_ablkcipher_req_init(req, ctx);
+	if (err)
+		return err;
+
+	return mxc_scc_queue_req(ctx, &req->base);
+}
+
+static int mxc_scc_ecb_des_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+
+	return mxc_scc_des3_op(ctx, req);
+}
+
+static int mxc_scc_ecb_des_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+	ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
+
+	return mxc_scc_des3_op(ctx, req);
+}
+
+static int mxc_scc_cbc_des_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+	ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
+
+	return mxc_scc_des3_op(ctx, req);
+}
+
+static int mxc_scc_cbc_des_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+	ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
+	ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
+
+	return mxc_scc_des3_op(ctx, req);
+}
+
+static void mxc_scc_hw_init(struct mxc_scc *scc)
+{
+	int offset;
+
+	offset = SCC_NON_RESERVED_OFFSET / scc->block_size_bytes;
+
+	/* Fill the RED_START register */
+	writel(offset, scc->base + SCC_SCM_RED_START);
+
+	/* Fill the BLACK_START register */
+	writel(offset, scc->base + SCC_SCM_BLACK_START);
+
+	scc->red_memory = scc->base + SCC_SCM_RED_MEMORY +
+			  SCC_NON_RESERVED_OFFSET;
+
+	scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
+			    SCC_NON_RESERVED_OFFSET;
+
+	scc->bytes_remaining = scc->memory_size_bytes;
+}
+
+static int mxc_scc_get_config(struct mxc_scc *scc)
+{
+	int config;
+
+	config = readl(scc->base + SCC_SCM_CFG);
+
+	scc->block_size_bytes = config & SCC_SCM_CFG_BLOCK_SIZE_MASK;
+
+	scc->black_ram_size_blocks = config & SCC_SCM_CFG_BLACK_SIZE_MASK;
+
+	scc->memory_size_bytes = (scc->block_size_bytes *
+				  scc->black_ram_size_blocks) -
+				  SCC_NON_RESERVED_OFFSET;
+
+	return 0;
+}
+
+static enum mxc_scc_state mxc_scc_get_state(struct mxc_scc *scc)
+{
+	enum mxc_scc_state state;
+	int status;
+
+	status = readl(scc->base + SCC_SMN_STATUS) &
+		       SCC_SMN_STATUS_STATE_MASK;
+
+	/* If in Health Check, try to bring the SCC up to a secure state */
+	if (status & SCC_SMN_STATE_HEALTH_CHECK) {
+		/*
+		 * Write a simple algorithm to the Algorithm Sequence
+		 * Checker (ASC)
+		 */
+		writel(0xaaaa, scc->base + SCC_SMN_SEQ_START);
+		writel(0x5555, scc->base + SCC_SMN_SEQ_END);
+		writel(0x5555, scc->base + SCC_SMN_SEQ_CHECK);
+
+		status = readl(scc->base + SCC_SMN_STATUS) &
+			       SCC_SMN_STATUS_STATE_MASK;
+	}
+
+	switch (status) {
+	case SCC_SMN_STATE_NON_SECURE:
+	case SCC_SMN_STATE_SECURE:
+		state = SCC_STATE_OK;
+		break;
+	case SCC_SMN_STATE_FAIL:
+		state = SCC_STATE_FAILED;
+		break;
+	default:
+		state = SCC_STATE_UNIMPLEMENTED;
+		break;
+	}
+
+	return state;
+}
+
+static struct mxc_scc_crypto_tmpl scc_ecb_des = {
+	.alg = {
+		.cra_name = "ecb(des3_ede)",
+		.cra_driver_name = "ecb-des3-scc",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mxc_scc_ctx),
+		.cra_alignmask = 0,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_module = THIS_MODULE,
+		.cra_init = mxc_scc_cra_init,
+		.cra_u.ablkcipher = {
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.encrypt = mxc_scc_ecb_des_encrypt,
+			.decrypt = mxc_scc_ecb_des_decrypt,
+		}
+	}
+};
+
+static struct mxc_scc_crypto_tmpl scc_cbc_des = {
+	.alg = {
+		.cra_name = "cbc(des3_ede)",
+		.cra_driver_name = "cbc-des3-scc",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mxc_scc_ctx),
+		.cra_alignmask = 0,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_module = THIS_MODULE,
+		.cra_init = mxc_scc_cra_init,
+		.cra_u.ablkcipher = {
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.encrypt = mxc_scc_cbc_des_encrypt,
+			.decrypt = mxc_scc_cbc_des_decrypt,
+		}
+	}
+};
+
+static struct mxc_scc_crypto_tmpl *scc_crypto_algs[] = {
+	&scc_ecb_des,
+	&scc_cbc_des,
+};
+
+static int mxc_scc_crypto_register(struct mxc_scc *scc)
+{
+	int i;
+	int err = 0;
+
+	for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++) {
+		scc_crypto_algs[i]->scc = scc;
+		err = crypto_register_alg(&scc_crypto_algs[i]->alg);
+		if (err)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	while (--i >= 0)
+		crypto_unregister_alg(&scc_crypto_algs[i]->alg);
+
+	return err;
+}
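+
+/*
+ * Data flow of the templates above, as implemented by mxc_scc_put_data()
+ * and mxc_scc_get_data(): for encryption the plaintext is copied into
+ * the red memory and the ciphertext is read back from the black memory;
+ * for decryption the direction is reversed:
+ *
+ *	encrypt:  sg src -> red_memory   -> [SCM] -> black_memory -> sg dst
+ *	decrypt:  sg src -> black_memory -> [SCM] -> red_memory   -> sg dst
+ *
+ * Neither template provides a .setkey handler; the SCC always ciphers
+ * with its internal secret (or default) key.
+ */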
+
+static void mxc_scc_crypto_unregister(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++)
+		crypto_unregister_alg(&scc_crypto_algs[i]->alg);
+}
+
+static int mxc_scc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct mxc_scc *scc;
+	enum mxc_scc_state state;
+	int irq;
+	int ret;
+	int i;
+
+	scc = devm_kzalloc(dev, sizeof(*scc), GFP_KERNEL);
+	if (!scc)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	scc->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(scc->base))
+		return PTR_ERR(scc->base);
+
+	scc->clk = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(scc->clk)) {
+		dev_err(dev, "Could not get ipg clock\n");
+		return PTR_ERR(scc->clk);
+	}
+
+	ret = clk_prepare_enable(scc->clk);
+	if (ret)
+		return ret;
+
+	/* clear error status register */
+	writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);
+
+	/* clear interrupt control registers */
+	writel(SCC_SCM_INTR_CTRL_CLR_INTR |
+	       SCC_SCM_INTR_CTRL_MASK_INTR,
+	       scc->base + SCC_SCM_INTR_CTRL);
+
+	writel(SCC_SMN_COMMAND_CLR_INTR |
+	       SCC_SMN_COMMAND_EN_INTR,
+	       scc->base + SCC_SMN_COMMAND);
+
+	scc->dev = dev;
+	platform_set_drvdata(pdev, scc);
+
+	ret = mxc_scc_get_config(scc);
+	if (ret)
+		goto err_out;
+
+	state = mxc_scc_get_state(scc);
+
+	if (state != SCC_STATE_OK) {
+		dev_err(dev, "SCC in unusable state %d\n", state);
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	mxc_scc_hw_init(scc);
+
+	spin_lock_init(&scc->lock);
+	/* FIXME: calculate queue from RAM slots */
+	crypto_init_queue(&scc->queue, 50);
+
+	for (i = 0; i < 2; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0) {
+			dev_err(dev, "failed to get irq resource: %d\n", irq);
+			ret = irq;
+			goto err_out;
+		}
+
+		ret = devm_request_threaded_irq(dev, irq, NULL, mxc_scc_int,
+						IRQF_ONESHOT, dev_name(dev), scc);
+		if (ret)
+			goto err_out;
+	}
+
+	ret = mxc_scc_crypto_register(scc);
+	if (ret) {
+		dev_err(dev, "could not register algorithms\n");
+		goto err_out;
+	}
+
+	dev_info(dev, "registered successfully.\n");
+
+	return 0;
+
+err_out:
+	clk_disable_unprepare(scc->clk);
+
+	return ret;
+}
+
+static int mxc_scc_remove(struct platform_device *pdev)
+{
+	struct mxc_scc *scc = platform_get_drvdata(pdev);
+
+	mxc_scc_crypto_unregister();
+
+	clk_disable_unprepare(scc->clk);
+
+	return 0;
+}
+
+static const struct of_device_id mxc_scc_dt_ids[] = {
+	{ .compatible = "fsl,imx25-scc", .data = NULL, },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxc_scc_dt_ids);
+
+static struct platform_driver mxc_scc_driver = {
+	.probe	= mxc_scc_probe,
+	.remove	= mxc_scc_remove,
+	.driver	= {
+		.name		= "mxc-scc",
+		.of_match_table	= mxc_scc_dt_ids,
+	},
+};
+
+module_platform_driver(mxc_scc_driver);
+MODULE_AUTHOR("Steffen Trumtrar <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Freescale i.MX25 SCC Crypto driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
new file mode 100644
index 0000000..56bd281
--- /dev/null
+++ b/drivers/crypto/mxs-dcp.c
@@ -0,0 +1,1122 @@
+/*
+ * Freescale i.MX23/i.MX28 Data Co-Processor driver
+ *
+ * Copyright (C) 2013 Marek Vasut <marex@denx.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/stmp_device.h>
+
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#define DCP_MAX_CHANS	4
+#define DCP_BUF_SZ	PAGE_SIZE
+
+#define DCP_ALIGNMENT	64
+
+/* DCP DMA descriptor. */
+struct dcp_dma_desc {
+	uint32_t	next_cmd_addr;
+	uint32_t	control0;
+	uint32_t	control1;
+	uint32_t	source;
+	uint32_t	destination;
+	uint32_t	size;
+	uint32_t	payload;
+	uint32_t	status;
+};
+
+/* Coherent aligned block for bounce buffering. */
+struct dcp_coherent_block {
+	uint8_t			aes_in_buf[DCP_BUF_SZ];
+	uint8_t			aes_out_buf[DCP_BUF_SZ];
+	uint8_t			sha_in_buf[DCP_BUF_SZ];
+
+	uint8_t			aes_key[2 * AES_KEYSIZE_128];
+
+	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
+};
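+
+/*
+ * The coherent block above has to satisfy the DCP's 64-byte alignment
+ * requirement, so the probe routine over-allocates by DCP_ALIGNMENT and
+ * realigns the pointer before use:
+ *
+ *	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
+ *				 GFP_KERNEL);
+ *	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
+ */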
+
+struct dcp {
+	struct device			*dev;
+	void __iomem			*base;
+
+	uint32_t			caps;
+
+	struct dcp_coherent_block	*coh;
+
+	struct completion		completion[DCP_MAX_CHANS];
+	spinlock_t			lock[DCP_MAX_CHANS];
+	struct task_struct		*thread[DCP_MAX_CHANS];
+	struct crypto_queue		queue[DCP_MAX_CHANS];
+};
+
+enum dcp_chan {
+	DCP_CHAN_HASH_SHA	= 0,
+	DCP_CHAN_CRYPTO		= 2,
+};
+
+struct dcp_async_ctx {
+	/* Common context */
+	enum dcp_chan	chan;
+	uint32_t	fill;
+
+	/* SHA Hash-specific context */
+	struct mutex			mutex;
+	uint32_t			alg;
+	unsigned int			hot:1;
+
+	/* Crypto-specific context */
+	struct crypto_skcipher		*fallback;
+	unsigned int			key_len;
+	uint8_t				key[AES_KEYSIZE_128];
+};
+
+struct dcp_aes_req_ctx {
+	unsigned int	enc:1;
+	unsigned int	ecb:1;
+};
+
+struct dcp_sha_req_ctx {
+	unsigned int	init:1;
+	unsigned int	fini:1;
+};
+
+/*
+ * There can be only one instance of the MXS DCP, a consequence of how
+ * this driver plugs into the Linux Crypto API (it keeps global state).
+ */
+static struct dcp *global_sdcp;
+
+/* DCP register layout. */
+#define MXS_DCP_CTRL				0x00
+#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
+#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)
+
+#define MXS_DCP_STAT				0x10
+#define MXS_DCP_STAT_CLR			0x18
+#define MXS_DCP_STAT_IRQ_MASK			0xf
+
+#define MXS_DCP_CHANNELCTRL			0x20
+#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff
+
+#define MXS_DCP_CAPABILITY1			0x40
+#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
+#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
+#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)
+
+#define MXS_DCP_CONTEXT				0x50
+
+#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
+
+#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))
+
+#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
+#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))
+
+/* DMA descriptor bits. */
+#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
+#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
+#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
+#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
+#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
+#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
+#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
+#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
+#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)
+
+#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
+#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
+#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
+#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
+#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
+
+static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
+{
+	struct dcp *sdcp = global_sdcp;
+	const int chan = actx->chan;
+	uint32_t stat;
+	unsigned long ret;
+	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+
+	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
+					      DMA_TO_DEVICE);
+
+	reinit_completion(&sdcp->completion[chan]);
+
+	/* Clear status register. */
+	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
+
+	/* Load the DMA descriptor. */
+	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
+
+	/* Increment the semaphore to start the DMA transfer. */
+	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
+
+	ret = wait_for_completion_timeout(&sdcp->completion[chan],
+					  msecs_to_jiffies(1000));
+	if (!ret) {
+		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
+			chan, readl(sdcp->base + MXS_DCP_STAT));
+		return -ETIMEDOUT;
+	}
+
+	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
+	if (stat & 0xff) {
+		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
+			chan, stat);
+		return -EINVAL;
+	}
+
+	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
+
+	return 0;
+}
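+
+/*
+ * Summary of the handshake above (one descriptor per operation): the
+ * caller fills in the per-channel descriptor in the coherent block and
+ * mxs_dcp_start_dma() then does, in essence:
+ *
+ *	writel(desc_phys, base + MXS_DCP_CH_N_CMDPTR(chan));
+ *	writel(1, base + MXS_DCP_CH_N_SEMA(chan));
+ *	wait_for_completion_timeout(&completion[chan],
+ *				    msecs_to_jiffies(1000));
+ *
+ * The DECR_SEMAPHORE and INTERRUPT bits in control0 make the engine
+ * release the semaphore and raise the per-channel interrupt that
+ * completes the wait (see mxs_dcp_irq()).
+ */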
+
+/*
+ * Encryption (AES128)
+ */
+static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
+			   struct ablkcipher_request *req, int init)
+{
+	struct dcp *sdcp = global_sdcp;
+	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	int ret;
+
+	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
+					     2 * AES_KEYSIZE_128,
+					     DMA_TO_DEVICE);
+	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
+					     DCP_BUF_SZ, DMA_TO_DEVICE);
+	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
+					     DCP_BUF_SZ, DMA_FROM_DEVICE);
+
+	/* Fill in the DMA descriptor. */
+	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
+		    MXS_DCP_CONTROL0_INTERRUPT |
+		    MXS_DCP_CONTROL0_ENABLE_CIPHER;
+
+	/* Payload contains the key. */
+	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
+
+	if (rctx->enc)
+		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
+	if (init)
+		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;
+
+	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;
+
+	if (rctx->ecb)
+		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
+	else
+		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
+
+	desc->next_cmd_addr = 0;
+	desc->source = src_phys;
+	desc->destination = dst_phys;
+	desc->size = actx->fill;
+	desc->payload = key_phys;
+	desc->status = 0;
+
+	ret = mxs_dcp_start_dma(actx);
+
+	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
+			 DMA_TO_DEVICE);
+	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
+
+	return ret;
+}
+
+static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
+{
+	struct dcp *sdcp = global_sdcp;
+
+	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
+	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
+	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+	struct scatterlist *dst = req->dst;
+	struct scatterlist *src = req->src;
+	const int nents = sg_nents(req->src);
+
+	const int out_off = DCP_BUF_SZ;
+	uint8_t *in_buf = sdcp->coh->aes_in_buf;
+	uint8_t *out_buf = sdcp->coh->aes_out_buf;
+
+	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
+	uint32_t dst_off = 0;
+
+	uint8_t *key = sdcp->coh->aes_key;
+
+	int ret = 0;
+	int split = 0;
+	unsigned int i, len, clen, rem = 0;
+	int init = 0;
+
+	actx->fill = 0;
+
+	/* Copy the key from the temporary location. */
+	memcpy(key, actx->key, actx->key_len);
+
+	if (!rctx->ecb) {
+		/* Copy the CBC IV just past the key. */
+		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
+		/* CBC needs the INIT set. */
+		init = 1;
+	} else {
+		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
+	}
+
+	for_each_sg(req->src, src, nents, i) {
+		src_buf = sg_virt(src);
+		len = sg_dma_len(src);
+
+		do {
+			if (actx->fill + len > out_off)
+				clen = out_off - actx->fill;
+			else
+				clen = len;
+
+			memcpy(in_buf + actx->fill, src_buf, clen);
+			len -= clen;
+			src_buf += clen;
+			actx->fill += clen;
+
+			/*
+			 * If we filled the buffer or this is the last SG,
+			 * submit the buffer.
+			 */
+			if (actx->fill == out_off || sg_is_last(src)) {
+				ret = mxs_dcp_run_aes(actx, req, init);
+				if (ret)
+					return ret;
+				init = 0;
+
+				out_tmp = out_buf;
+				while (dst && actx->fill) {
+					if (!split) {
+						dst_buf = sg_virt(dst);
+						dst_off = 0;
+					}
+					rem = min(sg_dma_len(dst) - dst_off,
+						  actx->fill);
+
+					memcpy(dst_buf + dst_off, out_tmp, rem);
+					out_tmp += rem;
+					dst_off += rem;
+					actx->fill -= rem;
+
+					if (dst_off == sg_dma_len(dst)) {
+						dst = sg_next(dst);
+						split = 0;
+					} else {
+						split = 1;
+					}
+				}
+			}
+		} while (len);
+	}
+
+	return ret;
+}
+
+static int dcp_chan_thread_aes(void *data)
+{
+	struct dcp *sdcp = global_sdcp;
+	const int chan = DCP_CHAN_CRYPTO;
+
+	struct crypto_async_request *backlog;
+	struct crypto_async_request *arq;
+
+	int ret;
+
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		spin_lock(&sdcp->lock[chan]);
+		backlog = crypto_get_backlog(&sdcp->queue[chan]);
+		arq = crypto_dequeue_request(&sdcp->queue[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
+
+		if (backlog)
+			backlog->complete(backlog, -EINPROGRESS);
+
+		if (arq) {
+			ret = mxs_dcp_aes_block_crypt(arq);
+			arq->complete(arq, ret);
+		}
+	}
+
+	return 0;
+}
+
+static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+	int ret;
+
+	skcipher_request_set_tfm(subreq, ctx->fallback);
+	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst,
+				   req->nbytes, req->info);
+
+	if (enc)
+		ret = crypto_skcipher_encrypt(subreq);
+	else
+		ret = crypto_skcipher_decrypt(subreq);
+
+	skcipher_request_zero(subreq);
+
+	return ret;
+}
+
+static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
+{
+	struct dcp *sdcp = global_sdcp;
+	struct crypto_async_request *arq = &req->base;
+	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
+	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+	int ret;
+
+	if (unlikely(actx->key_len != AES_KEYSIZE_128))
+		return mxs_dcp_block_fallback(req, enc);
+
+	rctx->enc = enc;
+	rctx->ecb = ecb;
+	actx->chan = DCP_CHAN_CRYPTO;
+
+	spin_lock(&sdcp->lock[actx->chan]);
+	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
+	spin_unlock(&sdcp->lock[actx->chan]);
+
+	wake_up_process(sdcp->thread[actx->chan]);
+
+	return -EINPROGRESS;
+}
+
+static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return mxs_dcp_aes_enqueue(req, 0, 1);
+}
+
+static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return mxs_dcp_aes_enqueue(req, 1, 1);
+}
+
+static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return mxs_dcp_aes_enqueue(req, 0, 0);
+}
+
+static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return mxs_dcp_aes_enqueue(req, 1, 0);
+}
+
+static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			      unsigned int len)
+{
+	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
+	int ret;
+
+	/*
+	 * AES-128 is supported by the hardware; store the key in a
+	 * temporary buffer and exit. The temporary buffer must be used
+	 * here because an operation may still be in progress.
+	 */
+	actx->key_len = len;
+	if (len == AES_KEYSIZE_128) {
+		memcpy(actx->key, key, len);
+		return 0;
+	}
+
+	/*
+	 * If the requested AES key size is not supported by the hardware,
+	 * but is supported by in-kernel software implementation, we use
+	 * software fallback.
+	 */
+	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(actx->fallback,
+				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_skcipher_setkey(actx->fallback, key, len);
+	if (!ret)
+		return 0;
+
+	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
+	tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
+			       CRYPTO_TFM_RES_MASK;
+
+	return ret;
+}
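+
+/*
+ * Illustrative note, not extra driver logic: the key length recorded
+ * here decides which path a later request takes. mxs_dcp_aes_enqueue()
+ * checks it before queueing:
+ *
+ *	if (unlikely(actx->key_len != AES_KEYSIZE_128))
+ *		return mxs_dcp_block_fallback(req, enc);
+ *
+ * so AES-192/256 keys are accepted by setkey but are only ever handled
+ * by the software fallback skcipher, never by the DCP engine itself.
+ */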
+
+static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
+{
+	const char *name = crypto_tfm_alg_name(tfm);
+	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
+	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+	struct crypto_skcipher *blk;
+
+	blk = crypto_alloc_skcipher(name, 0, flags);
+	if (IS_ERR(blk))
+		return PTR_ERR(blk);
+
+	actx->fallback = blk;
+	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
+	return 0;
+}
+
+static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
+{
+	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
+
+	crypto_free_skcipher(actx->fallback);
+}
+
+/*
+ * Hashing (SHA1/SHA256)
+ */
+static int mxs_dcp_run_sha(struct ahash_request *req)
+{
+	struct dcp *sdcp = global_sdcp;
+	int ret;
+
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+
+	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+
+	dma_addr_t digest_phys = 0;
+	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
+					     DCP_BUF_SZ, DMA_TO_DEVICE);
+
+	/* Fill in the DMA descriptor. */
+	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
+		    MXS_DCP_CONTROL0_INTERRUPT |
+		    MXS_DCP_CONTROL0_ENABLE_HASH;
+	if (rctx->init)
+		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;
+
+	desc->control1 = actx->alg;
+	desc->next_cmd_addr = 0;
+	desc->source = buf_phys;
+	desc->destination = 0;
+	desc->size = actx->fill;
+	desc->payload = 0;
+	desc->status = 0;
+
+	/* Set HASH_TERM bit for last transfer block. */
+	if (rctx->fini) {
+		digest_phys = dma_map_single(sdcp->dev, req->result,
+					     halg->digestsize, DMA_FROM_DEVICE);
+		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
+		desc->payload = digest_phys;
+	}
+
+	ret = mxs_dcp_start_dma(actx);
+
+	if (rctx->fini)
+		dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
+				 DMA_FROM_DEVICE);
+
+	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+
+	return ret;
+}
+
+static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
+{
+	struct dcp *sdcp = global_sdcp;
+
+	struct ahash_request *req = ahash_request_cast(arq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+	const int nents = sg_nents(req->src);
+
+	uint8_t *in_buf = sdcp->coh->sha_in_buf;
+
+	uint8_t *src_buf;
+
+	struct scatterlist *src;
+
+	unsigned int i, len, clen;
+	int ret;
+
+	int fin = rctx->fini;
+
+	if (fin)
+		rctx->fini = 0;
+
+	for_each_sg(req->src, src, nents, i) {
+		src_buf = sg_virt(src);
+		len = sg_dma_len(src);
+
+		do {
+			if (actx->fill + len > DCP_BUF_SZ)
+				clen = DCP_BUF_SZ - actx->fill;
+			else
+				clen = len;
+
+			memcpy(in_buf + actx->fill, src_buf, clen);
+			len -= clen;
+			src_buf += clen;
+			actx->fill += clen;
+
+			/*
+			 * If we filled the buffer and still have some
+			 * more data, submit the buffer.
+			 */
+			if (len && actx->fill == DCP_BUF_SZ) {
+				ret = mxs_dcp_run_sha(req);
+				if (ret)
+					return ret;
+				actx->fill = 0;
+				rctx->init = 0;
+			}
+		} while (len);
+	}
+
+	if (fin) {
+		rctx->fini = 1;
+
+		/* Submit whatever is left. */
+		if (!req->result)
+			return -EINVAL;
+
+		ret = mxs_dcp_run_sha(req);
+		if (ret)
+			return ret;
+
+		actx->fill = 0;
+
+		/* The DCP returns the digest byte-reversed; flip it back. */
+		for (i = 0; i < halg->digestsize / 2; i++) {
+			swap(req->result[i],
+			     req->result[halg->digestsize - i - 1]);
+		}
+	}
+
+	return 0;
+}
+
+static int dcp_chan_thread_sha(void *data)
+{
+	struct dcp *sdcp = global_sdcp;
+	const int chan = DCP_CHAN_HASH_SHA;
+
+	struct crypto_async_request *backlog;
+	struct crypto_async_request *arq;
+
+	struct dcp_sha_req_ctx *rctx;
+
+	struct ahash_request *req;
+	int ret, fini;
+
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		spin_lock(&sdcp->lock[chan]);
+		backlog = crypto_get_backlog(&sdcp->queue[chan]);
+		arq = crypto_dequeue_request(&sdcp->queue[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
+
+		if (backlog)
+			backlog->complete(backlog, -EINPROGRESS);
+
+		if (arq) {
+			req = ahash_request_cast(arq);
+			rctx = ahash_request_ctx(req);
+
+			ret = dcp_sha_req_to_buf(arq);
+			fini = rctx->fini;
+			arq->complete(arq, ret);
+		}
+	}
+
+	return 0;
+}
+
+static int dcp_sha_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+
+	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+
+	/*
+	 * Start hashing session. The code below only inits the
+	 * hashing session context, nothing more.
+	 */
+	memset(actx, 0, sizeof(*actx));
+
+	if (strcmp(halg->base.cra_name, "sha1") == 0)
+		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
+	else
+		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;
+
+	actx->fill = 0;
+	actx->hot = 0;
+	actx->chan = DCP_CHAN_HASH_SHA;
+
+	mutex_init(&actx->mutex);
+
+	return 0;
+}
+
+static int dcp_sha_update_fx(struct ahash_request *req, int fini)
+{
+	struct dcp *sdcp = global_sdcp;
+
+	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+
+	int ret;
+
+	/*
+	 * Ignore requests that carry no data and are not the trailing
+	 * request in the stream of requests.
+	 */
+	if (!req->nbytes && !fini)
+		return 0;
+
+	mutex_lock(&actx->mutex);
+
+	rctx->fini = fini;
+
+	if (!actx->hot) {
+		actx->hot = 1;
+		rctx->init = 1;
+	}
+
+	spin_lock(&sdcp->lock[actx->chan]);
+	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
+	spin_unlock(&sdcp->lock[actx->chan]);
+
+	wake_up_process(sdcp->thread[actx->chan]);
+	mutex_unlock(&actx->mutex);
+
+	return -EINPROGRESS;
+}
+
+static int dcp_sha_update(struct ahash_request *req)
+{
+	return dcp_sha_update_fx(req, 0);
+}
+
+static int dcp_sha_final(struct ahash_request *req)
+{
+	ahash_request_set_crypt(req, NULL, req->result, 0);
+	req->nbytes = 0;
+	return dcp_sha_update_fx(req, 1);
+}
+
+static int dcp_sha_finup(struct ahash_request *req)
+{
+	return dcp_sha_update_fx(req, 1);
+}
+
+static int dcp_sha_digest(struct ahash_request *req)
+{
+	int ret;
+
+	ret = dcp_sha_init(req);
+	if (ret)
+		return ret;
+
+	return dcp_sha_finup(req);
+}
+
+static int dcp_sha_noimport(struct ahash_request *req, const void *in)
+{
+	return -ENOSYS;
+}
+
+static int dcp_sha_noexport(struct ahash_request *req, void *out)
+{
+	return -ENOSYS;
+}
+
+static int dcp_sha_cra_init(struct crypto_tfm *tfm)
+{
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct dcp_sha_req_ctx));
+	return 0;
+}
+
+static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+/* AES 128 ECB and AES 128 CBC */
+static struct crypto_alg dcp_aes_algs[] = {
+	{
+		.cra_name		= "ecb(aes)",
+		.cra_driver_name	= "ecb-aes-dcp",
+		.cra_priority		= 400,
+		.cra_alignmask		= 15,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_init		= mxs_dcp_aes_fallback_init,
+		.cra_exit		= mxs_dcp_aes_fallback_exit,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_u	= {
+			.ablkcipher = {
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= mxs_dcp_aes_setkey,
+				.encrypt	= mxs_dcp_aes_ecb_encrypt,
+				.decrypt	= mxs_dcp_aes_ecb_decrypt
+			},
+		},
+	}, {
+		.cra_name		= "cbc(aes)",
+		.cra_driver_name	= "cbc-aes-dcp",
+		.cra_priority		= 400,
+		.cra_alignmask		= 15,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_init		= mxs_dcp_aes_fallback_init,
+		.cra_exit		= mxs_dcp_aes_fallback_exit,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_u = {
+			.ablkcipher = {
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= mxs_dcp_aes_setkey,
+				.encrypt	= mxs_dcp_aes_cbc_encrypt,
+				.decrypt	= mxs_dcp_aes_cbc_decrypt,
+				.ivsize		= AES_BLOCK_SIZE,
+			},
+		},
+	},
+};
+
+/* SHA1 */
+static struct ahash_alg dcp_sha1_alg = {
+	.init	= dcp_sha_init,
+	.update	= dcp_sha_update,
+	.final	= dcp_sha_final,
+	.finup	= dcp_sha_finup,
+	.digest	= dcp_sha_digest,
+	.import = dcp_sha_noimport,
+	.export = dcp_sha_noexport,
+	.halg	= {
+		.digestsize	= SHA1_DIGEST_SIZE,
+		.base		= {
+			.cra_name		= "sha1",
+			.cra_driver_name	= "sha1-dcp",
+			.cra_priority		= 400,
+			.cra_alignmask		= 63,
+			.cra_flags		= CRYPTO_ALG_ASYNC,
+			.cra_blocksize		= SHA1_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
+			.cra_module		= THIS_MODULE,
+			.cra_init		= dcp_sha_cra_init,
+			.cra_exit		= dcp_sha_cra_exit,
+		},
+	},
+};
+
+/* SHA256 */
+static struct ahash_alg dcp_sha256_alg = {
+	.init	= dcp_sha_init,
+	.update	= dcp_sha_update,
+	.final	= dcp_sha_final,
+	.finup	= dcp_sha_finup,
+	.digest	= dcp_sha_digest,
+	.import = dcp_sha_noimport,
+	.export = dcp_sha_noexport,
+	.halg	= {
+		.digestsize	= SHA256_DIGEST_SIZE,
+		.base		= {
+			.cra_name		= "sha256",
+			.cra_driver_name	= "sha256-dcp",
+			.cra_priority		= 400,
+			.cra_alignmask		= 63,
+			.cra_flags		= CRYPTO_ALG_ASYNC,
+			.cra_blocksize		= SHA256_BLOCK_SIZE,
+			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
+			.cra_module		= THIS_MODULE,
+			.cra_init		= dcp_sha_cra_init,
+			.cra_exit		= dcp_sha_cra_exit,
+		},
+	},
+};
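+
+/*
+ * Illustrative sketch, not driver code: these hashes are also reachable
+ * from userspace through AF_ALG; which implementation backs the request
+ * depends on the registered priorities ("sha256-dcp" uses 400):
+ *
+ *	int sk = socket(AF_ALG, SOCK_SEQPACKET, 0);
+ *	struct sockaddr_alg sa = {
+ *		.salg_family = AF_ALG,
+ *		.salg_type   = "hash",
+ *		.salg_name   = "sha256",
+ *	};
+ *	unsigned char digest[32];
+ *
+ *	bind(sk, (struct sockaddr *)&sa, sizeof(sa));
+ *	int op = accept(sk, NULL, 0);
+ *	write(op, buf, len);
+ *	read(op, digest, sizeof(digest));
+ *
+ * with buf/len supplied by the caller and error handling omitted.
+ */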
+
+static irqreturn_t mxs_dcp_irq(int irq, void *context)
+{
+	struct dcp *sdcp = context;
+	uint32_t stat;
+	int i;
+
+	stat = readl(sdcp->base + MXS_DCP_STAT);
+	stat &= MXS_DCP_STAT_IRQ_MASK;
+	if (!stat)
+		return IRQ_NONE;
+
+	/* Clear the interrupts. */
+	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
+
+	/* Complete the DMA requests that finished. */
+	for (i = 0; i < DCP_MAX_CHANS; i++)
+		if (stat & (1 << i))
+			complete(&sdcp->completion[i]);
+
+	return IRQ_HANDLED;
+}
+
+static int mxs_dcp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dcp *sdcp = NULL;
+	int i, ret;
+
+	struct resource *iores;
+	int dcp_vmi_irq, dcp_irq;
+
+	if (global_sdcp) {
+		dev_err(dev, "Only one DCP instance allowed!\n");
+		return -ENODEV;
+	}
+
+	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dcp_vmi_irq = platform_get_irq(pdev, 0);
+	if (dcp_vmi_irq < 0) {
+		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
+		return dcp_vmi_irq;
+	}
+
+	dcp_irq = platform_get_irq(pdev, 1);
+	if (dcp_irq < 0) {
+		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq);
+		return dcp_irq;
+	}
+
+	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
+	if (!sdcp)
+		return -ENOMEM;
+
+	sdcp->dev = dev;
+	sdcp->base = devm_ioremap_resource(dev, iores);
+	if (IS_ERR(sdcp->base))
+		return PTR_ERR(sdcp->base);
+
+
+	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
+			       "dcp-vmi-irq", sdcp);
+	if (ret) {
+		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
+		return ret;
+	}
+
+	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
+			       "dcp-irq", sdcp);
+	if (ret) {
+		dev_err(dev, "Failed to claim DCP IRQ!\n");
+		return ret;
+	}
+
+	/* Allocate coherent helper block. */
+	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
+				   GFP_KERNEL);
+	if (!sdcp->coh)
+		return -ENOMEM;
+
+	/* Re-align the structure so it fits the DCP constraints. */
+	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
+
+	/* Restart the DCP block. */
+	ret = stmp_reset_block(sdcp->base);
+	if (ret)
+		return ret;
+
+	/* Initialize control register. */
+	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
+	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
+	       sdcp->base + MXS_DCP_CTRL);
+
+	/* Enable all DCP DMA channels. */
+	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
+	       sdcp->base + MXS_DCP_CHANNELCTRL);
+
+	/*
+	 * We do not enable context switching. Give the context buffer a
+	 * pointer to an illegal address so if context switching is
+	 * inadvertently enabled, the DCP will return an error instead of
+	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
+	 * address will do.
+	 */
+	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
+	for (i = 0; i < DCP_MAX_CHANS; i++)
+		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
+	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
+
+	global_sdcp = sdcp;
+
+	platform_set_drvdata(pdev, sdcp);
+
+	for (i = 0; i < DCP_MAX_CHANS; i++) {
+		spin_lock_init(&sdcp->lock[i]);
+		init_completion(&sdcp->completion[i]);
+		crypto_init_queue(&sdcp->queue[i], 50);
+	}
+
+	/* Create the SHA and AES handler threads. */
+	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
+						      NULL, "mxs_dcp_chan/sha");
+	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
+		dev_err(dev, "Error starting SHA thread!\n");
+		return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
+	}
+
+	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
+						    NULL, "mxs_dcp_chan/aes");
+	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
+		dev_err(dev, "Error starting AES thread!\n");
+		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
+		goto err_destroy_sha_thread;
+	}
+
+	/* Register the various crypto algorithms. */
+	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
+
+	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
+		ret = crypto_register_algs(dcp_aes_algs,
+					   ARRAY_SIZE(dcp_aes_algs));
+		if (ret) {
+			/* Failed to register algorithm. */
+			dev_err(dev, "Failed to register AES crypto!\n");
+			goto err_destroy_aes_thread;
+		}
+	}
+
+	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
+		ret = crypto_register_ahash(&dcp_sha1_alg);
+		if (ret) {
+			dev_err(dev, "Failed to register %s hash!\n",
+				dcp_sha1_alg.halg.base.cra_name);
+			goto err_unregister_aes;
+		}
+	}
+
+	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
+		ret = crypto_register_ahash(&dcp_sha256_alg);
+		if (ret) {
+			dev_err(dev, "Failed to register %s hash!\n",
+				dcp_sha256_alg.halg.base.cra_name);
+			goto err_unregister_sha1;
+		}
+	}
+
+	return 0;
+
+err_unregister_sha1:
+	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
+		crypto_unregister_ahash(&dcp_sha1_alg);
+
+err_unregister_aes:
+	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
+		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+
+err_destroy_aes_thread:
+	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
+
+err_destroy_sha_thread:
+	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
+	return ret;
+}
+
+static int mxs_dcp_remove(struct platform_device *pdev)
+{
+	struct dcp *sdcp = platform_get_drvdata(pdev);
+
+	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
+		crypto_unregister_ahash(&dcp_sha256_alg);
+
+	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
+		crypto_unregister_ahash(&dcp_sha1_alg);
+
+	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
+		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
+
+	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
+	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
+
+	platform_set_drvdata(pdev, NULL);
+
+	global_sdcp = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id mxs_dcp_dt_ids[] = {
+	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
+	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
+
+static struct platform_driver mxs_dcp_driver = {
+	.probe	= mxs_dcp_probe,
+	.remove	= mxs_dcp_remove,
+	.driver	= {
+		.name		= "mxs-dcp",
+		.of_match_table	= mxs_dcp_dt_ids,
+	},
+};
+
+module_platform_driver(mxs_dcp_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Freescale MXS DCP Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-dcp");
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
new file mode 100644
index 0000000..9a67dbf
--- /dev/null
+++ b/drivers/crypto/n2_asm.S
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* n2_asm.S: Hypervisor calls for NCS support.
+ *
+ * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/linkage.h>
+#include <asm/hypervisor.h>
+#include "n2_core.h"
+
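+/* All of the wrappers below follow the sun4v "fast trap" calling
+ * convention: the hypervisor function number goes in %o5, the arguments
+ * in %o0-%o4, and "ta HV_FAST_TRAP" enters the hypervisor.  On return,
+ * %o0 holds the status code and any output values come back in %o1 and
+ * up; the wrappers store those through the caller-provided return
+ * pointers.
+ */
+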
+	/* %o0: queue type
+	 * %o1: RA of queue
+	 * %o2: num entries in queue
+	 * %o3: address of queue handle return
+	 */
+ENTRY(sun4v_ncs_qconf)
+	mov	HV_FAST_NCS_QCONF, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o3]
+	retl
+	 nop
+ENDPROC(sun4v_ncs_qconf)
+
+	/* %o0: queue handle
+	 * %o1: address of queue type return
+	 * %o2: address of queue base address return
+	 * %o3: address of queue num entries return
+	 */
+ENTRY(sun4v_ncs_qinfo)
+	mov	%o1, %g1
+	mov	%o2, %g2
+	mov	%o3, %g3
+	mov	HV_FAST_NCS_QINFO, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%g1]
+	stx	%o2, [%g2]
+	stx	%o3, [%g3]
+	retl
+	 nop
+ENDPROC(sun4v_ncs_qinfo)
+
+	/* %o0: queue handle
+	 * %o1: address of head offset return
+	 */
+ENTRY(sun4v_ncs_gethead)
+	mov	%o1, %o2
+	mov	HV_FAST_NCS_GETHEAD, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o2]
+	retl
+	 nop
+ENDPROC(sun4v_ncs_gethead)
+
+	/* %o0: queue handle
+	 * %o1: address of tail offset return
+	 */
+ENTRY(sun4v_ncs_gettail)
+	mov	%o1, %o2
+	mov	HV_FAST_NCS_GETTAIL, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o2]
+	retl
+	 nop
+ENDPROC(sun4v_ncs_gettail)
+
+	/* %o0: queue handle
+	 * %o1: new tail offset
+	 */
+ENTRY(sun4v_ncs_settail)
+	mov	HV_FAST_NCS_SETTAIL, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+ENDPROC(sun4v_ncs_settail)
+
+	/* %o0: queue handle
+	 * %o1: address of devino return
+	 */
+ENTRY(sun4v_ncs_qhandle_to_devino)
+	mov	%o1, %o2
+	mov	HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o2]
+	retl
+	 nop
+ENDPROC(sun4v_ncs_qhandle_to_devino)
+
+	/* %o0: queue handle
+	 * %o1: new head offset
+	 */
+ENTRY(sun4v_ncs_sethead_marker)
+	mov	HV_FAST_NCS_SETHEAD_MARKER, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+ENDPROC(sun4v_ncs_sethead_marker)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
new file mode 100644
index 0000000..55f34cf
--- /dev/null
+++ b/drivers/crypto/n2_core.c
@@ -0,0 +1,2248 @@
+/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
+ *
+ * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+
+#include <asm/hypervisor.h>
+#include <asm/mdesc.h>
+
+#include "n2_core.h"
+
+#define DRV_MODULE_NAME		"n2_crypto"
+#define DRV_MODULE_VERSION	"0.2"
+#define DRV_MODULE_RELDATE	"July 28, 2011"
+
+static const char version[] =
+	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Niagara2 Crypto driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define N2_CRA_PRIORITY		200
+
+static DEFINE_MUTEX(spu_lock);
+
+struct spu_queue {
+	cpumask_t		sharing;
+	unsigned long		qhandle;
+
+	spinlock_t		lock;
+	u8			q_type;
+	void			*q;
+	unsigned long		head;
+	unsigned long		tail;
+	struct list_head	jobs;
+
+	unsigned long		devino;
+
+	char			irq_name[32];
+	unsigned int		irq;
+
+	struct list_head	list;
+};
+
+struct spu_qreg {
+	struct spu_queue	*queue;
+	unsigned long		type;
+};
+
+static struct spu_queue **cpu_to_cwq;
+static struct spu_queue **cpu_to_mau;
+
+static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
+{
+	if (q->q_type == HV_NCS_QTYPE_MAU) {
+		off += MAU_ENTRY_SIZE;
+		if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
+			off = 0;
+	} else {
+		off += CWQ_ENTRY_SIZE;
+		if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
+			off = 0;
+	}
+	return off;
+}
+
+struct n2_request_common {
+	struct list_head	entry;
+	unsigned int		offset;
+};
+#define OFFSET_NOT_RUNNING	(~(unsigned int)0)
+
+/* An async job request records the final tail value it used in
+ * n2_request_common->offset.  Test whether that offset lies in the
+ * range (old_head, new_head], i.e. exclusive of old_head and inclusive
+ * of new_head, taking queue wrap-around into account.
+ */
+static inline bool job_finished(struct spu_queue *q, unsigned int offset,
+				unsigned long old_head, unsigned long new_head)
+{
+	if (old_head <= new_head) {
+		if (offset > old_head && offset <= new_head)
+			return true;
+	} else {
+		if (offset > old_head || offset <= new_head)
+			return true;
+	}
+	return false;
+}
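+
+/* Worked example: with old_head = 0x380 and new_head = 0x080 (a wrapped
+ * range), an offset of 0x3c0 or 0x040 counts as finished while 0x200
+ * does not; the second branch above handles exactly this wrap-around
+ * case.
+ */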
+
+/* When the HEAD marker is unequal to the actual HEAD, we get
+ * a virtual device INO interrupt.  We should process the
+ * completed CWQ entries and adjust the HEAD marker to clear
+ * the IRQ.
+ */
+static irqreturn_t cwq_intr(int irq, void *dev_id)
+{
+	unsigned long off, new_head, hv_ret;
+	struct spu_queue *q = dev_id;
+
+	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
+	       smp_processor_id(), q->qhandle);
+
+	spin_lock(&q->lock);
+
+	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
+
+	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
+	       smp_processor_id(), new_head, hv_ret);
+
+	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
+		/* XXX ... XXX */
+	}
+
+	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
+	if (hv_ret == HV_EOK)
+		q->head = new_head;
+
+	spin_unlock(&q->lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mau_intr(int irq, void *dev_id)
+{
+	struct spu_queue *q = dev_id;
+	unsigned long head, hv_ret;
+
+	spin_lock(&q->lock);
+
+	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
+	       smp_processor_id(), q->qhandle);
+
+	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
+
+	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
+	       smp_processor_id(), head, hv_ret);
+
+	sun4v_ncs_sethead_marker(q->qhandle, head);
+
+	spin_unlock(&q->lock);
+
+	return IRQ_HANDLED;
+}
+
+static void *spu_queue_next(struct spu_queue *q, void *cur)
+{
+	return q->q + spu_next_offset(q, cur - q->q);
+}
+
+static int spu_queue_num_free(struct spu_queue *q)
+{
+	unsigned long head = q->head;
+	unsigned long tail = q->tail;
+	unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
+	unsigned long diff;
+
+	if (head > tail)
+		diff = head - tail;
+	else
+		diff = (end - tail) + head;
+
+	return (diff / CWQ_ENTRY_SIZE) - 1;
+}
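+
+/* Example: when head == tail the queue is empty and the computation
+ * above yields the full ring size in bytes, i.e. CWQ_NUM_ENTRIES - 1
+ * free slots; one slot is always left unused so that a full queue can
+ * be distinguished from an empty one.
+ */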
+
+static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
+{
+	int avail = spu_queue_num_free(q);
+
+	if (avail >= num_entries)
+		return q->q + q->tail;
+
+	return NULL;
+}
+
+static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
+{
+	unsigned long hv_ret, new_tail;
+
+	new_tail = spu_next_offset(q, last - q->q);
+
+	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
+	if (hv_ret == HV_EOK)
+		q->tail = new_tail;
+	return hv_ret;
+}
+
+static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
+			     int enc_type, int auth_type,
+			     unsigned int hash_len,
+			     bool sfas, bool sob, bool eob, bool encrypt,
+			     int opcode)
+{
+	u64 word = (len - 1) & CONTROL_LEN;
+
+	word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
+	word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
+	word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
+	if (sfas)
+		word |= CONTROL_STORE_FINAL_AUTH_STATE;
+	if (sob)
+		word |= CONTROL_START_OF_BLOCK;
+	if (eob)
+		word |= CONTROL_END_OF_BLOCK;
+	if (encrypt)
+		word |= CONTROL_ENCRYPT;
+	if (hmac_key_len)
+		word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
+	if (hash_len)
+		word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
+
+	return word;
+}
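+
+/* control_word_base() builds the 64-bit control word of the first CWQ
+ * entry for an operation: the CONTROL_LEN field holds (len - 1), the
+ * opcode and the encryption/authentication type fields are shifted into
+ * place, and the SFAS/SOB/EOB/ENCRYPT flag bits plus the (length - 1)
+ * encoded HMAC key and hash length fields are OR'd in as requested by
+ * the caller.
+ */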
+
+#if 0
+static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
+{
+	if (this_len >= 64 ||
+	    qp->head != qp->tail)
+		return true;
+	return false;
+}
+#endif
+
+struct n2_ahash_alg {
+	struct list_head	entry;
+	const u8		*hash_zero;
+	const u32		*hash_init;
+	u8			hw_op_hashsz;
+	u8			digest_size;
+	u8			auth_type;
+	u8			hmac_type;
+	struct ahash_alg	alg;
+};
+
+static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct ahash_alg *ahash_alg;
+
+	ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+	return container_of(ahash_alg, struct n2_ahash_alg, alg);
+}
+
+struct n2_hmac_alg {
+	const char		*child_alg;
+	struct n2_ahash_alg	derived;
+};
+
+static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct ahash_alg *ahash_alg;
+
+	ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+	return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
+}
+
+struct n2_hash_ctx {
+	struct crypto_ahash		*fallback_tfm;
+};
+
+#define N2_HASH_KEY_MAX			32 /* HW limit for all HMAC requests */
+
+struct n2_hmac_ctx {
+	struct n2_hash_ctx		base;
+
+	struct crypto_shash		*child_shash;
+
+	int				hash_key_len;
+	unsigned char			hash_key[N2_HASH_KEY_MAX];
+};
+
+struct n2_hash_req_ctx {
+	union {
+		struct md5_state	md5;
+		struct sha1_state	sha1;
+		struct sha256_state	sha256;
+	} u;
+
+	struct ahash_request		fallback_req;
+};
+
+static int n2_hash_async_init(struct ahash_request *req)
+{
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int n2_hash_async_update(struct ahash_request *req)
+{
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
+
+	return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int n2_hash_async_final(struct ahash_request *req)
+{
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.result = req->result;
+
+	return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int n2_hash_async_finup(struct ahash_request *req)
+{
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
+	rctx->fallback_req.result = req->result;
+
+	return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
+{
+	return -ENOSYS;
+}
+
+static int n2_hash_async_noexport(struct ahash_request *req, void *out)
+{
+	return -ENOSYS;
+}
+
+static int n2_hash_cra_init(struct crypto_tfm *tfm)
+{
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct crypto_ahash *fallback_tfm;
+	int err;
+
+	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm)) {
+		pr_warning("Fallback driver '%s' could not be loaded!\n",
+			   fallback_driver_name);
+		err = PTR_ERR(fallback_tfm);
+		goto out;
+	}
+
+	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+					 crypto_ahash_reqsize(fallback_tfm)));
+
+	ctx->fallback_tfm = fallback_tfm;
+	return 0;
+
+out:
+	return err;
+}
+
+static void n2_hash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+
+	crypto_free_ahash(ctx->fallback_tfm);
+}
+
+static int n2_hmac_cra_init(struct crypto_tfm *tfm)
+{
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
+	struct crypto_ahash *fallback_tfm;
+	struct crypto_shash *child_shash;
+	int err;
+
+	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm)) {
+		pr_warning("Fallback driver '%s' could not be loaded!\n",
+			   fallback_driver_name);
+		err = PTR_ERR(fallback_tfm);
+		goto out;
+	}
+
+	child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
+	if (IS_ERR(child_shash)) {
+		pr_warning("Child shash '%s' could not be loaded!\n",
+			   n2alg->child_alg);
+		err = PTR_ERR(child_shash);
+		goto out_free_fallback;
+	}
+
+	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+					 crypto_ahash_reqsize(fallback_tfm)));
+
+	ctx->child_shash = child_shash;
+	ctx->base.fallback_tfm = fallback_tfm;
+	return 0;
+
+out_free_fallback:
+	crypto_free_ahash(fallback_tfm);
+
+out:
+	return err;
+}
+
+static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
+
+	crypto_free_ahash(ctx->base.fallback_tfm);
+	crypto_free_shash(ctx->child_shash);
+}
+
+static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
+				unsigned int keylen)
+{
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct crypto_shash *child_shash = ctx->child_shash;
+	struct crypto_ahash *fallback_tfm;
+	SHASH_DESC_ON_STACK(shash, child_shash);
+	int err, bs, ds;
+
+	fallback_tfm = ctx->base.fallback_tfm;
+	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
+	if (err)
+		return err;
+
+	shash->tfm = child_shash;
+	shash->flags = crypto_ahash_get_flags(tfm) &
+		CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	bs = crypto_shash_blocksize(child_shash);
+	ds = crypto_shash_digestsize(child_shash);
+	BUG_ON(ds > N2_HASH_KEY_MAX);
+	if (keylen > bs) {
+		err = crypto_shash_digest(shash, key, keylen,
+					  ctx->hash_key);
+		if (err)
+			return err;
+		keylen = ds;
+	} else if (keylen <= N2_HASH_KEY_MAX)
+		memcpy(ctx->hash_key, key, keylen);
+
+	ctx->hash_key_len = keylen;
+
+	return err;
+}
+
+static unsigned long wait_for_tail(struct spu_queue *qp)
+{
+	unsigned long head, hv_ret;
+
+	do {
+		hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
+		if (hv_ret != HV_EOK) {
+			pr_err("Hypervisor error on gethead\n");
+			break;
+		}
+		if (head == qp->tail) {
+			qp->head = head;
+			break;
+		}
+	} while (1);
+	return hv_ret;
+}
+
+static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
+					      struct cwq_initial_entry *ent)
+{
+	unsigned long hv_ret = spu_queue_submit(qp, ent);
+
+	if (hv_ret == HV_EOK)
+		hv_ret = wait_for_tail(qp);
+
+	return hv_ret;
+}
+
+static int n2_do_async_digest(struct ahash_request *req,
+			      unsigned int auth_type, unsigned int digest_size,
+			      unsigned int result_size, void *hash_loc,
+			      unsigned long auth_key, unsigned int auth_key_len)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cwq_initial_entry *ent;
+	struct crypto_hash_walk walk;
+	struct spu_queue *qp;
+	unsigned long flags;
+	int err = -ENODEV;
+	int nbytes, cpu;
+
+	/* The total effective length of the operation may not
+	 * exceed 2^16.
+	 */
+	if (unlikely(req->nbytes > (1 << 16))) {
+		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+		rctx->fallback_req.base.flags =
+			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+		rctx->fallback_req.nbytes = req->nbytes;
+		rctx->fallback_req.src = req->src;
+		rctx->fallback_req.result = req->result;
+
+		return crypto_ahash_digest(&rctx->fallback_req);
+	}
+
+	nbytes = crypto_hash_walk_first(req, &walk);
+
+	cpu = get_cpu();
+	qp = cpu_to_cwq[cpu];
+	if (!qp)
+		goto out;
+
+	spin_lock_irqsave(&qp->lock, flags);
+
+	/* XXX can do better, improve this later by doing a by-hand scatterlist
+	 * XXX walk, etc.
+	 */
+	ent = qp->q + qp->tail;
+
+	ent->control = control_word_base(nbytes, auth_key_len, 0,
+					 auth_type, digest_size,
+					 false, true, false, false,
+					 OPCODE_INPLACE_BIT |
+					 OPCODE_AUTH_MAC);
+	ent->src_addr = __pa(walk.data);
+	ent->auth_key_addr = auth_key;
+	ent->auth_iv_addr = __pa(hash_loc);
+	ent->final_auth_state_addr = 0UL;
+	ent->enc_key_addr = 0UL;
+	ent->enc_iv_addr = 0UL;
+	ent->dest_addr = __pa(hash_loc);
+
+	nbytes = crypto_hash_walk_done(&walk, 0);
+	while (nbytes > 0) {
+		ent = spu_queue_next(qp, ent);
+
+		ent->control = (nbytes - 1);
+		ent->src_addr = __pa(walk.data);
+		ent->auth_key_addr = 0UL;
+		ent->auth_iv_addr = 0UL;
+		ent->final_auth_state_addr = 0UL;
+		ent->enc_key_addr = 0UL;
+		ent->enc_iv_addr = 0UL;
+		ent->dest_addr = 0UL;
+
+		nbytes = crypto_hash_walk_done(&walk, 0);
+	}
+	ent->control |= CONTROL_END_OF_BLOCK;
+
+	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
+		err = -EINVAL;
+	else
+		err = 0;
+
+	spin_unlock_irqrestore(&qp->lock, flags);
+
+	if (!err)
+		memcpy(req->result, hash_loc, result_size);
+out:
+	put_cpu();
+
+	return err;
+}
+
+static int n2_hash_async_digest(struct ahash_request *req)
+{
+	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	int ds;
+
+	ds = n2alg->digest_size;
+	if (unlikely(req->nbytes == 0)) {
+		memcpy(req->result, n2alg->hash_zero, ds);
+		return 0;
+	}
+	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
+
+	return n2_do_async_digest(req, n2alg->auth_type,
+				  n2alg->hw_op_hashsz, ds,
+				  &rctx->u, 0UL, 0);
+}
+
+static int n2_hmac_async_digest(struct ahash_request *req)
+{
+	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ds;
+
+	ds = n2alg->derived.digest_size;
+	if (unlikely(req->nbytes == 0) ||
+	    unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
+		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+		rctx->fallback_req.base.flags =
+			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+		rctx->fallback_req.nbytes = req->nbytes;
+		rctx->fallback_req.src = req->src;
+		rctx->fallback_req.result = req->result;
+
+		return crypto_ahash_digest(&rctx->fallback_req);
+	}
+	memcpy(&rctx->u, n2alg->derived.hash_init,
+	       n2alg->derived.hw_op_hashsz);
+
+	return n2_do_async_digest(req, n2alg->derived.hmac_type,
+				  n2alg->derived.hw_op_hashsz, ds,
+				  &rctx->u,
+				  __pa(&ctx->hash_key),
+				  ctx->hash_key_len);
+}
+
+struct n2_cipher_context {
+	int			key_len;
+	int			enc_type;
+	union {
+		u8		aes[AES_MAX_KEY_SIZE];
+		u8		des[DES_KEY_SIZE];
+		u8		des3[3 * DES_KEY_SIZE];
+		u8		arc4[258]; /* S-box, X, Y */
+	} key;
+};
+
+#define N2_CHUNK_ARR_LEN	16
+
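+/* Each scatterlist fragment of a chunk is recorded as a 44-bit physical
+ * address plus a 20-bit length, packed into a single 64-bit word, so a
+ * chunk of up to N2_CHUNK_ARR_LEN fragments stays compact.
+ */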
+struct n2_crypto_chunk {
+	struct list_head	entry;
+	unsigned long		iv_paddr : 44;
+	unsigned long		arr_len : 20;
+	unsigned long		dest_paddr;
+	unsigned long		dest_final;
+	struct {
+		unsigned long	src_paddr : 44;
+		unsigned long	src_len : 20;
+	} arr[N2_CHUNK_ARR_LEN];
+};
+
+struct n2_request_context {
+	struct ablkcipher_walk	walk;
+	struct list_head	chunk_list;
+	struct n2_crypto_chunk	chunk;
+	u8			temp_iv[16];
+};
+
+/* The SPU allows some level of flexibility for partial cipher blocks
+ * being specified in a descriptor.
+ *
+ * It merely requires that every descriptor's length field is at least
+ * as large as the cipher block size.  This means that a cipher block
+ * can span at most 2 descriptors.  However, this does not allow a
+ * partial block to span into the final descriptor as that would
+ * violate the rule (since every descriptor's length must be at least
+ * the block size).  So, for example, assuming an 8 byte block size:
+ *
+ *	0xe --> 0xa --> 0x8
+ *
+ * is a valid length sequence, whereas:
+ *
+ *	0xe --> 0xb --> 0x7
+ *
+ * is not a valid sequence.
+ */
+
+struct n2_cipher_alg {
+	struct list_head	entry;
+	u8			enc_type;
+	struct crypto_alg	alg;
+};
+
+static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+
+	return container_of(alg, struct n2_cipher_alg, alg);
+}
+
+struct n2_cipher_request_context {
+	struct ablkcipher_walk	walk;
+};
+
+static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			 unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+
+	ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		ctx->enc_type |= ENC_TYPE_ALG_AES128;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->enc_type |= ENC_TYPE_ALG_AES192;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->enc_type |= ENC_TYPE_ALG_AES256;
+		break;
+	default:
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx->key_len = keylen;
+	memcpy(ctx->key.aes, key, keylen);
+	return 0;
+}
+
+static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			 unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+	int err;
+
+	ctx->enc_type = n2alg->enc_type;
+
+	if (keylen != DES_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	err = des_ekey(tmp, key);
+	if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	ctx->key_len = keylen;
+	memcpy(ctx->key.des, key, keylen);
+	return 0;
+}
+
+static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			  unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+
+	ctx->enc_type = n2alg->enc_type;
+
+	if (keylen != (3 * DES_KEY_SIZE)) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->key_len = keylen;
+	memcpy(ctx->key.des3, key, keylen);
+	return 0;
+}
+
+static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			  unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+	u8 *s = ctx->key.arc4;
+	u8 *x = s + 256;
+	u8 *y = x + 1;
+	int i, j, k;
+
+	ctx->enc_type = n2alg->enc_type;
+
+	j = k = 0;
+	*x = 0;
+	*y = 0;
+	for (i = 0; i < 256; i++)
+		s[i] = i;
+	for (i = 0; i < 256; i++) {
+		u8 a = s[i];
+		j = (j + key[k] + a) & 0xff;
+		s[i] = s[j];
+		s[j] = a;
+		if (++k >= keylen)
+			k = 0;
+	}
+
+	return 0;
+}
+
+static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
+{
+	int this_len = nbytes;
+
+	this_len -= (nbytes & (block_size - 1));
+	return this_len > (1 << 16) ? (1 << 16) : this_len;
+}
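+
+/* Example: with an 8 byte block size and nbytes = 100, the length is
+ * rounded down to 96 (a whole number of blocks); anything larger is
+ * capped at 1 << 16 bytes, the largest length a single descriptor is
+ * allowed to carry here.
+ */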
+
+static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
+			    struct spu_queue *qp, bool encrypt)
+{
+	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+	struct cwq_initial_entry *ent;
+	bool in_place;
+	int i;
+
+	ent = spu_queue_alloc(qp, cp->arr_len);
+	if (!ent) {
+		pr_info("spu_queue_alloc() of %d entries failed\n",
+			cp->arr_len);
+		return -EBUSY;
+	}
+
+	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
+
+	ent->control = control_word_base(cp->arr[0].src_len,
+					 0, ctx->enc_type, 0, 0,
+					 false, true, false, encrypt,
+					 OPCODE_ENCRYPT |
+					 (in_place ? OPCODE_INPLACE_BIT : 0));
+	ent->src_addr = cp->arr[0].src_paddr;
+	ent->auth_key_addr = 0UL;
+	ent->auth_iv_addr = 0UL;
+	ent->final_auth_state_addr = 0UL;
+	ent->enc_key_addr = __pa(&ctx->key);
+	ent->enc_iv_addr = cp->iv_paddr;
+	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
+
+	for (i = 1; i < cp->arr_len; i++) {
+		ent = spu_queue_next(qp, ent);
+
+		ent->control = cp->arr[i].src_len - 1;
+		ent->src_addr = cp->arr[i].src_paddr;
+		ent->auth_key_addr = 0UL;
+		ent->auth_iv_addr = 0UL;
+		ent->final_auth_state_addr = 0UL;
+		ent->enc_key_addr = 0UL;
+		ent->enc_iv_addr = 0UL;
+		ent->dest_addr = 0UL;
+	}
+	ent->control |= CONTROL_END_OF_BLOCK;
+
+	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
+}
+
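+/* Walk the request's scatterlists and split the work into chunks.  A new
+ * chunk is started whenever the in-place property changes, an
+ * out-of-place destination stops being contiguous with the previous
+ * fragment, the per-chunk fragment array fills up, or the accumulated
+ * length would exceed 64KB.
+ */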
+static int n2_compute_chunks(struct ablkcipher_request *req)
+{
+	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+	struct ablkcipher_walk *walk = &rctx->walk;
+	struct n2_crypto_chunk *chunk;
+	unsigned long dest_prev;
+	unsigned int tot_len;
+	bool prev_in_place;
+	int err, nbytes;
+
+	ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
+	err = ablkcipher_walk_phys(req, walk);
+	if (err)
+		return err;
+
+	INIT_LIST_HEAD(&rctx->chunk_list);
+
+	chunk = &rctx->chunk;
+	INIT_LIST_HEAD(&chunk->entry);
+
+	chunk->iv_paddr = 0UL;
+	chunk->arr_len = 0;
+	chunk->dest_paddr = 0UL;
+
+	prev_in_place = false;
+	dest_prev = ~0UL;
+	tot_len = 0;
+
+	while ((nbytes = walk->nbytes) != 0) {
+		unsigned long dest_paddr, src_paddr;
+		bool in_place;
+		int this_len;
+
+		src_paddr = (page_to_phys(walk->src.page) +
+			     walk->src.offset);
+		dest_paddr = (page_to_phys(walk->dst.page) +
+			      walk->dst.offset);
+		in_place = (src_paddr == dest_paddr);
+		this_len = cipher_descriptor_len(nbytes, walk->blocksize);
+
+		if (chunk->arr_len != 0) {
+			if (in_place != prev_in_place ||
+			    (!prev_in_place &&
+			     dest_paddr != dest_prev) ||
+			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
+			    tot_len + this_len > (1 << 16)) {
+				chunk->dest_final = dest_prev;
+				list_add_tail(&chunk->entry,
+					      &rctx->chunk_list);
+				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
+				if (!chunk) {
+					err = -ENOMEM;
+					break;
+				}
+				INIT_LIST_HEAD(&chunk->entry);
+			}
+		}
+		if (chunk->arr_len == 0) {
+			chunk->dest_paddr = dest_paddr;
+			tot_len = 0;
+		}
+		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
+		chunk->arr[chunk->arr_len].src_len = this_len;
+		chunk->arr_len++;
+
+		dest_prev = dest_paddr + this_len;
+		prev_in_place = in_place;
+		tot_len += this_len;
+
+		err = ablkcipher_walk_done(req, walk, nbytes - this_len);
+		if (err)
+			break;
+	}
+	if (!err && chunk->arr_len != 0) {
+		chunk->dest_final = dest_prev;
+		list_add_tail(&chunk->entry, &rctx->chunk_list);
+	}
+
+	return err;
+}
+
+static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
+{
+	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+	struct n2_crypto_chunk *c, *tmp;
+
+	if (final_iv)
+		memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
+
+	ablkcipher_walk_complete(&rctx->walk);
+	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
+		list_del(&c->entry);
+		if (unlikely(c != &rctx->chunk))
+			kfree(c);
+	}
+}
+
+static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
+{
+	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+	struct crypto_tfm *tfm = req->base.tfm;
+	int err = n2_compute_chunks(req);
+	struct n2_crypto_chunk *c, *tmp;
+	unsigned long flags, hv_ret;
+	struct spu_queue *qp;
+
+	if (err)
+		return err;
+
+	qp = cpu_to_cwq[get_cpu()];
+	err = -ENODEV;
+	if (!qp)
+		goto out;
+
+	spin_lock_irqsave(&qp->lock, flags);
+
+	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
+		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
+		if (err)
+			break;
+		list_del(&c->entry);
+		if (unlikely(c != &rctx->chunk))
+			kfree(c);
+	}
+	if (!err) {
+		hv_ret = wait_for_tail(qp);
+		if (hv_ret != HV_EOK)
+			err = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&qp->lock, flags);
+
+out:
+	put_cpu();
+
+	n2_chunk_complete(req, NULL);
+	return err;
+}
+
+static int n2_encrypt_ecb(struct ablkcipher_request *req)
+{
+	return n2_do_ecb(req, true);
+}
+
+static int n2_decrypt_ecb(struct ablkcipher_request *req)
+{
+	return n2_do_ecb(req, false);
+}
+
+static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
+{
+	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned long flags, hv_ret, iv_paddr;
+	int err = n2_compute_chunks(req);
+	struct n2_crypto_chunk *c, *tmp;
+	struct spu_queue *qp;
+	void *final_iv_addr;
+
+	final_iv_addr = NULL;
+
+	if (err)
+		return err;
+
+	qp = cpu_to_cwq[get_cpu()];
+	err = -ENODEV;
+	if (!qp)
+		goto out;
+
+	spin_lock_irqsave(&qp->lock, flags);
+
+	if (encrypt) {
+		iv_paddr = __pa(rctx->walk.iv);
+		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
+					 entry) {
+			c->iv_paddr = iv_paddr;
+			err = __n2_crypt_chunk(tfm, c, qp, true);
+			if (err)
+				break;
+			iv_paddr = c->dest_final - rctx->walk.blocksize;
+			list_del(&c->entry);
+			if (unlikely(c != &rctx->chunk))
+				kfree(c);
+		}
+		final_iv_addr = __va(iv_paddr);
+	} else {
+		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
+						 entry) {
+			if (c == &rctx->chunk) {
+				iv_paddr = __pa(rctx->walk.iv);
+			} else {
+				iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
+					    tmp->arr[tmp->arr_len-1].src_len -
+					    rctx->walk.blocksize);
+			}
+			if (!final_iv_addr) {
+				unsigned long pa;
+
+				pa = (c->arr[c->arr_len-1].src_paddr +
+				      c->arr[c->arr_len-1].src_len -
+				      rctx->walk.blocksize);
+				final_iv_addr = rctx->temp_iv;
+				memcpy(rctx->temp_iv, __va(pa),
+				       rctx->walk.blocksize);
+			}
+			c->iv_paddr = iv_paddr;
+			err = __n2_crypt_chunk(tfm, c, qp, false);
+			if (err)
+				break;
+			list_del(&c->entry);
+			if (unlikely(c != &rctx->chunk))
+				kfree(c);
+		}
+	}
+	if (!err) {
+		hv_ret = wait_for_tail(qp);
+		if (hv_ret != HV_EOK)
+			err = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&qp->lock, flags);
+
+out:
+	put_cpu();
+
+	n2_chunk_complete(req, err ? NULL : final_iv_addr);
+	return err;
+}
+
+static int n2_encrypt_chaining(struct ablkcipher_request *req)
+{
+	return n2_do_chaining(req, true);
+}
+
+static int n2_decrypt_chaining(struct ablkcipher_request *req)
+{
+	return n2_do_chaining(req, false);
+}
+
+struct n2_cipher_tmpl {
+	const char		*name;
+	const char		*drv_name;
+	u8			block_size;
+	u8			enc_type;
+	struct ablkcipher_alg	ablkcipher;
+};
+
+static const struct n2_cipher_tmpl cipher_tmpls[] = {
+	/* ARC4: only ECB is supported (chaining bits ignored) */
+	{	.name		= "ecb(arc4)",
+		.drv_name	= "ecb-arc4",
+		.block_size	= 1,
+		.enc_type	= (ENC_TYPE_ALG_RC4_STREAM |
+				   ENC_TYPE_CHAINING_ECB),
+		.ablkcipher	= {
+			.min_keysize	= 1,
+			.max_keysize	= 256,
+			.setkey		= n2_arc4_setkey,
+			.encrypt	= n2_encrypt_ecb,
+			.decrypt	= n2_decrypt_ecb,
+		},
+	},
+
+	/* DES: ECB CBC and CFB are supported */
+	{	.name		= "ecb(des)",
+		.drv_name	= "ecb-des",
+		.block_size	= DES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_DES |
+				   ENC_TYPE_CHAINING_ECB),
+		.ablkcipher	= {
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.setkey		= n2_des_setkey,
+			.encrypt	= n2_encrypt_ecb,
+			.decrypt	= n2_decrypt_ecb,
+		},
+	},
+	{	.name		= "cbc(des)",
+		.drv_name	= "cbc-des",
+		.block_size	= DES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_DES |
+				   ENC_TYPE_CHAINING_CBC),
+		.ablkcipher	= {
+			.ivsize		= DES_BLOCK_SIZE,
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.setkey		= n2_des_setkey,
+			.encrypt	= n2_encrypt_chaining,
+			.decrypt	= n2_decrypt_chaining,
+		},
+	},
+	{	.name		= "cfb(des)",
+		.drv_name	= "cfb-des",
+		.block_size	= DES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_DES |
+				   ENC_TYPE_CHAINING_CFB),
+		.ablkcipher	= {
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.setkey		= n2_des_setkey,
+			.encrypt	= n2_encrypt_chaining,
+			.decrypt	= n2_decrypt_chaining,
+		},
+	},
+
+	/* 3DES: ECB CBC and CFB are supported */
+	{	.name		= "ecb(des3_ede)",
+		.drv_name	= "ecb-3des",
+		.block_size	= DES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_3DES |
+				   ENC_TYPE_CHAINING_ECB),
+		.ablkcipher	= {
+			.min_keysize	= 3 * DES_KEY_SIZE,
+			.max_keysize	= 3 * DES_KEY_SIZE,
+			.setkey		= n2_3des_setkey,
+			.encrypt	= n2_encrypt_ecb,
+			.decrypt	= n2_decrypt_ecb,
+		},
+	},
+	{	.name		= "cbc(des3_ede)",
+		.drv_name	= "cbc-3des",
+		.block_size	= DES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_3DES |
+				   ENC_TYPE_CHAINING_CBC),
+		.ablkcipher	= {
+			.ivsize		= DES_BLOCK_SIZE,
+			.min_keysize	= 3 * DES_KEY_SIZE,
+			.max_keysize	= 3 * DES_KEY_SIZE,
+			.setkey		= n2_3des_setkey,
+			.encrypt	= n2_encrypt_chaining,
+			.decrypt	= n2_decrypt_chaining,
+		},
+	},
+	{	.name		= "cfb(des3_ede)",
+		.drv_name	= "cfb-3des",
+		.block_size	= DES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_3DES |
+				   ENC_TYPE_CHAINING_CFB),
+		.ablkcipher	= {
+			.min_keysize	= 3 * DES_KEY_SIZE,
+			.max_keysize	= 3 * DES_KEY_SIZE,
+			.setkey		= n2_3des_setkey,
+			.encrypt	= n2_encrypt_chaining,
+			.decrypt	= n2_decrypt_chaining,
+		},
+	},
+	/* AES: ECB CBC and CTR are supported */
+	{	.name		= "ecb(aes)",
+		.drv_name	= "ecb-aes",
+		.block_size	= AES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_AES128 |
+				   ENC_TYPE_CHAINING_ECB),
+		.ablkcipher	= {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= n2_aes_setkey,
+			.encrypt	= n2_encrypt_ecb,
+			.decrypt	= n2_decrypt_ecb,
+		},
+	},
+	{	.name		= "cbc(aes)",
+		.drv_name	= "cbc-aes",
+		.block_size	= AES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_AES128 |
+				   ENC_TYPE_CHAINING_CBC),
+		.ablkcipher	= {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= n2_aes_setkey,
+			.encrypt	= n2_encrypt_chaining,
+			.decrypt	= n2_decrypt_chaining,
+		},
+	},
+	{	.name		= "ctr(aes)",
+		.drv_name	= "ctr-aes",
+		.block_size	= AES_BLOCK_SIZE,
+		.enc_type	= (ENC_TYPE_ALG_AES128 |
+				   ENC_TYPE_CHAINING_COUNTER),
+		.ablkcipher	= {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= n2_aes_setkey,
+			.encrypt	= n2_encrypt_chaining,
+			.decrypt	= n2_encrypt_chaining,
+		},
+	},
+
+};
+#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
+
+static LIST_HEAD(cipher_algs);
+
+struct n2_hash_tmpl {
+	const char	*name;
+	const u8	*hash_zero;
+	const u32	*hash_init;
+	u8		hw_op_hashsz;
+	u8		digest_size;
+	u8		block_size;
+	u8		auth_type;
+	u8		hmac_type;
+};
+
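+/* MD5 is specified in terms of little-endian state words, so its initial
+ * values are stored with cpu_to_le32(); the SHA-1/SHA-256 initial values
+ * are big-endian and can be used as-is on sparc64.
+ */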
+static const u32 md5_init[MD5_HASH_WORDS] = {
+	cpu_to_le32(MD5_H0),
+	cpu_to_le32(MD5_H1),
+	cpu_to_le32(MD5_H2),
+	cpu_to_le32(MD5_H3),
+};
+static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
+	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
+};
+static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
+	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
+};
+static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
+	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
+};
+
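+/* Note that sha224 uses the hardware's SHA-256 engine with the SHA-224
+ * initial values; only the first 28 bytes of the 32-byte state are
+ * reported as the digest.  Its AUTH_TYPE_RESERVED hmac_type means no
+ * hmac(sha224) variant is registered.
+ */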
+static const struct n2_hash_tmpl hash_tmpls[] = {
+	{ .name		= "md5",
+	  .hash_zero	= md5_zero_message_hash,
+	  .hash_init	= md5_init,
+	  .auth_type	= AUTH_TYPE_MD5,
+	  .hmac_type	= AUTH_TYPE_HMAC_MD5,
+	  .hw_op_hashsz	= MD5_DIGEST_SIZE,
+	  .digest_size	= MD5_DIGEST_SIZE,
+	  .block_size	= MD5_HMAC_BLOCK_SIZE },
+	{ .name		= "sha1",
+	  .hash_zero	= sha1_zero_message_hash,
+	  .hash_init	= sha1_init,
+	  .auth_type	= AUTH_TYPE_SHA1,
+	  .hmac_type	= AUTH_TYPE_HMAC_SHA1,
+	  .hw_op_hashsz	= SHA1_DIGEST_SIZE,
+	  .digest_size	= SHA1_DIGEST_SIZE,
+	  .block_size	= SHA1_BLOCK_SIZE },
+	{ .name		= "sha256",
+	  .hash_zero	= sha256_zero_message_hash,
+	  .hash_init	= sha256_init,
+	  .auth_type	= AUTH_TYPE_SHA256,
+	  .hmac_type	= AUTH_TYPE_HMAC_SHA256,
+	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
+	  .digest_size	= SHA256_DIGEST_SIZE,
+	  .block_size	= SHA256_BLOCK_SIZE },
+	{ .name		= "sha224",
+	  .hash_zero	= sha224_zero_message_hash,
+	  .hash_init	= sha224_init,
+	  .auth_type	= AUTH_TYPE_SHA256,
+	  .hmac_type	= AUTH_TYPE_RESERVED,
+	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
+	  .digest_size	= SHA224_DIGEST_SIZE,
+	  .block_size	= SHA224_BLOCK_SIZE },
+};
+#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
+
+static LIST_HEAD(ahash_algs);
+static LIST_HEAD(hmac_algs);
+
+static int algs_registered;
+
+static void __n2_unregister_algs(void)
+{
+	struct n2_cipher_alg *cipher, *cipher_tmp;
+	struct n2_ahash_alg *alg, *alg_tmp;
+	struct n2_hmac_alg *hmac, *hmac_tmp;
+
+	list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
+		crypto_unregister_alg(&cipher->alg);
+		list_del(&cipher->entry);
+		kfree(cipher);
+	}
+	list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
+		crypto_unregister_ahash(&hmac->derived.alg);
+		list_del(&hmac->derived.entry);
+		kfree(hmac);
+	}
+	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
+		crypto_unregister_ahash(&alg->alg);
+		list_del(&alg->entry);
+		kfree(alg);
+	}
+}
+
+static int n2_cipher_cra_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
+	return 0;
+}
+
+static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
+{
+	struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+	struct crypto_alg *alg;
+	int err;
+
+	if (!p)
+		return -ENOMEM;
+
+	alg = &p->alg;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
+	alg->cra_priority = N2_CRA_PRIORITY;
+	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+			 CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
+	alg->cra_blocksize = tmpl->block_size;
+	p->enc_type = tmpl->enc_type;
+	alg->cra_ctxsize = sizeof(struct n2_cipher_context);
+	alg->cra_type = &crypto_ablkcipher_type;
+	alg->cra_u.ablkcipher = tmpl->ablkcipher;
+	alg->cra_init = n2_cipher_cra_init;
+	alg->cra_module = THIS_MODULE;
+
+	list_add(&p->entry, &cipher_algs);
+	err = crypto_register_alg(alg);
+	if (err) {
+		pr_err("%s alg registration failed\n", alg->cra_name);
+		list_del(&p->entry);
+		kfree(p);
+	} else {
+		pr_info("%s alg registered\n", alg->cra_name);
+	}
+	return err;
+}
+
+static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
+{
+	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+	struct ahash_alg *ahash;
+	struct crypto_alg *base;
+	int err;
+
+	if (!p)
+		return -ENOMEM;
+
+	p->child_alg = n2ahash->alg.halg.base.cra_name;
+	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
+	INIT_LIST_HEAD(&p->derived.entry);
+
+	ahash = &p->derived.alg;
+	ahash->digest = n2_hmac_async_digest;
+	ahash->setkey = n2_hmac_async_setkey;
+
+	base = &ahash->halg.base;
+	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
+	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
+
+	base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
+	base->cra_init = n2_hmac_cra_init;
+	base->cra_exit = n2_hmac_cra_exit;
+
+	list_add(&p->derived.entry, &hmac_algs);
+	err = crypto_register_ahash(ahash);
+	if (err) {
+		pr_err("%s alg registration failed\n", base->cra_name);
+		list_del(&p->derived.entry);
+		kfree(p);
+	} else {
+		pr_info("%s alg registered\n", base->cra_name);
+	}
+	return err;
+}
+
+static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
+{
+	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+	struct hash_alg_common *halg;
+	struct crypto_alg *base;
+	struct ahash_alg *ahash;
+	int err;
+
+	if (!p)
+		return -ENOMEM;
+
+	p->hash_zero = tmpl->hash_zero;
+	p->hash_init = tmpl->hash_init;
+	p->auth_type = tmpl->auth_type;
+	p->hmac_type = tmpl->hmac_type;
+	p->hw_op_hashsz = tmpl->hw_op_hashsz;
+	p->digest_size = tmpl->digest_size;
+
+	ahash = &p->alg;
+	ahash->init = n2_hash_async_init;
+	ahash->update = n2_hash_async_update;
+	ahash->final = n2_hash_async_final;
+	ahash->finup = n2_hash_async_finup;
+	ahash->digest = n2_hash_async_digest;
+	ahash->export = n2_hash_async_noexport;
+	ahash->import = n2_hash_async_noimport;
+
+	halg = &ahash->halg;
+	halg->digestsize = tmpl->digest_size;
+
+	base = &halg->base;
+	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
+	base->cra_priority = N2_CRA_PRIORITY;
+	base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+			  CRYPTO_ALG_NEED_FALLBACK;
+	base->cra_blocksize = tmpl->block_size;
+	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
+	base->cra_module = THIS_MODULE;
+	base->cra_init = n2_hash_cra_init;
+	base->cra_exit = n2_hash_cra_exit;
+
+	list_add(&p->entry, &ahash_algs);
+	err = crypto_register_ahash(ahash);
+	if (err) {
+		pr_err("%s alg registration failed\n", base->cra_name);
+		list_del(&p->entry);
+		kfree(p);
+	} else {
+		pr_info("%s alg registered\n", base->cra_name);
+	}
+	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
+		err = __n2_register_one_hmac(p);
+	return err;
+}
+
+static int n2_register_algs(void)
+{
+	int i, err = 0;
+
+	mutex_lock(&spu_lock);
+	if (algs_registered++)
+		goto out;
+
+	for (i = 0; i < NUM_HASH_TMPLS; i++) {
+		err = __n2_register_one_ahash(&hash_tmpls[i]);
+		if (err) {
+			__n2_unregister_algs();
+			goto out;
+		}
+	}
+	for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
+		err = __n2_register_one_cipher(&cipher_tmpls[i]);
+		if (err) {
+			__n2_unregister_algs();
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&spu_lock);
+	return err;
+}
+
+static void n2_unregister_algs(void)
+{
+	mutex_lock(&spu_lock);
+	if (!--algs_registered)
+		__n2_unregister_algs();
+	mutex_unlock(&spu_lock);
+}
+
+/* To map CWQ queues to interrupt sources, the hypervisor API provides
+ * a devino.  This isn't very useful to us because all of the
+ * interrupts listed in the device_node have been translated to
+ * Linux virtual IRQ cookie numbers.
+ *
+ * So we have to back-translate, going through the 'intr' and 'ino'
+ * property tables of the n2cp MDESC node, matching it with the OF
+ * 'interrupts' property entries, in order to figure out which
+ * devino goes to which already-translated IRQ.
+ */
+static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
+			     unsigned long dev_ino)
+{
+	const unsigned int *dev_intrs;
+	unsigned int intr;
+	int i;
+
+	for (i = 0; i < ip->num_intrs; i++) {
+		if (ip->ino_table[i].ino == dev_ino)
+			break;
+	}
+	if (i == ip->num_intrs)
+		return -ENODEV;
+
+	intr = ip->ino_table[i].intr;
+
+	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
+	if (!dev_intrs)
+		return -ENODEV;
+
+	for (i = 0; i < dev->archdata.num_irqs; i++) {
+		if (dev_intrs[i] == intr)
+			return i;
+	}
+
+	return -ENODEV;
+}
+
+static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
+		       const char *irq_name, struct spu_queue *p,
+		       irq_handler_t handler)
+{
+	unsigned long herr;
+	int index;
+
+	herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
+	if (herr)
+		return -EINVAL;
+
+	index = find_devino_index(dev, ip, p->devino);
+	if (index < 0)
+		return index;
+
+	p->irq = dev->archdata.irqs[index];
+
+	sprintf(p->irq_name, "%s-%d", irq_name, index);
+
+	return request_irq(p->irq, handler, 0, p->irq_name, p);
+}
+
+static struct kmem_cache *queue_cache[2];
+
+static void *new_queue(unsigned long q_type)
+{
+	return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
+}
+
+static void free_queue(void *p, unsigned long q_type)
+{
+	kmem_cache_free(queue_cache[q_type - 1], p);
+}
+
+static int queue_cache_init(void)
+{
+	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
+		queue_cache[HV_NCS_QTYPE_MAU - 1] =
+			kmem_cache_create("mau_queue",
+					  (MAU_NUM_ENTRIES *
+					   MAU_ENTRY_SIZE),
+					  MAU_ENTRY_SIZE, 0, NULL);
+	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
+		return -ENOMEM;
+
+	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
+		queue_cache[HV_NCS_QTYPE_CWQ - 1] =
+			kmem_cache_create("cwq_queue",
+					  (CWQ_NUM_ENTRIES *
+					   CWQ_ENTRY_SIZE),
+					  CWQ_ENTRY_SIZE, 0, NULL);
+	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
+		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+		queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void queue_cache_destroy(void)
+{
+	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+	queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+	queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
+}
+
+static long spu_queue_register_workfn(void *arg)
+{
+	struct spu_qreg *qr = arg;
+	struct spu_queue *p = qr->queue;
+	unsigned long q_type = qr->type;
+	unsigned long hv_ret;
+
+	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
+				 CWQ_NUM_ENTRIES, &p->qhandle);
+	if (!hv_ret)
+		sun4v_ncs_sethead_marker(p->qhandle, 0);
+
+	return hv_ret ? -EINVAL : 0;
+}
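+
+/* The qconf hypercall is issued from one of the CPUs that actually share
+ * this SPU (see the cpumask built in spu_mdesc_walk_arcs()), which is why
+ * registration is funnelled through work_on_cpu_safe() below rather than
+ * being done on whatever CPU happens to run the probe.
+ */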
+
+static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
+{
+	int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
+	struct spu_qreg qr = { .queue = p, .type = q_type };
+
+	return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
+}
+
+static int spu_queue_setup(struct spu_queue *p)
+{
+	int err;
+
+	p->q = new_queue(p->q_type);
+	if (!p->q)
+		return -ENOMEM;
+
+	err = spu_queue_register(p, p->q_type);
+	if (err) {
+		free_queue(p->q, p->q_type);
+		p->q = NULL;
+	}
+
+	return err;
+}
+
+static void spu_queue_destroy(struct spu_queue *p)
+{
+	unsigned long hv_ret;
+
+	if (!p->q)
+		return;
+
+	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
+
+	if (!hv_ret)
+		free_queue(p->q, p->q_type);
+}
+
+static void spu_list_destroy(struct list_head *list)
+{
+	struct spu_queue *p, *n;
+
+	list_for_each_entry_safe(p, n, list, list) {
+		int i;
+
+		for (i = 0; i < NR_CPUS; i++) {
+			if (cpu_to_cwq[i] == p)
+				cpu_to_cwq[i] = NULL;
+		}
+
+		if (p->irq) {
+			free_irq(p->irq, p);
+			p->irq = 0;
+		}
+		spu_queue_destroy(p);
+		list_del(&p->list);
+		kfree(p);
+	}
+}
+
+/* Walk the backward arcs of a CWQ 'exec-unit' node,
+ * gathering cpu membership information.
+ */
+static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
+			       struct platform_device *dev,
+			       u64 node, struct spu_queue *p,
+			       struct spu_queue **table)
+{
+	u64 arc;
+
+	mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
+		u64 tgt = mdesc_arc_target(mdesc, arc);
+		const char *name = mdesc_node_name(mdesc, tgt);
+		const u64 *id;
+
+		if (strcmp(name, "cpu"))
+			continue;
+		id = mdesc_get_property(mdesc, tgt, "id", NULL);
+		if (table[*id] != NULL) {
+			dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
+				dev->dev.of_node);
+			return -EINVAL;
+		}
+		cpumask_set_cpu(*id, &p->sharing);
+		table[*id] = p;
+	}
+	return 0;
+}
+
+/* Process an 'exec-unit' MDESC node of type 'cwq'.  */
+static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
+			    struct platform_device *dev, struct mdesc_handle *mdesc,
+			    u64 node, const char *iname, unsigned long q_type,
+			    irq_handler_t handler, struct spu_queue **table)
+{
+	struct spu_queue *p;
+	int err;
+
+	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
+	if (!p) {
+		dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
+			dev->dev.of_node);
+		return -ENOMEM;
+	}
+
+	cpumask_clear(&p->sharing);
+	spin_lock_init(&p->lock);
+	p->q_type = q_type;
+	INIT_LIST_HEAD(&p->jobs);
+	list_add(&p->list, list);
+
+	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
+	if (err)
+		return err;
+
+	err = spu_queue_setup(p);
+	if (err)
+		return err;
+
+	return spu_map_ino(dev, ip, iname, p, handler);
+}
+
+static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
+			  struct spu_mdesc_info *ip, struct list_head *list,
+			  const char *exec_name, unsigned long q_type,
+			  irq_handler_t handler, struct spu_queue **table)
+{
+	int err = 0;
+	u64 node;
+
+	mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
+		const char *type;
+
+		type = mdesc_get_property(mdesc, node, "type", NULL);
+		if (!type || strcmp(type, exec_name))
+			continue;
+
+		err = handle_exec_unit(ip, list, dev, mdesc, node,
+				       exec_name, q_type, handler, table);
+		if (err) {
+			spu_list_destroy(list);
+			break;
+		}
+	}
+
+	return err;
+}
+
+static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
+			 struct spu_mdesc_info *ip)
+{
+	const u64 *ino;
+	int ino_len;
+	int i;
+
+	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
+	if (!ino) {
+		pr_err("No 'ino' property found in MDESC node\n");
+		return -ENODEV;
+	}
+
+	ip->num_intrs = ino_len / sizeof(u64);
+	ip->ino_table = kzalloc((sizeof(struct ino_blob) *
+				 ip->num_intrs),
+				GFP_KERNEL);
+	if (!ip->ino_table)
+		return -ENOMEM;
+
+	for (i = 0; i < ip->num_intrs; i++) {
+		struct ino_blob *b = &ip->ino_table[i];
+		b->intr = i + 1;
+		b->ino = ino[i];
+	}
+
+	return 0;
+}
+
+static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
+				struct platform_device *dev,
+				struct spu_mdesc_info *ip,
+				const char *node_name)
+{
+	const unsigned int *reg;
+	u64 node;
+
+	reg = of_get_property(dev->dev.of_node, "reg", NULL);
+	if (!reg)
+		return -ENODEV;
+
+	mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
+		const char *name;
+		const u64 *chdl;
+
+		name = mdesc_get_property(mdesc, node, "name", NULL);
+		if (!name || strcmp(name, node_name))
+			continue;
+		chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
+		if (!chdl || (*chdl != *reg))
+			continue;
+		ip->cfg_handle = *chdl;
+		return get_irq_props(mdesc, node, ip);
+	}
+
+	return -ENODEV;
+}
+
+static unsigned long n2_spu_hvapi_major;
+static unsigned long n2_spu_hvapi_minor;
+
+static int n2_spu_hvapi_register(void)
+{
+	int err;
+
+	n2_spu_hvapi_major = 2;
+	n2_spu_hvapi_minor = 0;
+
+	err = sun4v_hvapi_register(HV_GRP_NCS,
+				   n2_spu_hvapi_major,
+				   &n2_spu_hvapi_minor);
+
+	if (!err)
+		pr_info("Registered NCS HVAPI version %lu.%lu\n",
+			n2_spu_hvapi_major,
+			n2_spu_hvapi_minor);
+
+	return err;
+}
+
+static void n2_spu_hvapi_unregister(void)
+{
+	sun4v_hvapi_unregister(HV_GRP_NCS);
+}
+
+static int global_ref;
+
+static int grab_global_resources(void)
+{
+	int err = 0;
+
+	mutex_lock(&spu_lock);
+
+	if (global_ref++)
+		goto out;
+
+	err = n2_spu_hvapi_register();
+	if (err)
+		goto out;
+
+	err = queue_cache_init();
+	if (err)
+		goto out_hvapi_release;
+
+	err = -ENOMEM;
+	cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
+			     GFP_KERNEL);
+	if (!cpu_to_cwq)
+		goto out_queue_cache_destroy;
+
+	cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
+			     GFP_KERNEL);
+	if (!cpu_to_mau)
+		goto out_free_cwq_table;
+
+	err = 0;
+
+out:
+	if (err)
+		global_ref--;
+	mutex_unlock(&spu_lock);
+	return err;
+
+out_free_cwq_table:
+	kfree(cpu_to_cwq);
+	cpu_to_cwq = NULL;
+
+out_queue_cache_destroy:
+	queue_cache_destroy();
+
+out_hvapi_release:
+	n2_spu_hvapi_unregister();
+	goto out;
+}
+
+static void release_global_resources(void)
+{
+	mutex_lock(&spu_lock);
+	if (!--global_ref) {
+		kfree(cpu_to_cwq);
+		cpu_to_cwq = NULL;
+
+		kfree(cpu_to_mau);
+		cpu_to_mau = NULL;
+
+		queue_cache_destroy();
+		n2_spu_hvapi_unregister();
+	}
+	mutex_unlock(&spu_lock);
+}
+
+static struct n2_crypto *alloc_n2cp(void)
+{
+	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
+
+	if (np)
+		INIT_LIST_HEAD(&np->cwq_list);
+
+	return np;
+}
+
+static void free_n2cp(struct n2_crypto *np)
+{
+	kfree(np->cwq_info.ino_table);
+	np->cwq_info.ino_table = NULL;
+
+	kfree(np);
+}
+
+static void n2_spu_driver_version(void)
+{
+	static int n2_spu_version_printed;
+
+	if (n2_spu_version_printed++ == 0)
+		pr_info("%s", version);
+}
+
+static int n2_crypto_probe(struct platform_device *dev)
+{
+	struct mdesc_handle *mdesc;
+	struct n2_crypto *np;
+	int err;
+
+	n2_spu_driver_version();
+
+	pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
+
+	np = alloc_n2cp();
+	if (!np) {
+		dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
+			dev->dev.of_node);
+		return -ENOMEM;
+	}
+
+	err = grab_global_resources();
+	if (err) {
+		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
+			dev->dev.of_node);
+		goto out_free_n2cp;
+	}
+
+	mdesc = mdesc_grab();
+
+	if (!mdesc) {
+		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
+			dev->dev.of_node);
+		err = -ENODEV;
+		goto out_free_global;
+	}
+	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
+	if (err) {
+		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
+			dev->dev.of_node);
+		mdesc_release(mdesc);
+		goto out_free_global;
+	}
+
+	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
+			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
+			     cpu_to_cwq);
+	mdesc_release(mdesc);
+
+	if (err) {
+		dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
+			dev->dev.of_node);
+		goto out_free_global;
+	}
+
+	err = n2_register_algs();
+	if (err) {
+		dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
+			dev->dev.of_node);
+		goto out_free_spu_list;
+	}
+
+	dev_set_drvdata(&dev->dev, np);
+
+	return 0;
+
+out_free_spu_list:
+	spu_list_destroy(&np->cwq_list);
+
+out_free_global:
+	release_global_resources();
+
+out_free_n2cp:
+	free_n2cp(np);
+
+	return err;
+}
+
+static int n2_crypto_remove(struct platform_device *dev)
+{
+	struct n2_crypto *np = dev_get_drvdata(&dev->dev);
+
+	n2_unregister_algs();
+
+	spu_list_destroy(&np->cwq_list);
+
+	release_global_resources();
+
+	free_n2cp(np);
+
+	return 0;
+}
+
+static struct n2_mau *alloc_ncp(void)
+{
+	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
+
+	if (mp)
+		INIT_LIST_HEAD(&mp->mau_list);
+
+	return mp;
+}
+
+static void free_ncp(struct n2_mau *mp)
+{
+	kfree(mp->mau_info.ino_table);
+	mp->mau_info.ino_table = NULL;
+
+	kfree(mp);
+}
+
+static int n2_mau_probe(struct platform_device *dev)
+{
+	struct mdesc_handle *mdesc;
+	struct n2_mau *mp;
+	int err;
+
+	n2_spu_driver_version();
+
+	pr_info("Found NCP at %pOF\n", dev->dev.of_node);
+
+	mp = alloc_ncp();
+	if (!mp) {
+		dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
+			dev->dev.of_node);
+		return -ENOMEM;
+	}
+
+	err = grab_global_resources();
+	if (err) {
+		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
+			dev->dev.of_node);
+		goto out_free_ncp;
+	}
+
+	mdesc = mdesc_grab();
+
+	if (!mdesc) {
+		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
+			dev->dev.of_node);
+		err = -ENODEV;
+		goto out_free_global;
+	}
+
+	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
+	if (err) {
+		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
+			dev->dev.of_node);
+		mdesc_release(mdesc);
+		goto out_free_global;
+	}
+
+	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
+			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
+			     cpu_to_mau);
+	mdesc_release(mdesc);
+
+	if (err) {
+		dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
+			dev->dev.of_node);
+		goto out_free_global;
+	}
+
+	dev_set_drvdata(&dev->dev, mp);
+
+	return 0;
+
+out_free_global:
+	release_global_resources();
+
+out_free_ncp:
+	free_ncp(mp);
+
+	return err;
+}
+
+static int n2_mau_remove(struct platform_device *dev)
+{
+	struct n2_mau *mp = dev_get_drvdata(&dev->dev);
+
+	spu_list_destroy(&mp->mau_list);
+
+	release_global_resources();
+
+	free_ncp(mp);
+
+	return 0;
+}
+
+static const struct of_device_id n2_crypto_match[] = {
+	{
+		.name = "n2cp",
+		.compatible = "SUNW,n2-cwq",
+	},
+	{
+		.name = "n2cp",
+		.compatible = "SUNW,vf-cwq",
+	},
+	{
+		.name = "n2cp",
+		.compatible = "SUNW,kt-cwq",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, n2_crypto_match);
+
+static struct platform_driver n2_crypto_driver = {
+	.driver = {
+		.name		=	"n2cp",
+		.of_match_table	=	n2_crypto_match,
+	},
+	.probe		=	n2_crypto_probe,
+	.remove		=	n2_crypto_remove,
+};
+
+static const struct of_device_id n2_mau_match[] = {
+	{
+		.name = "ncp",
+		.compatible = "SUNW,n2-mau",
+	},
+	{
+		.name = "ncp",
+		.compatible = "SUNW,vf-mau",
+	},
+	{
+		.name = "ncp",
+		.compatible = "SUNW,kt-mau",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, n2_mau_match);
+
+static struct platform_driver n2_mau_driver = {
+	.driver = {
+		.name		=	"ncp",
+		.of_match_table	=	n2_mau_match,
+	},
+	.probe		=	n2_mau_probe,
+	.remove		=	n2_mau_remove,
+};
+
+static struct platform_driver * const drivers[] = {
+	&n2_crypto_driver,
+	&n2_mau_driver,
+};
+
+static int __init n2_init(void)
+{
+	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+}
+
+static void __exit n2_exit(void)
+{
+	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+}
+
+module_init(n2_init);
+module_exit(n2_exit);
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
new file mode 100644
index 0000000..2406763
--- /dev/null
+++ b/drivers/crypto/n2_core.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _N2_CORE_H
+#define _N2_CORE_H
+
+#ifndef __ASSEMBLY__
+
+struct ino_blob {
+	u64			intr;
+	u64			ino;
+};
+
+struct spu_mdesc_info {
+	u64			cfg_handle;
+	struct ino_blob		*ino_table;
+	int			num_intrs;
+};
+
+struct n2_crypto {
+	struct spu_mdesc_info	cwq_info;
+	struct list_head	cwq_list;
+};
+
+struct n2_mau {
+	struct spu_mdesc_info	mau_info;
+	struct list_head	mau_list;
+};
+
+#define CWQ_ENTRY_SIZE		64
+#define CWQ_NUM_ENTRIES		64
+
+#define MAU_ENTRY_SIZE		64
+#define MAU_NUM_ENTRIES		64
+
+struct cwq_initial_entry {
+	u64			control;
+	u64			src_addr;
+	u64			auth_key_addr;
+	u64			auth_iv_addr;
+	u64			final_auth_state_addr;
+	u64			enc_key_addr;
+	u64			enc_iv_addr;
+	u64			dest_addr;
+};
+
+struct cwq_ext_entry {
+	u64			len;
+	u64			src_addr;
+	u64			resv1;
+	u64			resv2;
+	u64			resv3;
+	u64			resv4;
+	u64			resv5;
+	u64			resv6;
+};
+
+struct cwq_final_entry {
+	u64			control;
+	u64			src_addr;
+	u64			resv1;
+	u64			resv2;
+	u64			resv3;
+	u64			resv4;
+	u64			resv5;
+	u64			resv6;
+};
+
+#define CONTROL_LEN			0x000000000000ffffULL
+#define CONTROL_LEN_SHIFT		0
+#define CONTROL_HMAC_KEY_LEN		0x0000000000ff0000ULL
+#define CONTROL_HMAC_KEY_LEN_SHIFT	16
+#define CONTROL_ENC_TYPE		0x00000000ff000000ULL
+#define CONTROL_ENC_TYPE_SHIFT		24
+#define  ENC_TYPE_ALG_RC4_STREAM	0x00ULL
+#define  ENC_TYPE_ALG_RC4_NOSTREAM	0x04ULL
+#define  ENC_TYPE_ALG_DES		0x08ULL
+#define  ENC_TYPE_ALG_3DES		0x0cULL
+#define  ENC_TYPE_ALG_AES128		0x10ULL
+#define  ENC_TYPE_ALG_AES192		0x14ULL
+#define  ENC_TYPE_ALG_AES256		0x18ULL
+#define  ENC_TYPE_ALG_RESERVED		0x1cULL
+#define  ENC_TYPE_ALG_MASK		0x1cULL
+#define  ENC_TYPE_CHAINING_ECB		0x00ULL
+#define  ENC_TYPE_CHAINING_CBC		0x01ULL
+#define  ENC_TYPE_CHAINING_CFB		0x02ULL
+#define  ENC_TYPE_CHAINING_COUNTER	0x03ULL
+#define  ENC_TYPE_CHAINING_MASK		0x03ULL
+#define CONTROL_AUTH_TYPE		0x0000001f00000000ULL
+#define CONTROL_AUTH_TYPE_SHIFT		32
+#define  AUTH_TYPE_RESERVED		0x00ULL
+#define  AUTH_TYPE_MD5			0x01ULL
+#define  AUTH_TYPE_SHA1			0x02ULL
+#define  AUTH_TYPE_SHA256		0x03ULL
+#define  AUTH_TYPE_CRC32		0x04ULL
+#define  AUTH_TYPE_HMAC_MD5		0x05ULL
+#define  AUTH_TYPE_HMAC_SHA1		0x06ULL
+#define  AUTH_TYPE_HMAC_SHA256		0x07ULL
+#define  AUTH_TYPE_TCP_CHECKSUM		0x08ULL
+#define  AUTH_TYPE_SSL_HMAC_MD5		0x09ULL
+#define  AUTH_TYPE_SSL_HMAC_SHA1	0x0aULL
+#define  AUTH_TYPE_SSL_HMAC_SHA256	0x0bULL
+#define CONTROL_STRAND			0x000000e000000000ULL
+#define CONTROL_STRAND_SHIFT		37
+#define CONTROL_HASH_LEN		0x0000ff0000000000ULL
+#define CONTROL_HASH_LEN_SHIFT		40
+#define CONTROL_INTERRUPT		0x0001000000000000ULL
+#define CONTROL_STORE_FINAL_AUTH_STATE	0x0002000000000000ULL
+#define CONTROL_RESERVED		0x001c000000000000ULL
+#define CONTROL_HV_DONE			0x0004000000000000ULL
+#define CONTROL_HV_PROTOCOL_ERROR	0x0008000000000000ULL
+#define CONTROL_HV_HARDWARE_ERROR	0x0010000000000000ULL
+#define CONTROL_END_OF_BLOCK		0x0020000000000000ULL
+#define CONTROL_START_OF_BLOCK		0x0040000000000000ULL
+#define CONTROL_ENCRYPT			0x0080000000000000ULL
+#define CONTROL_OPCODE			0xff00000000000000ULL
+#define CONTROL_OPCODE_SHIFT		56
+#define  OPCODE_INPLACE_BIT		0x80ULL
+#define  OPCODE_SSL_KEYBLOCK		0x10ULL
+#define  OPCODE_COPY			0x20ULL
+#define  OPCODE_ENCRYPT			0x40ULL
+#define  OPCODE_AUTH_MAC		0x41ULL
+
+#endif /* !(__ASSEMBLY__) */
+
+/* NCS v2.0 hypervisor interfaces */
+#define HV_NCS_QTYPE_MAU		0x01
+#define HV_NCS_QTYPE_CWQ		0x02
+
+/* ncs_qconf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_QCONF
+ * ARG0:	Queue type (HV_NCS_QTYPE_{MAU,CWQ})
+ * ARG1:	Real address of queue, or handle for unconfigure
+ * ARG2:	Number of entries in queue, zero for unconfigure
+ * RET0:	status
+ * RET1:	queue handle
+ *
+ * Configure a queue in the stream processing unit.
+ *
+ * The real address given as the base must be 64-byte
+ * aligned.
+ *
+ * The queue size can range from a minimum of 2 to a maximum
+ * of 64.  The queue size must be a power of two.
+ *
+ * To unconfigure a queue, specify a length of zero and place
+ * the queue handle into ARG1.
+ *
+ * On configure success the hypervisor will set the FIRST, HEAD,
+ * and TAIL registers to the address of the first entry in the
+ * queue.  The LAST register will be set to point to the last
+ * entry in the queue.
+ */
+#define HV_FAST_NCS_QCONF		0x111
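+
+/* As an illustrative sketch (not from the hypervisor spec), a CWQ with 64
+ * entries starting at the 64-byte-aligned real address 'q_ra' could be
+ * configured, and later unconfigured, through the wrapper declared below:
+ *
+ *	unsigned long qhandle, hv_err;
+ *
+ *	hv_err = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, q_ra, 64, &qhandle);
+ *	...
+ *	hv_err = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, qhandle, 0, &qhandle);
+ */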
+
+/* ncs_qinfo()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_QINFO
+ * ARG0:	Queue handle
+ * RET0:	status
+ * RET1:	Queue type (HV_NCS_QTYPE_{MAU,CWQ})
+ * RET2:	Queue base address
+ * RET3:	Number of entries
+ */
+#define HV_FAST_NCS_QINFO		0x112
+
+/* ncs_gethead()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_GETHEAD
+ * ARG0:	Queue handle
+ * RET0:	status
+ * RET1:	queue head offset
+ */
+#define HV_FAST_NCS_GETHEAD		0x113
+
+/* ncs_gettail()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_GETTAIL
+ * ARG0:	Queue handle
+ * RET0:	status
+ * RET1:	queue tail offset
+ */
+#define HV_FAST_NCS_GETTAIL		0x114
+
+/* ncs_settail()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_SETTAIL
+ * ARG0:	Queue handle
+ * ARG1:	New tail offset
+ * RET0:	status
+ */
+#define HV_FAST_NCS_SETTAIL		0x115
+
+/* ncs_qhandle_to_devino()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_QHANDLE_TO_DEVINO
+ * ARG0:	Queue handle
+ * RET0:	status
+ * RET1:	devino
+ */
+#define HV_FAST_NCS_QHANDLE_TO_DEVINO	0x116
+
+/* ncs_sethead_marker()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_NCS_SETHEAD_MARKER
+ * ARG0:	Queue handle
+ * ARG1:	New head offset
+ * RET0:	status
+ */
+#define HV_FAST_NCS_SETHEAD_MARKER	0x117
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
+				     unsigned long queue_ra,
+				     unsigned long num_entries,
+				     unsigned long *qhandle);
+extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
+				     unsigned long *queue_type,
+				     unsigned long *queue_ra,
+				     unsigned long *num_entries);
+extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
+				       unsigned long *head);
+extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
+				       unsigned long *tail);
+extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
+				       unsigned long tail);
+extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
+						 unsigned long *devino);
+extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
+					      unsigned long head);
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* _N2_CORE_H */
diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig
new file mode 100644
index 0000000..cd5dda9
--- /dev/null
+++ b/drivers/crypto/nx/Kconfig
@@ -0,0 +1,49 @@
+
+config CRYPTO_DEV_NX_ENCRYPT
+	tristate "Encryption acceleration support on pSeries platform"
+	depends on PPC_PSERIES && IBMVIO && !CPU_LITTLE_ENDIAN
+	default y
+	select CRYPTO_AES
+	select CRYPTO_CCM
+	help
+	  Support for PowerPC Nest (NX) encryption acceleration. This
+	  module supports acceleration for AES and SHA2 algorithms on
+	  the pSeries platform.  If you choose 'M' here, this module
+	  will be called nx_crypto.
+
+config CRYPTO_DEV_NX_COMPRESS
+	tristate "Compression acceleration support"
+	default y
+	select CRYPTO_ALGAPI
+	select 842_DECOMPRESS
+	help
+	  Support for PowerPC Nest (NX) compression acceleration. This
+	  module supports acceleration for compressing memory with the 842
+	  algorithm using the cryptographic API.  One of the platform
+	  drivers must be selected also.  If you choose 'M' here, this
+	  module will be called nx_compress.
+
+if CRYPTO_DEV_NX_COMPRESS
+
+config CRYPTO_DEV_NX_COMPRESS_PSERIES
+	tristate "Compression acceleration support on pSeries platform"
+	depends on PPC_PSERIES && IBMVIO
+	default y
+	help
+	  Support for PowerPC Nest (NX) compression acceleration. This
+	  module supports acceleration for compressing memory with the 842
+	  algorithm.  This supports NX hardware on the pSeries platform.
+	  If you choose 'M' here, this module will be called nx_compress_pseries.
+
+config CRYPTO_DEV_NX_COMPRESS_POWERNV
+	tristate "Compression acceleration support on PowerNV platform"
+	depends on PPC_POWERNV
+	depends on PPC_VAS
+	default y
+	help
+	  Support for PowerPC Nest (NX) compression acceleration. This
+	  module supports acceleration for compressing memory with the 842
+	  algorithm.  This supports NX hardware on the PowerNV platform.
+	  If you choose 'M' here, this module will be called nx_compress_powernv.
+
+endif
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
new file mode 100644
index 0000000..015155d
--- /dev/null
+++ b/drivers/crypto/nx/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o
+nx-crypto-objs := nx.o \
+		  nx_debugfs.o \
+		  nx-aes-cbc.o \
+		  nx-aes-ecb.o \
+		  nx-aes-gcm.o \
+		  nx-aes-ccm.o \
+		  nx-aes-ctr.o \
+		  nx-aes-xcbc.o \
+		  nx-sha256.o \
+		  nx-sha512.o
+
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
+nx-compress-objs := nx-842.o
+nx-compress-pseries-objs := nx-842-pseries.o
+nx-compress-powernv-objs := nx-842-powernv.o
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
new file mode 100644
index 0000000..c68df7e
--- /dev/null
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -0,0 +1,1068 @@
+/*
+ * Driver for IBM PowerNV 842 compression accelerator
+ *
+ * Copyright (C) 2015 Dan Streetman, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "nx-842.h"
+
+#include <linux/timer.h>
+
+#include <asm/prom.h>
+#include <asm/icswx.h>
+#include <asm/vas.h>
+#include <asm/reg.h>
+#include <asm/opal-api.h>
+#include <asm/opal.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
+MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
+
+#define WORKMEM_ALIGN	(CRB_ALIGN)
+#define CSB_WAIT_MAX	(5000) /* ms */
+#define VAS_RETRIES	(10)
+/* # of requests allowed per RxFIFO at a time. 0 for unlimited */
+#define MAX_CREDITS_PER_RXFIFO	(1024)
+
+struct nx842_workmem {
+	/* Below fields must be properly aligned */
+	struct coprocessor_request_block crb; /* CRB_ALIGN align */
+	struct data_descriptor_entry ddl_in[DDL_LEN_MAX]; /* DDE_ALIGN align */
+	struct data_descriptor_entry ddl_out[DDL_LEN_MAX]; /* DDE_ALIGN align */
+	/* Above fields must be properly aligned */
+
+	ktime_t start;
+
+	char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */
+} __packed __aligned(WORKMEM_ALIGN);
+
+struct nx842_coproc {
+	unsigned int chip_id;
+	unsigned int ct;
+	unsigned int ci;	/* Coprocessor instance, used with icswx */
+	struct {
+		struct vas_window *rxwin;
+		int id;
+	} vas;
+	struct list_head list;
+};
+
+/*
+ * Per-CPU VAS send window used to submit requests to the NX engine on the
+ * chip where the submitting CPU is executing.  Used by the VAS exec path.
+ */
+static DEFINE_PER_CPU(struct vas_window *, cpu_txwin);
+
+/* no cpu hotplug on powernv, so this list never changes after init */
+static LIST_HEAD(nx842_coprocs);
+static unsigned int nx842_ct;	/* used in icswx function */
+
+static int (*nx842_powernv_exec)(const unsigned char *in,
+				unsigned int inlen, unsigned char *out,
+				unsigned int *outlenp, void *workmem, int fc);
+
+/**
+ * setup_indirect_dde - Setup an indirect DDE
+ *
+ * The DDE is set up with the DDE count, byte count, and address of the
+ * first direct DDE in the list.
+ */
+static void setup_indirect_dde(struct data_descriptor_entry *dde,
+			       struct data_descriptor_entry *ddl,
+			       unsigned int dde_count, unsigned int byte_count)
+{
+	dde->flags = 0;
+	dde->count = dde_count;
+	dde->index = 0;
+	dde->length = cpu_to_be32(byte_count);
+	dde->address = cpu_to_be64(nx842_get_pa(ddl));
+}
+
+/**
+ * setup_direct_dde - Setup single DDE from buffer
+ *
+ * The DDE is set up with the buffer and length.  The buffer must be properly
+ * aligned.  The used length is returned.
+ * Returns:
+ *   N    Successfully set up DDE with N bytes
+ */
+static unsigned int setup_direct_dde(struct data_descriptor_entry *dde,
+				     unsigned long pa, unsigned int len)
+{
+	unsigned int l = min_t(unsigned int, len, LEN_ON_PAGE(pa));
+
+	dde->flags = 0;
+	dde->count = 0;
+	dde->index = 0;
+	dde->length = cpu_to_be32(l);
+	dde->address = cpu_to_be64(pa);
+
+	return l;
+}
+
+/**
+ * setup_ddl - Setup DDL from buffer
+ *
+ * Returns:
+ *   0		Successfully set up DDL
+ */
+static int setup_ddl(struct data_descriptor_entry *dde,
+		     struct data_descriptor_entry *ddl,
+		     unsigned char *buf, unsigned int len,
+		     bool in)
+{
+	unsigned long pa = nx842_get_pa(buf);
+	int i, ret, total_len = len;
+
+	if (!IS_ALIGNED(pa, DDE_BUFFER_ALIGN)) {
+		pr_debug("%s buffer pa 0x%lx not 0x%x-byte aligned\n",
+			 in ? "input" : "output", pa, DDE_BUFFER_ALIGN);
+		return -EINVAL;
+	}
+
+	/* Only the last buffer's length needs checking against
+	 * DDE_BUFFER_LAST_MULT: the buffer is DDE_BUFFER_ALIGN aligned,
+	 * which is a multiple of DDE_BUFFER_SIZE_MULT, so every earlier
+	 * page-sized DDE buffer is already a multiple of DDE_BUFFER_SIZE_MULT.
+	 */
+	if (len % DDE_BUFFER_LAST_MULT) {
+		pr_debug("%s buffer len 0x%x not a multiple of 0x%x\n",
+			 in ? "input" : "output", len, DDE_BUFFER_LAST_MULT);
+		if (in)
+			return -EINVAL;
+		len = round_down(len, DDE_BUFFER_LAST_MULT);
+	}
+
+	/* use a single direct DDE */
+	if (len <= LEN_ON_PAGE(pa)) {
+		ret = setup_direct_dde(dde, pa, len);
+		WARN_ON(ret < len);
+		return 0;
+	}
+
+	/* use the DDL */
+	for (i = 0; i < DDL_LEN_MAX && len > 0; i++) {
+		ret = setup_direct_dde(&ddl[i], pa, len);
+		buf += ret;
+		len -= ret;
+		pa = nx842_get_pa(buf);
+	}
+
+	if (len > 0) {
+		pr_debug("0x%x total %s bytes 0x%x too many for DDL.\n",
+			 total_len, in ? "input" : "output", len);
+		if (in)
+			return -EMSGSIZE;
+		total_len -= len;
+	}
+	setup_indirect_dde(dde, ddl, i, total_len);
+
+	return 0;
+}
+
+#define CSB_ERR(csb, msg, ...)					\
+	pr_err("ERROR: " msg " : %02x %02x %02x %02x %08x\n",	\
+	       ##__VA_ARGS__, (csb)->flags,			\
+	       (csb)->cs, (csb)->cc, (csb)->ce,			\
+	       be32_to_cpu((csb)->count))
+
+#define CSB_ERR_ADDR(csb, msg, ...)				\
+	CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__,		\
+		(unsigned long)be64_to_cpu((csb)->address))
+
+/**
+ * wait_for_csb - poll for CSB completion
+ *
+ * Busy-wait, for up to CSB_WAIT_MAX ms from job submission, for the
+ * hardware to mark the CSB valid, then translate the CSB flags and
+ * completion code into an errno.
+ */
+static int wait_for_csb(struct nx842_workmem *wmem,
+			struct coprocessor_status_block *csb)
+{
+	ktime_t start = wmem->start, now = ktime_get();
+	ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);
+
+	while (!(READ_ONCE(csb->flags) & CSB_V)) {
+		cpu_relax();
+		now = ktime_get();
+		if (ktime_after(now, timeout))
+			break;
+	}
+
+	/* hw has updated csb and output buffer */
+	barrier();
+
+	/* check CSB flags */
+	if (!(csb->flags & CSB_V)) {
+		CSB_ERR(csb, "CSB still not valid after %ld us, giving up",
+			(long)ktime_us_delta(now, start));
+		return -ETIMEDOUT;
+	}
+	if (csb->flags & CSB_F) {
+		CSB_ERR(csb, "Invalid CSB format");
+		return -EPROTO;
+	}
+	if (csb->flags & CSB_CH) {
+		CSB_ERR(csb, "Invalid CSB chaining state");
+		return -EPROTO;
+	}
+
+	/* verify CSB completion sequence is 0 */
+	if (csb->cs) {
+		CSB_ERR(csb, "Invalid CSB completion sequence");
+		return -EPROTO;
+	}
+
+	/* check CSB Completion Code */
+	switch (csb->cc) {
+	/* no error */
+	case CSB_CC_SUCCESS:
+		break;
+	case CSB_CC_TPBC_GT_SPBC:
+		/* not an error, but the compressed data is
+		 * larger than the uncompressed data :(
+		 */
+		break;
+
+	/* input data errors */
+	case CSB_CC_OPERAND_OVERLAP:
+		/* input and output buffers overlap */
+		CSB_ERR(csb, "Operand Overlap error");
+		return -EINVAL;
+	case CSB_CC_INVALID_OPERAND:
+		CSB_ERR(csb, "Invalid operand");
+		return -EINVAL;
+	case CSB_CC_NOSPC:
+		/* output buffer too small */
+		return -ENOSPC;
+	case CSB_CC_ABORT:
+		CSB_ERR(csb, "Function aborted");
+		return -EINTR;
+	case CSB_CC_CRC_MISMATCH:
+		CSB_ERR(csb, "CRC mismatch");
+		return -EINVAL;
+	case CSB_CC_TEMPL_INVALID:
+		CSB_ERR(csb, "Compressed data template invalid");
+		return -EINVAL;
+	case CSB_CC_TEMPL_OVERFLOW:
+		CSB_ERR(csb, "Compressed data template shows data past end");
+		return -EINVAL;
+	case CSB_CC_EXCEED_BYTE_COUNT:	/* P9 or later */
+		/*
+		 * DDE byte count exceeds the limit specified in Maximum
+		 * byte count register.
+		 */
+		CSB_ERR(csb, "DDE byte count exceeds the limit");
+		return -EINVAL;
+
+	/* these should not happen */
+	case CSB_CC_INVALID_ALIGN:
+		/* setup_ddl should have detected this */
+		CSB_ERR_ADDR(csb, "Invalid alignment");
+		return -EINVAL;
+	case CSB_CC_DATA_LENGTH:
+		/* setup_ddl should have detected this */
+		CSB_ERR(csb, "Invalid data length");
+		return -EINVAL;
+	case CSB_CC_WR_TRANSLATION:
+	case CSB_CC_TRANSLATION:
+	case CSB_CC_TRANSLATION_DUP1:
+	case CSB_CC_TRANSLATION_DUP2:
+	case CSB_CC_TRANSLATION_DUP3:
+	case CSB_CC_TRANSLATION_DUP4:
+	case CSB_CC_TRANSLATION_DUP5:
+	case CSB_CC_TRANSLATION_DUP6:
+		/* should not happen, we use physical addrs */
+		CSB_ERR_ADDR(csb, "Translation error");
+		return -EPROTO;
+	case CSB_CC_WR_PROTECTION:
+	case CSB_CC_PROTECTION:
+	case CSB_CC_PROTECTION_DUP1:
+	case CSB_CC_PROTECTION_DUP2:
+	case CSB_CC_PROTECTION_DUP3:
+	case CSB_CC_PROTECTION_DUP4:
+	case CSB_CC_PROTECTION_DUP5:
+	case CSB_CC_PROTECTION_DUP6:
+		/* should not happen, we use physical addrs */
+		CSB_ERR_ADDR(csb, "Protection error");
+		return -EPROTO;
+	case CSB_CC_PRIVILEGE:
+		/* shouldn't happen, we're in HYP mode */
+		CSB_ERR(csb, "Insufficient Privilege error");
+		return -EPROTO;
+	case CSB_CC_EXCESSIVE_DDE:
+		/* shouldn't happen, setup_ddl doesn't use many dde's */
+		CSB_ERR(csb, "Too many DDEs in DDL");
+		return -EINVAL;
+	case CSB_CC_TRANSPORT:
+	case CSB_CC_INVALID_CRB:	/* P9 or later */
+		/* shouldn't happen, we setup CRB correctly */
+		CSB_ERR(csb, "Invalid CRB");
+		return -EINVAL;
+	case CSB_CC_INVALID_DDE:	/* P9 or later */
+		/*
+		 * shouldn't happen, setup_direct/indirect_dde creates
+		 * DDE right
+		 */
+		CSB_ERR(csb, "Invalid DDE");
+		return -EINVAL;
+	case CSB_CC_SEGMENTED_DDL:
+		/* shouldn't happen, setup_ddl creates DDL right */
+		CSB_ERR(csb, "Segmented DDL error");
+		return -EINVAL;
+	case CSB_CC_DDE_OVERFLOW:
+		/* shouldn't happen, setup_ddl creates DDL right */
+		CSB_ERR(csb, "DDE overflow error");
+		return -EINVAL;
+	case CSB_CC_SESSION:
+		/* should not happen with ICSWX */
+		CSB_ERR(csb, "Session violation error");
+		return -EPROTO;
+	case CSB_CC_CHAIN:
+		/* should not happen, we don't use chained CRBs */
+		CSB_ERR(csb, "Chained CRB error");
+		return -EPROTO;
+	case CSB_CC_SEQUENCE:
+		/* should not happen, we don't use chained CRBs */
+		CSB_ERR(csb, "CRB sequence number error");
+		return -EPROTO;
+	case CSB_CC_UNKNOWN_CODE:
+		CSB_ERR(csb, "Unknown subfunction code");
+		return -EPROTO;
+
+	/* hardware errors */
+	case CSB_CC_RD_EXTERNAL:
+	case CSB_CC_RD_EXTERNAL_DUP1:
+	case CSB_CC_RD_EXTERNAL_DUP2:
+	case CSB_CC_RD_EXTERNAL_DUP3:
+		CSB_ERR_ADDR(csb, "Read error outside coprocessor");
+		return -EPROTO;
+	case CSB_CC_WR_EXTERNAL:
+		CSB_ERR_ADDR(csb, "Write error outside coprocessor");
+		return -EPROTO;
+	case CSB_CC_INTERNAL:
+		CSB_ERR(csb, "Internal error in coprocessor");
+		return -EPROTO;
+	case CSB_CC_PROVISION:
+		CSB_ERR(csb, "Storage provision error");
+		return -EPROTO;
+	case CSB_CC_HW:
+		CSB_ERR(csb, "Correctable hardware error");
+		return -EPROTO;
+	case CSB_CC_HW_EXPIRED_TIMER:	/* P9 or later */
+		CSB_ERR(csb, "Job did not finish within allowed time");
+		return -EPROTO;
+
+	default:
+		CSB_ERR(csb, "Invalid CC %d", csb->cc);
+		return -EPROTO;
+	}
+
+	/* check Completion Extension state */
+	if (csb->ce & CSB_CE_TERMINATION) {
+		CSB_ERR(csb, "CSB request was terminated");
+		return -EPROTO;
+	}
+	if (csb->ce & CSB_CE_INCOMPLETE) {
+		CSB_ERR(csb, "CSB request not complete");
+		return -EPROTO;
+	}
+	if (!(csb->ce & CSB_CE_TPBC)) {
+		CSB_ERR(csb, "TPBC not provided, unknown target length");
+		return -EPROTO;
+	}
+
+	/* successful completion */
+	pr_debug_ratelimited("Processed %u bytes in %lu us\n",
+			     be32_to_cpu(csb->count),
+			     (unsigned long)ktime_us_delta(now, start));
+
+	return 0;
+}
+
+static int nx842_config_crb(const unsigned char *in, unsigned int inlen,
+			unsigned char *out, unsigned int outlen,
+			struct nx842_workmem *wmem)
+{
+	struct coprocessor_request_block *crb;
+	struct coprocessor_status_block *csb;
+	u64 csb_addr;
+	int ret;
+
+	crb = &wmem->crb;
+	csb = &crb->csb;
+
+	/* Clear any previous values */
+	memset(crb, 0, sizeof(*crb));
+
+	/* set up DDLs */
+	ret = setup_ddl(&crb->source, wmem->ddl_in,
+			(unsigned char *)in, inlen, true);
+	if (ret)
+		return ret;
+
+	ret = setup_ddl(&crb->target, wmem->ddl_out,
+			out, outlen, false);
+	if (ret)
+		return ret;
+
+	/* set up CRB's CSB addr */
+	csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS;
+	csb_addr |= CRB_CSB_AT; /* Addrs are phys */
+	crb->csb_addr = cpu_to_be64(csb_addr);
+
+	return 0;
+}
+
+/**
+ * nx842_exec_icswx - compress/decompress data using the 842 algorithm
+ *
+ * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * This compresses or decompresses the provided input buffer into the provided
+ * output buffer.
+ *
+ * Upon return from this function @outlenp contains the length of the
+ * output data.  If there is an error then @outlenp will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * The @workmem buffer should only be used by one function call at a time.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ *           nx842_powernv_driver.workmem_size
+ * @fc: function code, see CCW Function Codes in nx-842.h
+ *
+ * Returns:
+ *   0		Success, output of length @outlenp stored in the buffer at @out
+ *   -ENODEV	Hardware unavailable
+ *   -ENOSPC	Output buffer is too small
+ *   -EMSGSIZE	Input buffer too large
+ *   -EINVAL	buffer constraints do not fit nx842_constraints
+ *   -EPROTO	hardware error during operation
+ *   -ETIMEDOUT	hardware did not complete operation in reasonable time
+ *   -EINTR	operation was aborted
+ */
+static int nx842_exec_icswx(const unsigned char *in, unsigned int inlen,
+				  unsigned char *out, unsigned int *outlenp,
+				  void *workmem, int fc)
+{
+	struct coprocessor_request_block *crb;
+	struct coprocessor_status_block *csb;
+	struct nx842_workmem *wmem;
+	int ret;
+	u32 ccw;
+	unsigned int outlen = *outlenp;
+
+	wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN);
+
+	*outlenp = 0;
+
+	/* shouldn't happen, we don't load without a coproc */
+	if (!nx842_ct) {
+		pr_err_ratelimited("coprocessor CT is 0");
+		return -ENODEV;
+	}
+
+	ret = nx842_config_crb(in, inlen, out, outlen, wmem);
+	if (ret)
+		return ret;
+
+	crb = &wmem->crb;
+	csb = &crb->csb;
+
+	/* set up CCW */
+	ccw = 0;
+	ccw = SET_FIELD(CCW_CT, ccw, nx842_ct);
+	ccw = SET_FIELD(CCW_CI_842, ccw, 0); /* use 0 for hw auto-selection */
+	ccw = SET_FIELD(CCW_FC_842, ccw, fc);
+
+	wmem->start = ktime_get();
+
+	/* do ICSWX */
+	ret = icswx(cpu_to_be32(ccw), crb);
+
+	pr_debug_ratelimited("icswx CR %x ccw %x crb->ccw %x\n", ret,
+			     (unsigned int)ccw,
+			     (unsigned int)be32_to_cpu(crb->ccw));
+
+	/*
+	 * The NX842 coprocessor sets the 3rd bit in the CR register with
+	 * XER[S0].  XER[S0] is the integer summary-overflow bit, which has
+	 * nothing to do with NX.  Since this bit can be set alongside other
+	 * return values, mask it off.
+	 */
+	ret &= ~ICSWX_XERS0;
+
+	switch (ret) {
+	case ICSWX_INITIATED:
+		ret = wait_for_csb(wmem, csb);
+		break;
+	case ICSWX_BUSY:
+		pr_debug_ratelimited("842 Coprocessor busy\n");
+		ret = -EBUSY;
+		break;
+	case ICSWX_REJECTED:
+		pr_err_ratelimited("ICSWX rejected\n");
+		ret = -EPROTO;
+		break;
+	}
+
+	if (!ret)
+		*outlenp = be32_to_cpu(csb->count);
+
+	return ret;
+}
+
+/**
+ * nx842_exec_vas - compress/decompress data using the 842 algorithm
+ *
+ * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * This compresses or decompresses the provided input buffer into the provided
+ * output buffer.
+ *
+ * Upon return from this function @outlenp contains the length of the
+ * output data.  If there is an error then @outlenp will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * The @workmem buffer should only be used by one function call at a time.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ *           nx842_powernv_driver.workmem_size
+ * @fc: function code, see CCW Function Codes in nx-842.h
+ *
+ * Returns:
+ *   0		Success, output of length @outlenp stored in the buffer
+ *		at @out
+ *   -ENODEV	Hardware unavailable
+ *   -ENOSPC	Output buffer is too small
+ *   -EMSGSIZE	Input buffer too large
+ *   -EINVAL	buffer constraints do not fit nx842_constraints
+ *   -EPROTO	hardware error during operation
+ *   -ETIMEDOUT	hardware did not complete operation in reasonable time
+ *   -EINTR	operation was aborted
+ */
+static int nx842_exec_vas(const unsigned char *in, unsigned int inlen,
+				  unsigned char *out, unsigned int *outlenp,
+				  void *workmem, int fc)
+{
+	struct coprocessor_request_block *crb;
+	struct coprocessor_status_block *csb;
+	struct nx842_workmem *wmem;
+	struct vas_window *txwin;
+	int ret, i = 0;
+	u32 ccw;
+	unsigned int outlen = *outlenp;
+
+	wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN);
+
+	*outlenp = 0;
+
+	crb = &wmem->crb;
+	csb = &crb->csb;
+
+	ret = nx842_config_crb(in, inlen, out, outlen, wmem);
+	if (ret)
+		return ret;
+
+	ccw = 0;
+	ccw = SET_FIELD(CCW_FC_842, ccw, fc);
+	crb->ccw = cpu_to_be32(ccw);
+
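+	/*
+	 * Submit the CRB with copy/paste through this CPU's VAS send window,
+	 * retrying a bounded number of times if the paste is not accepted.
+	 */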
+	do {
+		wmem->start = ktime_get();
+		preempt_disable();
+		txwin = this_cpu_read(cpu_txwin);
+
+		/*
+		 * Copy the CRB into the local L2 cache with VAS; see
+		 * <asm/vas.h> for the @crb and @offset arguments.
+		 */
+		vas_copy_crb(crb, 0);
+
+		/*
+		 * VAS paste previously copied CRB to NX.
+		 * @txwin, @offset and @last (must be true).
+		 */
+		ret = vas_paste_crb(txwin, 0, 1);
+		preempt_enable();
+		/*
+		 * Retry copy/paste function for VAS failures.
+		 */
+	} while (ret && (i++ < VAS_RETRIES));
+
+	if (ret) {
+		pr_err_ratelimited("VAS copy/paste failed\n");
+		return ret;
+	}
+
+	ret = wait_for_csb(wmem, csb);
+	if (!ret)
+		*outlenp = be32_to_cpu(csb->count);
+
+	return ret;
+}
+
+/**
+ * nx842_powernv_compress - Compress data using the 842 algorithm
+ *
+ * Compression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * The input buffer is compressed and the result is stored in the
+ * provided output buffer.
+ *
+ * Upon return from this function @outlenp contains the length of the
+ * compressed data.  If there is an error then @outlenp will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ *           nx842_powernv_driver.workmem_size
+ *
+ * Returns: see @nx842_powernv_exec()
+ */
+static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen,
+				  unsigned char *out, unsigned int *outlenp,
+				  void *wmem)
+{
+	return nx842_powernv_exec(in, inlen, out, outlenp,
+				      wmem, CCW_FC_842_COMP_CRC);
+}
+
+/**
+ * nx842_powernv_decompress - Decompress data using the 842 algorithm
+ *
+ * Decompression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * The input buffer is decompressed and the result is stored in the
+ * provided output buffer.
+ *
+ * Upon return from this function @outlenp contains the length of the
+ * decompressed data.  If there is an error then @outlenp will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ *           nx842_powernv_driver.workmem_size
+ *
+ * Returns: see @nx842_powernv_exec()
+ */
+static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
+				    unsigned char *out, unsigned int *outlenp,
+				    void *wmem)
+{
+	return nx842_powernv_exec(in, inlen, out, outlenp,
+				      wmem, CCW_FC_842_DECOMP_CRC);
+}
+
+static inline void nx842_add_coprocs_list(struct nx842_coproc *coproc,
+					int chipid)
+{
+	coproc->chip_id = chipid;
+	INIT_LIST_HEAD(&coproc->list);
+	list_add(&coproc->list, &nx842_coprocs);
+}
+
+static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
+{
+	struct vas_window *txwin = NULL;
+	struct vas_tx_win_attr txattr;
+
+	/*
+	 * Kernel requests will be high priority. So open send
+	 * windows only for high priority RxFIFO entries.
+	 */
+	vas_init_tx_win_attr(&txattr, coproc->ct);
+	txattr.lpid = 0;	/* lpid is 0 for kernel requests */
+	txattr.pid = 0;		/* pid is 0 for kernel requests */
+
+	/*
+	 * Open a VAS send window which is used to send request to NX.
+	 */
+	txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr);
+	if (IS_ERR(txwin))
+		pr_err("ibm,nx-842: Can not open TX window: %ld\n",
+				PTR_ERR(txwin));
+
+	return txwin;
+}
+
+/*
+ * Identify the chip ID for each CPU, open a send window for the
+ * corresponding NX engine and save the txwin in the per-CPU cpu_txwin.
+ * cpu_txwin is used in the copy/paste operation for each compression /
+ * decompression request.
+ */
+static int nx842_open_percpu_txwins(void)
+{
+	struct nx842_coproc *coproc, *n;
+	unsigned int i, chip_id;
+
+	for_each_possible_cpu(i) {
+		struct vas_window *txwin = NULL;
+
+		chip_id = cpu_to_chip_id(i);
+
+		list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
+			/*
+			 * Kernel requests use only high priority FIFOs. So
+			 * open send windows for these FIFOs.
+			 */
+
+			if (coproc->ct != VAS_COP_TYPE_842_HIPRI)
+				continue;
+
+			if (coproc->chip_id == chip_id) {
+				txwin = nx842_alloc_txwin(coproc);
+				if (IS_ERR(txwin))
+					return PTR_ERR(txwin);
+
+				per_cpu(cpu_txwin, i) = txwin;
+				break;
+			}
+		}
+
+		if (!per_cpu(cpu_txwin, i)) {
+			/* shouldn't happen; each chip will have an NX engine */
+			pr_err("NX engine is not available for CPU %d\n", i);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
+					int vasid, int *ct)
+{
+	struct vas_window *rxwin = NULL;
+	struct vas_rx_win_attr rxattr;
+	struct nx842_coproc *coproc;
+	u32 lpid, pid, tid, fifo_size;
+	u64 rx_fifo;
+	const char *priority;
+	int ret;
+
+	ret = of_property_read_u64(dn, "rx-fifo-address", &rx_fifo);
+	if (ret) {
+		pr_err("Missing rx-fifo-address property\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(dn, "rx-fifo-size", &fifo_size);
+	if (ret) {
+		pr_err("Missing rx-fifo-size property\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(dn, "lpid", &lpid);
+	if (ret) {
+		pr_err("Missing lpid property\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(dn, "pid", &pid);
+	if (ret) {
+		pr_err("Missing pid property\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(dn, "tid", &tid);
+	if (ret) {
+		pr_err("Missing tid property\n");
+		return ret;
+	}
+
+	ret = of_property_read_string(dn, "priority", &priority);
+	if (ret) {
+		pr_err("Missing priority property\n");
+		return ret;
+	}
+
+	coproc = kzalloc(sizeof(*coproc), GFP_KERNEL);
+	if (!coproc)
+		return -ENOMEM;
+
+	if (!strcmp(priority, "High"))
+		coproc->ct = VAS_COP_TYPE_842_HIPRI;
+	else if (!strcmp(priority, "Normal"))
+		coproc->ct = VAS_COP_TYPE_842;
+	else {
+		pr_err("Invalid RxFIFO priority value\n");
+		ret =  -EINVAL;
+		goto err_out;
+	}
+
+	vas_init_rx_win_attr(&rxattr, coproc->ct);
+	rxattr.rx_fifo = (void *)rx_fifo;
+	rxattr.rx_fifo_size = fifo_size;
+	rxattr.lnotify_lpid = lpid;
+	rxattr.lnotify_pid = pid;
+	rxattr.lnotify_tid = tid;
+	rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO;
+
+	/*
+	 * Open a VAS receive window which is used to configure RxFIFO
+	 * for NX.
+	 */
+	rxwin = vas_rx_win_open(vasid, coproc->ct, &rxattr);
+	if (IS_ERR(rxwin)) {
+		ret = PTR_ERR(rxwin);
+		pr_err("setting RxFIFO with VAS failed: %d\n",
+			ret);
+		goto err_out;
+	}
+
+	coproc->vas.rxwin = rxwin;
+	coproc->vas.id = vasid;
+	nx842_add_coprocs_list(coproc, chip_id);
+
+	/*
+	 * (lpid, pid, tid) combination has to be unique for each
+	 * coprocessor instance in the system. So to make it
+	 * unique, skiboot uses coprocessor type such as 842 or
+	 * GZIP for pid and provides this value to the kernel in the
+	 * pid device-tree property.
+	 */
+	*ct = pid;
+
+	return 0;
+
+err_out:
+	kfree(coproc);
+	return ret;
+}
+
+
+static int __init nx842_powernv_probe_vas(struct device_node *pn)
+{
+	struct device_node *dn;
+	int chip_id, vasid, ret = 0;
+	int nx_fifo_found = 0;
+	int uninitialized_var(ct);
+
+	chip_id = of_get_ibm_chip_id(pn);
+	if (chip_id < 0) {
+		pr_err("ibm,chip-id missing\n");
+		return -EINVAL;
+	}
+
+	vasid = chip_to_vas_id(chip_id);
+	if (vasid < 0) {
+		pr_err("Unable to map chip_id %d to vasid\n", chip_id);
+		return -EINVAL;
+	}
+
+	for_each_child_of_node(pn, dn) {
+		if (of_device_is_compatible(dn, "ibm,p9-nx-842")) {
+			ret = vas_cfg_coproc_info(dn, chip_id, vasid, &ct);
+			if (ret) {
+				of_node_put(dn);
+				return ret;
+			}
+			nx_fifo_found++;
+		}
+	}
+
+	if (!nx_fifo_found) {
+		pr_err("NX842 FIFO nodes are missing\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Initialize NX instance for both high and normal priority FIFOs.
+	 */
+	if (opal_check_token(OPAL_NX_COPROC_INIT)) {
+		ret = opal_nx_coproc_init(chip_id, ct);
+		if (ret) {
+			pr_err("Failed to initialize NX for chip(%d): %d\n",
+				chip_id, ret);
+			ret = opal_error_code(ret);
+		}
+	} else
+		pr_warn("Firmware doesn't support NX initialization\n");
+
+	return ret;
+}
+
+static int __init nx842_powernv_probe(struct device_node *dn)
+{
+	struct nx842_coproc *coproc;
+	unsigned int ct, ci;
+	int chip_id;
+
+	chip_id = of_get_ibm_chip_id(dn);
+	if (chip_id < 0) {
+		pr_err("ibm,chip-id missing\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) {
+		pr_err("ibm,842-coprocessor-type missing\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) {
+		pr_err("ibm,842-coprocessor-instance missing\n");
+		return -EINVAL;
+	}
+
+	coproc = kmalloc(sizeof(*coproc), GFP_KERNEL);
+	if (!coproc)
+		return -ENOMEM;
+
+	coproc->ct = ct;
+	coproc->ci = ci;
+	nx842_add_coprocs_list(coproc, chip_id);
+
+	pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci);
+
+	if (!nx842_ct)
+		nx842_ct = ct;
+	else if (nx842_ct != ct)
+		pr_err("NX842 chip %d, CT %d != first found CT %d\n",
+		       chip_id, ct, nx842_ct);
+
+	return 0;
+}
+
+static void nx842_delete_coprocs(void)
+{
+	struct nx842_coproc *coproc, *n;
+	struct vas_window *txwin;
+	int i;
+
+	/*
+	 * close percpu txwins that are opened for the corresponding coproc.
+	 */
+	for_each_possible_cpu(i) {
+		txwin = per_cpu(cpu_txwin, i);
+		if (txwin)
+			vas_win_close(txwin);
+
+		per_cpu(cpu_txwin, i) = 0;
+	}
+
+	list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
+		if (coproc->vas.rxwin)
+			vas_win_close(coproc->vas.rxwin);
+
+		list_del(&coproc->list);
+		kfree(coproc);
+	}
+}
+
+static struct nx842_constraints nx842_powernv_constraints = {
+	.alignment =	DDE_BUFFER_ALIGN,
+	.multiple =	DDE_BUFFER_LAST_MULT,
+	.minimum =	DDE_BUFFER_LAST_MULT,
+	.maximum =	(DDL_LEN_MAX - 1) * PAGE_SIZE,
+};
+
+static struct nx842_driver nx842_powernv_driver = {
+	.name =		KBUILD_MODNAME,
+	.owner =	THIS_MODULE,
+	.workmem_size =	sizeof(struct nx842_workmem),
+	.constraints =	&nx842_powernv_constraints,
+	.compress =	nx842_powernv_compress,
+	.decompress =	nx842_powernv_decompress,
+};
+
+static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
+{
+	return nx842_crypto_init(tfm, &nx842_powernv_driver);
+}
+
+static struct crypto_alg nx842_powernv_alg = {
+	.cra_name		= "842",
+	.cra_driver_name	= "842-nx",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
+	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
+	.cra_module		= THIS_MODULE,
+	.cra_init		= nx842_powernv_crypto_init,
+	.cra_exit		= nx842_crypto_exit,
+	.cra_u			= { .compress = {
+	.coa_compress		= nx842_crypto_compress,
+	.coa_decompress		= nx842_crypto_decompress } }
+};
+
+static __init int nx842_powernv_init(void)
+{
+	struct device_node *dn;
+	int ret;
+
+	/* verify workmem size/align restrictions */
+	BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN);
+	BUILD_BUG_ON(CRB_ALIGN % DDE_ALIGN);
+	BUILD_BUG_ON(CRB_SIZE % DDE_ALIGN);
+	/* verify buffer size/align restrictions */
+	BUILD_BUG_ON(PAGE_SIZE % DDE_BUFFER_ALIGN);
+	BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
+	BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
+
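+	/* Probe P9 NX engines first and use the VAS path if any are found;
+	 * otherwise fall back to the legacy icswx path below.
+	 */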
+	for_each_compatible_node(dn, NULL, "ibm,power9-nx") {
+		ret = nx842_powernv_probe_vas(dn);
+		if (ret) {
+			nx842_delete_coprocs();
+			return ret;
+		}
+	}
+
+	if (list_empty(&nx842_coprocs)) {
+		for_each_compatible_node(dn, NULL, "ibm,power-nx")
+			nx842_powernv_probe(dn);
+
+		if (!nx842_ct)
+			return -ENODEV;
+
+		nx842_powernv_exec = nx842_exec_icswx;
+	} else {
+		ret = nx842_open_percpu_txwins();
+		if (ret) {
+			nx842_delete_coprocs();
+			return ret;
+		}
+
+		nx842_powernv_exec = nx842_exec_vas;
+	}
+
+	ret = crypto_register_alg(&nx842_powernv_alg);
+	if (ret) {
+		nx842_delete_coprocs();
+		return ret;
+	}
+
+	return 0;
+}
+module_init(nx842_powernv_init);
+
+static void __exit nx842_powernv_exit(void)
+{
+	crypto_unregister_alg(&nx842_powernv_alg);
+
+	nx842_delete_coprocs();
+}
+module_exit(nx842_powernv_exit);
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
new file mode 100644
index 0000000..6686997
--- /dev/null
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -0,0 +1,1146 @@
+/*
+ * Driver for IBM Power 842 compression accelerator
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
+ *          Seth Jennings <sjenning@linux.vnet.ibm.com>
+ */
+
+#include <asm/vio.h>
+
+#include "nx-842.h"
+#include "nx_csbcpb.h" /* struct nx_csbcpb */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
+
+static struct nx842_constraints nx842_pseries_constraints = {
+	.alignment =	DDE_BUFFER_ALIGN,
+	.multiple =	DDE_BUFFER_LAST_MULT,
+	.minimum =	DDE_BUFFER_LAST_MULT,
+	.maximum =	PAGE_SIZE, /* dynamic, max_sync_size */
+};
+
+static int check_constraints(unsigned long buf, unsigned int *len, bool in)
+{
+	if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) {
+		pr_debug("%s buffer 0x%lx not aligned to 0x%x\n",
+			 in ? "input" : "output", buf,
+			 nx842_pseries_constraints.alignment);
+		return -EINVAL;
+	}
+	if (*len % nx842_pseries_constraints.multiple) {
+		pr_debug("%s buffer len 0x%x not multiple of 0x%x\n",
+			 in ? "input" : "output", *len,
+			 nx842_pseries_constraints.multiple);
+		if (in)
+			return -EINVAL;
+		*len = round_down(*len, nx842_pseries_constraints.multiple);
+	}
+	if (*len < nx842_pseries_constraints.minimum) {
+		pr_debug("%s buffer len 0x%x under minimum 0x%x\n",
+			 in ? "input" : "output", *len,
+			 nx842_pseries_constraints.minimum);
+		return -EINVAL;
+	}
+	if (*len > nx842_pseries_constraints.maximum) {
+		pr_debug("%s buffer len 0x%x over maximum 0x%x\n",
+			 in ? "input" : "output", *len,
+			 nx842_pseries_constraints.maximum);
+		if (in)
+			return -EINVAL;
+		*len = nx842_pseries_constraints.maximum;
+	}
+	return 0;
+}
+
+/* I assume we need to align the CSB? */
+#define WORKMEM_ALIGN	(256)
+
+struct nx842_workmem {
+	/* scatterlist */
+	char slin[4096];
+	char slout[4096];
+	/* coprocessor status/parameter block */
+	struct nx_csbcpb csbcpb;
+
+	char padding[WORKMEM_ALIGN];
+} __aligned(WORKMEM_ALIGN);
+
+/* Macros for fields within nx_csbcpb */
+/* Check the valid bit within the csbcpb valid field */
+#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7))
+
+/* CE macros operate on the completion_extension field bits in the csbcpb.
+ * CE0 0=full completion, 1=partial completion
+ * CE1 0=CE0 indicates completion, 1=termination (output may be modified)
+ * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */
+#define NX842_CSBCPB_CE0(x)	(x & BIT_MASK(7))
+#define NX842_CSBCPB_CE1(x)	(x & BIT_MASK(6))
+#define NX842_CSBCPB_CE2(x)	(x & BIT_MASK(5))
+
+/* The NX unit accepts data only on 4K page boundaries */
+#define NX842_HW_PAGE_SIZE	(4096)
+#define NX842_HW_PAGE_MASK	(~(NX842_HW_PAGE_SIZE-1))
+
+struct ibm_nx842_counters {
+	atomic64_t comp_complete;
+	atomic64_t comp_failed;
+	atomic64_t decomp_complete;
+	atomic64_t decomp_failed;
+	atomic64_t swdecomp;
+	atomic64_t comp_times[32];
+	atomic64_t decomp_times[32];
+};
+
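+/* Readers access devdata under RCU; devdata_mutex serializes updaters. */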
+static struct nx842_devdata {
+	struct vio_dev *vdev;
+	struct device *dev;
+	struct ibm_nx842_counters *counters;
+	unsigned int max_sg_len;
+	unsigned int max_sync_size;
+	unsigned int max_sync_sg;
+} __rcu *devdata;
+static DEFINE_SPINLOCK(devdata_mutex);
+
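+/* Generates nx842_inc_<counter>() helpers that bump the named counter. */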
+#define NX842_COUNTER_INC(_x) \
+static inline void nx842_inc_##_x( \
+	const struct nx842_devdata *dev) { \
+	if (dev) \
+		atomic64_inc(&dev->counters->_x); \
+}
+NX842_COUNTER_INC(comp_complete);
+NX842_COUNTER_INC(comp_failed);
+NX842_COUNTER_INC(decomp_complete);
+NX842_COUNTER_INC(decomp_failed);
+NX842_COUNTER_INC(swdecomp);
+
+#define NX842_HIST_SLOTS 16
+
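+/* Record @time (in usec) into a log2-bucketed histogram, capped at the
+ * last slot.
+ */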
+static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
+{
+	int bucket = fls(time);
+
+	if (bucket)
+		bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);
+
+	atomic64_inc(&times[bucket]);
+}
+
+/* NX unit operation flags */
+#define NX842_OP_COMPRESS	0x0
+#define NX842_OP_CRC		0x1
+#define NX842_OP_DECOMPRESS	0x2
+#define NX842_OP_COMPRESS_CRC   (NX842_OP_COMPRESS | NX842_OP_CRC)
+#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
+#define NX842_OP_ASYNC		(1<<23)
+#define NX842_OP_NOTIFY		(1<<22)
+#define NX842_OP_NOTIFY_INT(x)	((x & 0xff)<<8)
+
+static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
+{
+	/* No use of DMA mappings within the driver. */
+	return 0;
+}
+
+struct nx842_slentry {
+	__be64 ptr; /* Real address (use __pa()) */
+	__be64 len;
+};
+
+/* pHyp scatterlist entry */
+struct nx842_scatterlist {
+	int entry_nr; /* number of slentries */
+	struct nx842_slentry *entries; /* ptr to array of slentries */
+};
+
+/* Does not include sizeof(entry_nr) in the size */
+static inline unsigned long nx842_get_scatterlist_size(
+				struct nx842_scatterlist *sl)
+{
+	return sl->entry_nr * sizeof(struct nx842_slentry);
+}
+
+static int nx842_build_scatterlist(unsigned long buf, int len,
+			struct nx842_scatterlist *sl)
+{
+	unsigned long entrylen;
+	struct nx842_slentry *entry;
+
+	sl->entry_nr = 0;
+
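+	/* Split the buffer into scatterlist entries at 4K page boundaries. */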
+	entry = sl->entries;
+	while (len) {
+		entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf));
+		entrylen = min_t(int, len,
+				 LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE));
+		entry->len = cpu_to_be64(entrylen);
+
+		len -= entrylen;
+		buf += entrylen;
+
+		sl->entry_nr++;
+		entry++;
+	}
+
+	return 0;
+}
+
+static int nx842_validate_result(struct device *dev,
+	struct cop_status_block *csb)
+{
+	/* The csb must be valid after returning from vio_h_cop_sync */
+	if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
+		dev_err(dev, "%s: csbcpb not valid upon completion.\n",
+				__func__);
+		dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
+				csb->valid,
+				csb->crb_seq_number,
+				csb->completion_code,
+				csb->completion_extension);
+		dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
+				be32_to_cpu(csb->processed_byte_count),
+				(unsigned long)be64_to_cpu(csb->address));
+		return -EIO;
+	}
+
+	/* Check return values from the hardware in the CSB */
+	switch (csb->completion_code) {
+	case 0:	/* Completed without error */
+		break;
+	case 64: /* Compression ok, but output larger than input */
+		dev_dbg(dev, "%s: output size larger than input size\n",
+					__func__);
+		break;
+	case 13: /* Output buffer too small */
+		dev_dbg(dev, "%s: Out of space in output buffer\n",
+					__func__);
+		return -ENOSPC;
+	case 65: /* Calculated CRC doesn't match the passed value */
+		dev_dbg(dev, "%s: CRC mismatch for decompression\n",
+					__func__);
+		return -EINVAL;
+	case 66: /* Input data contains an illegal template field */
+	case 67: /* Template indicates data past the end of the input stream */
+		dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
+					__func__, csb->completion_code);
+		return -EINVAL;
+	default:
+		dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
+					__func__, csb->completion_code);
+		return -EIO;
+	}
+
+	/* Hardware sanity check */
+	if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
+		dev_err(dev, "%s: No error returned by hardware, but "
+				"data returned is unusable, contact support.\n"
+				"(Additional info: csbcpb->processed bytes "
+				"does not specify processed bytes for the "
+				"target buffer.)\n", __func__);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * nx842_pseries_compress - Compress data using the 842 algorithm
+ *
+ * Compression provided by the NX842 coprocessor on IBM Power systems.
+ * The input buffer is compressed and the result is stored in the
+ * provided output buffer.
+ *
+ * Upon return from this function @outlen contains the length of the
+ * compressed data.  If there is an error then @outlen will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * @in: Pointer to input buffer
+ * @inlen: Length of input buffer
+ * @out: Pointer to output buffer
+ * @outlen: Length of output buffer
+ * @wrkmem: ptr to buffer for working memory, size determined by
+ *          nx842_pseries_driver.workmem_size
+ *
+ * Returns:
+ *   0		Success, output of length @outlen stored in the buffer at @out
+ *   -ENOMEM	Unable to allocate internal buffers
+ *   -ENOSPC	Output buffer is too small
+ *   -EIO	Internal error
+ *   -ENODEV	Hardware unavailable
+ */
+static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
+				  unsigned char *out, unsigned int *outlen,
+				  void *wmem)
+{
+	struct nx842_devdata *local_devdata;
+	struct device *dev = NULL;
+	struct nx842_workmem *workmem;
+	struct nx842_scatterlist slin, slout;
+	struct nx_csbcpb *csbcpb;
+	int ret = 0, max_sync_size;
+	unsigned long inbuf, outbuf;
+	struct vio_pfo_op op = {
+		.done = NULL,
+		.handle = 0,
+		.timeout = 0,
+	};
+	unsigned long start = get_tb();
+
+	inbuf = (unsigned long)in;
+	if (check_constraints(inbuf, &inlen, true))
+		return -EINVAL;
+
+	outbuf = (unsigned long)out;
+	if (check_constraints(outbuf, outlen, false))
+		return -EINVAL;
+
+	rcu_read_lock();
+	local_devdata = rcu_dereference(devdata);
+	if (!local_devdata || !local_devdata->dev) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	max_sync_size = local_devdata->max_sync_size;
+	dev = local_devdata->dev;
+
+	/* Init scatterlist */
+	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
+	slin.entries = (struct nx842_slentry *)workmem->slin;
+	slout.entries = (struct nx842_slentry *)workmem->slout;
+
+	/* Init operation */
+	op.flags = NX842_OP_COMPRESS_CRC;
+	csbcpb = &workmem->csbcpb;
+	memset(csbcpb, 0, sizeof(*csbcpb));
+	op.csbcpb = nx842_get_pa(csbcpb);
+
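+	/* A buffer contained within a single 4K page can use a direct DDE;
+	 * otherwise build an indirect DDE (scatterlist).
+	 */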
+	if ((inbuf & NX842_HW_PAGE_MASK) ==
+	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
+		/* Create direct DDE */
+		op.in = nx842_get_pa((void *)inbuf);
+		op.inlen = inlen;
+	} else {
+		/* Create indirect DDE (scatterlist) */
+		nx842_build_scatterlist(inbuf, inlen, &slin);
+		op.in = nx842_get_pa(slin.entries);
+		op.inlen = -nx842_get_scatterlist_size(&slin);
+	}
+
+	if ((outbuf & NX842_HW_PAGE_MASK) ==
+	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
+		/* Create direct DDE */
+		op.out = nx842_get_pa((void *)outbuf);
+		op.outlen = *outlen;
+	} else {
+		/* Create indirect DDE (scatterlist) */
+		nx842_build_scatterlist(outbuf, *outlen, &slout);
+		op.out = nx842_get_pa(slout.entries);
+		op.outlen = -nx842_get_scatterlist_size(&slout);
+	}
+
+	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
+		__func__, (unsigned long)op.in, (long)op.inlen,
+		(unsigned long)op.out, (long)op.outlen);
+
+	/* Send request to pHyp */
+	ret = vio_h_cop_sync(local_devdata->vdev, &op);
+
+	/* Check for pHyp error */
+	if (ret) {
+		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
+			__func__, ret, op.hcall_err);
+		ret = -EIO;
+		goto unlock;
+	}
+
+	/* Check for hardware error */
+	ret = nx842_validate_result(dev, &csbcpb->csb);
+	if (ret)
+		goto unlock;
+
+	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
+	dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen);
+
+unlock:
+	if (ret)
+		nx842_inc_comp_failed(local_devdata);
+	else {
+		nx842_inc_comp_complete(local_devdata);
+		ibm_nx842_incr_hist(local_devdata->counters->comp_times,
+			(get_tb() - start) / tb_ticks_per_usec);
+	}
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * nx842_pseries_decompress - Decompress data using the 842 algorithm
+ *
+ * Decompression provided by the NX842 coprocessor on IBM Power systems.
+ * The input buffer is decompressed and the result is stored in the
+ * provided output buffer.  The size allocated to the output buffer is
+ * provided by the caller of this function in @outlen.  Upon return from
+ * this function @outlen contains the length of the decompressed data.
+ * If there is an error then @outlen will be 0 and an error will be
+ * specified by the return code from this function.
+ *
+ * @in: Pointer to input buffer
+ * @inlen: Length of input buffer
+ * @out: Pointer to output buffer
+ * @outlen: Length of output buffer
+ * @wrkmem: ptr to buffer for working memory, size determined by
+ *          nx842_pseries_driver.workmem_size
+ *
+ * Returns:
+ *   0		Success, output of length @outlen stored in the buffer at @out
+ *   -ENODEV	Hardware decompression device is unavailable
+ *   -ENOMEM	Unable to allocate internal buffers
+ *   -ENOSPC	Output buffer is too small
+ *   -EINVAL	Bad input data encountered when attempting decompress
+ *   -EIO	Internal error
+ */
+static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
+				    unsigned char *out, unsigned int *outlen,
+				    void *wmem)
+{
+	struct nx842_devdata *local_devdata;
+	struct device *dev = NULL;
+	struct nx842_workmem *workmem;
+	struct nx842_scatterlist slin, slout;
+	struct nx_csbcpb *csbcpb;
+	int ret = 0, max_sync_size;
+	unsigned long inbuf, outbuf;
+	struct vio_pfo_op op = {
+		.done = NULL,
+		.handle = 0,
+		.timeout = 0,
+	};
+	unsigned long start = get_tb();
+
+	/* Ensure page alignment and size */
+	inbuf = (unsigned long)in;
+	if (check_constraints(inbuf, &inlen, true))
+		return -EINVAL;
+
+	outbuf = (unsigned long)out;
+	if (check_constraints(outbuf, outlen, false))
+		return -EINVAL;
+
+	rcu_read_lock();
+	local_devdata = rcu_dereference(devdata);
+	if (!local_devdata || !local_devdata->dev) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	max_sync_size = local_devdata->max_sync_size;
+	dev = local_devdata->dev;
+
+	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
+
+	/* Init scatterlist */
+	slin.entries = (struct nx842_slentry *)workmem->slin;
+	slout.entries = (struct nx842_slentry *)workmem->slout;
+
+	/* Init operation */
+	op.flags = NX842_OP_DECOMPRESS_CRC;
+	csbcpb = &workmem->csbcpb;
+	memset(csbcpb, 0, sizeof(*csbcpb));
+	op.csbcpb = nx842_get_pa(csbcpb);
+
+	if ((inbuf & NX842_HW_PAGE_MASK) ==
+	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
+		/* Create direct DDE */
+		op.in = nx842_get_pa((void *)inbuf);
+		op.inlen = inlen;
+	} else {
+		/* Create indirect DDE (scatterlist) */
+		nx842_build_scatterlist(inbuf, inlen, &slin);
+		op.in = nx842_get_pa(slin.entries);
+		op.inlen = -nx842_get_scatterlist_size(&slin);
+	}
+
+	if ((outbuf & NX842_HW_PAGE_MASK) ==
+	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
+		/* Create direct DDE */
+		op.out = nx842_get_pa((void *)outbuf);
+		op.outlen = *outlen;
+	} else {
+		/* Create indirect DDE (scatterlist) */
+		nx842_build_scatterlist(outbuf, *outlen, &slout);
+		op.out = nx842_get_pa(slout.entries);
+		op.outlen = -nx842_get_scatterlist_size(&slout);
+	}
+
+	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
+		__func__, (unsigned long)op.in, (long)op.inlen,
+		(unsigned long)op.out, (long)op.outlen);
+
+	/* Send request to pHyp */
+	ret = vio_h_cop_sync(local_devdata->vdev, &op);
+
+	/* Check for pHyp error */
+	if (ret) {
+		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
+			__func__, ret, op.hcall_err);
+		goto unlock;
+	}
+
+	/* Check for hardware error */
+	ret = nx842_validate_result(dev, &csbcpb->csb);
+	if (ret)
+		goto unlock;
+
+	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
+
+unlock:
+	if (ret)
+		/* decompress fail */
+		nx842_inc_decomp_failed(local_devdata);
+	else {
+		nx842_inc_decomp_complete(local_devdata);
+		ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
+			(get_tb() - start) / tb_ticks_per_usec);
+	}
+
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * nx842_OF_set_defaults -- Set default (disabled) values for devdata
+ *
+ * @devdata - struct nx842_devdata to update
+ *
+ * Returns:
+ *  0 on success
+ *  -ENOENT if @devdata ptr is NULL
+ */
+static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
+{
+	if (devdata) {
+		devdata->max_sync_size = 0;
+		devdata->max_sync_sg = 0;
+		devdata->max_sg_len = 0;
+		return 0;
+	} else
+		return -ENOENT;
+}
+
+/**
+ * nx842_OF_upd_status -- Check the device info from OF status prop
+ *
+ * The status property indicates whether the accelerator is enabled.  The
+ * presence of the device node in the OF tree indicates that the hardware
+ * is present; the device is enabled only when the status value is 'okay',
+ * otherwise the device driver will be disabled.
+ *
+ * @prop - struct property pointer containing the status value to check
+ *
+ * Returns:
+ *  0 - Device is available
+ *  -ENODEV - Device is not available
+ *  -EINVAL - Status value is not recognized
+ */
+static int nx842_OF_upd_status(struct property *prop)
+{
+	const char *status = (const char *)prop->value;
+
+	if (!strncmp(status, "okay", (size_t)prop->length))
+		return 0;
+	if (!strncmp(status, "disabled", (size_t)prop->length))
+		return -ENODEV;
+	dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);
+
+	return -EINVAL;
+}
+
+/**
+ * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop
+ *
+ * Definition of the 'ibm,max-sg-len' OF property:
+ *  This field indicates the maximum byte length of a scatter list
+ *  for the platform facility. It is a single cell encoded as with encode-int.
+ *
+ * Example:
+ *  # od -x ibm,max-sg-len
+ *  0000000 0000 0ff0
+ *
+ *  In this example, the maximum byte length of a scatter list is
+ *  0x0ff0 (4,080).
+ *
+ * @devdata - struct nx842_devdata to update
+ * @prop - struct property pointer containing the maxsglen for the update
+ *
+ * Returns:
+ *  0 on success
+ *  -EINVAL on failure
+ */
+static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
+					struct property *prop) {
+	int ret = 0;
+	const unsigned int maxsglen = of_read_number(prop->value, 1);
+
+	if (prop->length != sizeof(maxsglen)) {
+		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
+		dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
+				prop->length, sizeof(maxsglen));
+		ret = -EINVAL;
+	} else {
+		devdata->max_sg_len = min_t(unsigned int,
+					    maxsglen, NX842_HW_PAGE_SIZE);
+	}
+
+	return ret;
+}
+
+/**
+ * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop
+ *
+ * Definition of the 'ibm,max-sync-cop' OF property:
+ *  Two series of cells.  The first series of cells represents the maximums
+ *  that can be synchronously compressed. The second series of cells
+ *  represents the maximums that can be synchronously decompressed.
+ *  1. The first cell in each series contains the count of the
+ *     (data length, scatter list elements) pairs that follow, each being
+ *     of the form
+ *    a. One cell data byte length
+ *    b. One cell total number of scatter list elements
+ *
+ * Example:
+ *  # od -x ibm,max-sync-cop
+ *  0000000 0000 0001 0000 1000 0000 01fe 0000 0001
+ *  0000020 0000 1000 0000 01fe
+ *
+ *  In this example, compression supports 0x1000 (4,096) data byte length
+ *  and 0x1fe (510) total scatter list elements.  Decompression supports
+ *  0x1000 (4,096) data byte length and 0x1fe (510) total scatter list
+ *  elements.
+ *
+ * @devdata - struct nx842_devdata to update
+ * @prop - struct property pointer containing the maxsyncop for the update
+ *
+ * Returns:
+ *  0 on success
+ *  -EINVAL on failure
+ */
+static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
+					struct property *prop) {
+	int ret = 0;
+	unsigned int comp_data_limit, decomp_data_limit;
+	unsigned int comp_sg_limit, decomp_sg_limit;
+	const struct maxsynccop_t {
+		__be32 comp_elements;
+		__be32 comp_data_limit;
+		__be32 comp_sg_limit;
+		__be32 decomp_elements;
+		__be32 decomp_data_limit;
+		__be32 decomp_sg_limit;
+	} *maxsynccop;
+
+	if (prop->length != sizeof(*maxsynccop)) {
+		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
+		dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
+				sizeof(*maxsynccop));
+		ret = -EINVAL;
+		goto out;
+	}
+
+	maxsynccop = (const struct maxsynccop_t *)prop->value;
+	comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit);
+	comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit);
+	decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit);
+	decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit);
+
+	/* Use one limit rather than separate limits for compression and
+	 * decompression. Set a maximum for this so as not to exceed the
+	 * size that the header can support and round the value down to
+	 * the hardware page size (4K) */
+	devdata->max_sync_size = min(comp_data_limit, decomp_data_limit);
+
+	devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
+					65536);
+
+	if (devdata->max_sync_size < 4096) {
+		dev_err(devdata->dev, "%s: hardware max data size (%u) is "
+				"less than the driver minimum, unable to use "
+				"the hardware device\n",
+				__func__, devdata->max_sync_size);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	nx842_pseries_constraints.maximum = devdata->max_sync_size;
+
+	devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit);
+	if (devdata->max_sync_sg < 1) {
+		dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
+				"less than the driver minimum, unable to use "
+				"the hardware device\n",
+				__func__, devdata->max_sync_sg);
+		ret = -EINVAL;
+		goto out;
+	}
+
+out:
+	return ret;
+}
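+
+/*
+ * Illustrative sketch only, not part of the driver: how the example
+ * 'ibm,max-sync-cop' value quoted in the comment above maps onto the
+ * cell layout parsed by nx842_OF_upd_maxsyncop().  The struct and
+ * function names here are hypothetical, introduced just for this example.
+ */
+static void __maybe_unused nx842_example_maxsyncop_decode(void)
+{
+	struct example_maxsynccop {
+		__be32 comp_elements;	/* count of (length, sg) pairs */
+		__be32 comp_data_limit;
+		__be32 comp_sg_limit;
+		__be32 decomp_elements;	/* count of (length, sg) pairs */
+		__be32 decomp_data_limit;
+		__be32 decomp_sg_limit;
+	};
+	/* the "od -x ibm,max-sync-cop" example from the comment above */
+	const __be32 raw[6] = {
+		cpu_to_be32(0x00000001), cpu_to_be32(0x00001000),
+		cpu_to_be32(0x000001fe), cpu_to_be32(0x00000001),
+		cpu_to_be32(0x00001000), cpu_to_be32(0x000001fe),
+	};
+	const struct example_maxsynccop *m = (const void *)raw;
+
+	/* both directions: 0x1000 (4,096) bytes and 0x1fe (510) sg entries */
+	pr_debug("comp %u bytes/%u sg, decomp %u bytes/%u sg\n",
+		 be32_to_cpu(m->comp_data_limit),
+		 be32_to_cpu(m->comp_sg_limit),
+		 be32_to_cpu(m->decomp_data_limit),
+		 be32_to_cpu(m->decomp_sg_limit));
+}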
+
+/**
+ * nx842_OF_upd -- Handle OF property updates for the device.
+ *
+ * Set all properties from the OF tree.  Optionally, a new property
+ * can be provided by the @new_prop pointer to overwrite an existing value.
+ * The device will remain disabled until all values are valid; this function
+ * returns an error for an update unless all resulting values are valid.
+ *
+ * @new_prop: If not NULL, this property is being updated.  If NULL, update
+ *  all properties from the current values in the OF tree.
+ *
+ * Returns:
+ *  0 - Success
+ *  -ENOMEM - Could not allocate memory for new devdata structure
+ *  -EINVAL - property value not found, new_prop is not a recognized
+ *	property for the device or property value is not valid.
+ *  -ENODEV - Device is not available
+ */
+static int nx842_OF_upd(struct property *new_prop)
+{
+	struct nx842_devdata *old_devdata = NULL;
+	struct nx842_devdata *new_devdata = NULL;
+	struct device_node *of_node = NULL;
+	struct property *status = NULL;
+	struct property *maxsglen = NULL;
+	struct property *maxsyncop = NULL;
+	int ret = 0;
+	unsigned long flags;
+
+	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+	if (!new_devdata)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&devdata_mutex, flags);
+	old_devdata = rcu_dereference_check(devdata,
+			lockdep_is_held(&devdata_mutex));
+	if (old_devdata)
+		of_node = old_devdata->dev->of_node;
+
+	if (!old_devdata || !of_node) {
+		pr_err("%s: device is not available\n", __func__);
+		spin_unlock_irqrestore(&devdata_mutex, flags);
+		kfree(new_devdata);
+		return -ENODEV;
+	}
+
+	memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
+	new_devdata->counters = old_devdata->counters;
+
+	/* Set ptrs for existing properties */
+	status = of_find_property(of_node, "status", NULL);
+	maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
+	maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
+	if (!status || !maxsglen || !maxsyncop) {
+		dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
+		ret = -EINVAL;
+		goto error_out;
+	}
+
+	/*
+	 * If this is a property update, there are only certain properties that
+	 * we care about. Bail if it isn't in the below list
+	 */
+	if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) ||
+		         strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) ||
+		         strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length)))
+		goto out;
+
+	/* Perform property updates */
+	ret = nx842_OF_upd_status(status);
+	if (ret)
+		goto error_out;
+
+	ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
+	if (ret)
+		goto error_out;
+
+	ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
+	if (ret)
+		goto error_out;
+
+out:
+	dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
+			__func__, new_devdata->max_sync_size,
+			old_devdata->max_sync_size);
+	dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
+			__func__, new_devdata->max_sync_sg,
+			old_devdata->max_sync_sg);
+	dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
+			__func__, new_devdata->max_sg_len,
+			old_devdata->max_sg_len);
+
+	rcu_assign_pointer(devdata, new_devdata);
+	spin_unlock_irqrestore(&devdata_mutex, flags);
+	synchronize_rcu();
+	dev_set_drvdata(new_devdata->dev, new_devdata);
+	kfree(old_devdata);
+	return 0;
+
+error_out:
+	if (new_devdata) {
+		dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
+		nx842_OF_set_defaults(new_devdata);
+		rcu_assign_pointer(devdata, new_devdata);
+		spin_unlock_irqrestore(&devdata_mutex, flags);
+		synchronize_rcu();
+		dev_set_drvdata(new_devdata->dev, new_devdata);
+		kfree(old_devdata);
+	} else {
+		dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
+		spin_unlock_irqrestore(&devdata_mutex, flags);
+	}
+
+	if (!ret)
+		ret = -EINVAL;
+	return ret;
+}
+
+/**
+ * nx842_OF_notifier - Process updates to OF properties for the device
+ *
+ * @np: notifier block
+ * @action: notifier action
+ * @data: struct of_reconfig_data pointer if @action is
+ *	OF_RECONFIG_UPDATE_PROPERTY
+ *
+ * Returns:
+ *	NOTIFY_OK on success
+ *	NOTIFY_BAD encoded with error number on failure, use
+ *		notifier_to_errno() to decode this value
+ */
+static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
+			     void *data)
+{
+	struct of_reconfig_data *upd = data;
+	struct nx842_devdata *local_devdata;
+	struct device_node *node = NULL;
+
+	rcu_read_lock();
+	local_devdata = rcu_dereference(devdata);
+	if (local_devdata)
+		node = local_devdata->dev->of_node;
+
+	if (local_devdata &&
+			action == OF_RECONFIG_UPDATE_PROPERTY &&
+			!strcmp(upd->dn->name, node->name)) {
+		rcu_read_unlock();
+		nx842_OF_upd(upd->prop);
+	} else
+		rcu_read_unlock();
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block nx842_of_nb = {
+	.notifier_call = nx842_OF_notifier,
+};
+
+#define nx842_counter_read(_name)					\
+static ssize_t nx842_##_name##_show(struct device *dev,		\
+		struct device_attribute *attr,				\
+		char *buf) {						\
+	struct nx842_devdata *local_devdata;			\
+	int p = 0;							\
+	rcu_read_lock();						\
+	local_devdata = rcu_dereference(devdata);			\
+	if (local_devdata)						\
+		p = snprintf(buf, PAGE_SIZE, "%ld\n",			\
+		       atomic64_read(&local_devdata->counters->_name));	\
+	rcu_read_unlock();						\
+	return p;							\
+}
+
+#define NX842DEV_COUNTER_ATTR_RO(_name)					\
+	nx842_counter_read(_name);					\
+	static struct device_attribute dev_attr_##_name = __ATTR(_name,	\
+						0444,			\
+						nx842_##_name##_show,\
+						NULL);
+
+NX842DEV_COUNTER_ATTR_RO(comp_complete);
+NX842DEV_COUNTER_ATTR_RO(comp_failed);
+NX842DEV_COUNTER_ATTR_RO(decomp_complete);
+NX842DEV_COUNTER_ATTR_RO(decomp_failed);
+NX842DEV_COUNTER_ATTR_RO(swdecomp);
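+
+/* Each NX842DEV_COUNTER_ATTR_RO() use above expands to a read-only sysfs
+ * attribute; e.g. NX842DEV_COUNTER_ATTR_RO(comp_complete) defines
+ * dev_attr_comp_complete with nx842_comp_complete_show() as its show
+ * routine, printing the comp_complete counter.
+ */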
+
+static ssize_t nx842_timehist_show(struct device *,
+		struct device_attribute *, char *);
+
+static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
+		nx842_timehist_show, NULL);
+static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
+		0444, nx842_timehist_show, NULL);
+
+static ssize_t nx842_timehist_show(struct device *dev,
+		struct device_attribute *attr, char *buf) {
+	char *p = buf;
+	struct nx842_devdata *local_devdata;
+	atomic64_t *times;
+	int bytes_remain = PAGE_SIZE;
+	int bytes;
+	int i;
+
+	rcu_read_lock();
+	local_devdata = rcu_dereference(devdata);
+	if (!local_devdata) {
+		rcu_read_unlock();
+		return 0;
+	}
+
+	if (attr == &dev_attr_comp_times)
+		times = local_devdata->counters->comp_times;
+	else if (attr == &dev_attr_decomp_times)
+		times = local_devdata->counters->decomp_times;
+	else {
+		rcu_read_unlock();
+		return 0;
+	}
+
+	for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
+		bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n",
+			       i ? (2<<(i-1)) : 0, (2<<i)-1,
+			       atomic64_read(&times[i]));
+		bytes_remain -= bytes;
+		p += bytes;
+	}
+	/* The last bucket holds everything over
+	 * 2<<(NX842_HIST_SLOTS - 2) us */
+	bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n",
+			2<<(NX842_HIST_SLOTS - 2),
+			atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
+	p += bytes;
+
+	rcu_read_unlock();
+	return p - buf;
+}
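+
+/* The comp_times/decomp_times files produced above are histograms of
+ * operation times in microseconds with power-of-two bucket widths
+ * (0-1us, 2-3us, 4-7us, ...); the final line counts everything over
+ * 2<<(NX842_HIST_SLOTS - 2) us.
+ */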
+
+static struct attribute *nx842_sysfs_entries[] = {
+	&dev_attr_comp_complete.attr,
+	&dev_attr_comp_failed.attr,
+	&dev_attr_decomp_complete.attr,
+	&dev_attr_decomp_failed.attr,
+	&dev_attr_swdecomp.attr,
+	&dev_attr_comp_times.attr,
+	&dev_attr_decomp_times.attr,
+	NULL,
+};
+
+static struct attribute_group nx842_attribute_group = {
+	.name = NULL,		/* put in device directory */
+	.attrs = nx842_sysfs_entries,
+};
+
+static struct nx842_driver nx842_pseries_driver = {
+	.name =		KBUILD_MODNAME,
+	.owner =	THIS_MODULE,
+	.workmem_size =	sizeof(struct nx842_workmem),
+	.constraints =	&nx842_pseries_constraints,
+	.compress =	nx842_pseries_compress,
+	.decompress =	nx842_pseries_decompress,
+};
+
+static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
+{
+	return nx842_crypto_init(tfm, &nx842_pseries_driver);
+}
+
+static struct crypto_alg nx842_pseries_alg = {
+	.cra_name		= "842",
+	.cra_driver_name	= "842-nx",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
+	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
+	.cra_module		= THIS_MODULE,
+	.cra_init		= nx842_pseries_crypto_init,
+	.cra_exit		= nx842_crypto_exit,
+	.cra_u			= { .compress = {
+	.coa_compress		= nx842_crypto_compress,
+	.coa_decompress		= nx842_crypto_decompress } }
+};
+
+static int nx842_probe(struct vio_dev *viodev,
+		       const struct vio_device_id *id)
+{
+	struct nx842_devdata *old_devdata, *new_devdata = NULL;
+	unsigned long flags;
+	int ret = 0;
+
+	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+	if (!new_devdata)
+		return -ENOMEM;
+
+	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
+			GFP_NOFS);
+	if (!new_devdata->counters) {
+		kfree(new_devdata);
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&devdata_mutex, flags);
+	old_devdata = rcu_dereference_check(devdata,
+			lockdep_is_held(&devdata_mutex));
+
+	if (old_devdata && old_devdata->vdev != NULL) {
+		dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
+		ret = -1;
+		goto error_unlock;
+	}
+
+	dev_set_drvdata(&viodev->dev, NULL);
+
+	new_devdata->vdev = viodev;
+	new_devdata->dev = &viodev->dev;
+	nx842_OF_set_defaults(new_devdata);
+
+	rcu_assign_pointer(devdata, new_devdata);
+	spin_unlock_irqrestore(&devdata_mutex, flags);
+	synchronize_rcu();
+	kfree(old_devdata);
+
+	of_reconfig_notifier_register(&nx842_of_nb);
+
+	ret = nx842_OF_upd(NULL);
+	if (ret)
+		goto error;
+
+	ret = crypto_register_alg(&nx842_pseries_alg);
+	if (ret) {
+		dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
+		goto error;
+	}
+
+	rcu_read_lock();
+	dev_set_drvdata(&viodev->dev, rcu_dereference(devdata));
+	rcu_read_unlock();
+
+	if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
+		dev_err(&viodev->dev, "could not create sysfs device attributes\n");
+		ret = -1;
+		goto error;
+	}
+
+	return 0;
+
+error_unlock:
+	spin_unlock_irqrestore(&devdata_mutex, flags);
+	if (new_devdata)
+		kfree(new_devdata->counters);
+	kfree(new_devdata);
+error:
+	return ret;
+}
+
+static int nx842_remove(struct vio_dev *viodev)
+{
+	struct nx842_devdata *old_devdata;
+	unsigned long flags;
+
+	pr_info("Removing IBM Power 842 compression device\n");
+	sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);
+
+	crypto_unregister_alg(&nx842_pseries_alg);
+
+	spin_lock_irqsave(&devdata_mutex, flags);
+	old_devdata = rcu_dereference_check(devdata,
+			lockdep_is_held(&devdata_mutex));
+	of_reconfig_notifier_unregister(&nx842_of_nb);
+	RCU_INIT_POINTER(devdata, NULL);
+	spin_unlock_irqrestore(&devdata_mutex, flags);
+	synchronize_rcu();
+	dev_set_drvdata(&viodev->dev, NULL);
+	if (old_devdata)
+		kfree(old_devdata->counters);
+	kfree(old_devdata);
+
+	return 0;
+}
+
+static const struct vio_device_id nx842_vio_driver_ids[] = {
+	{"ibm,compression-v1", "ibm,compression"},
+	{"", ""},
+};
+
+static struct vio_driver nx842_vio_driver = {
+	.name = KBUILD_MODNAME,
+	.probe = nx842_probe,
+	.remove = nx842_remove,
+	.get_desired_dma = nx842_get_desired_dma,
+	.id_table = nx842_vio_driver_ids,
+};
+
+static int __init nx842_pseries_init(void)
+{
+	struct nx842_devdata *new_devdata;
+	int ret;
+
+	if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
+		return -ENODEV;
+
+	RCU_INIT_POINTER(devdata, NULL);
+	new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
+	if (!new_devdata)
+		return -ENOMEM;
+
+	RCU_INIT_POINTER(devdata, new_devdata);
+
+	ret = vio_register_driver(&nx842_vio_driver);
+	if (ret) {
+		pr_err("Could not register VIO driver %d\n", ret);
+
+		kfree(new_devdata);
+		return ret;
+	}
+
+	return 0;
+}
+
+module_init(nx842_pseries_init);
+
+static void __exit nx842_pseries_exit(void)
+{
+	struct nx842_devdata *old_devdata;
+	unsigned long flags;
+
+	crypto_unregister_alg(&nx842_pseries_alg);
+
+	spin_lock_irqsave(&devdata_mutex, flags);
+	old_devdata = rcu_dereference_check(devdata,
+			lockdep_is_held(&devdata_mutex));
+	RCU_INIT_POINTER(devdata, NULL);
+	spin_unlock_irqrestore(&devdata_mutex, flags);
+	synchronize_rcu();
+	if (old_devdata && old_devdata->dev)
+		dev_set_drvdata(old_devdata->dev, NULL);
+	kfree(old_devdata);
+	vio_unregister_driver(&nx842_vio_driver);
+}
+
+module_exit(nx842_pseries_exit);
+
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
new file mode 100644
index 0000000..d94e25d
--- /dev/null
+++ b/drivers/crypto/nx/nx-842.c
@@ -0,0 +1,531 @@
+/*
+ * Cryptographic API for the NX-842 hardware compression.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) IBM Corporation, 2011-2015
+ *
+ * Designer of the Power data compression engine:
+ *   Bulent Abali <abali@us.ibm.com>
+ *
+ * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
+ *                   Seth Jennings <sjenning@linux.vnet.ibm.com>
+ *
+ * Rewrite: Dan Streetman <ddstreet@ieee.org>
+ *
+ * This is an interface to the NX-842 compression hardware in PowerPC
+ * processors.  Most of the complexity of this driver is due to the fact that
+ * the NX-842 compression hardware requires the input and output data buffers
+ * to be specifically aligned, to be a specific multiple in length, and within
+ * specific minimum and maximum lengths.  Those restrictions, provided by the
+ * nx-842 driver via nx842_constraints, mean this driver must use bounce
+ * buffers and headers to correct misaligned in or out buffers, and to split
+ * input buffers that are too large.
+ *
+ * This driver will fall back to software decompression if the hardware
+ * decompression fails, so this driver's decompression should never fail as
+ * long as the provided compressed buffer is valid.  Any compressed buffer
+ * created by this driver will have a header (except ones where the input
+ * perfectly matches the constraints); so users of this driver cannot simply
+ * pass a compressed buffer created by this driver over to the 842 software
+ * decompression library.  Instead, users must use this driver to decompress;
+ * if the hardware fails or is unavailable, the compressed buffer will be
+ * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
+ * software decompression library.
+ *
+ * This does not fall back to software compression, however, since the caller
+ * of this function is specifically requesting hardware compression; if the
+ * hardware compression fails, the caller can fall back to software
+ * compression, and the raw 842 compressed buffer that the software compressor
+ * creates can be passed to this driver for hardware decompression; any
+ * buffer without our specific header magic is assumed to be a raw 842 buffer
+ * and passed directly to the hardware.  Note that the software compression
+ * library will produce a compressed buffer that is incompatible with the
+ * hardware decompressor if the original input buffer length is not a multiple
+ * of 8; if such a compressed buffer is passed to this driver for
+ * decompression, the hardware will reject it and this driver will then pass
+ * it over to the software library for decompression.
+ */
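+
+/*
+ * Layout sketch (inferred from the structures in nx-842.h and the compress
+ * path below): a compressed buffer that carries a header starts with
+ * struct nx842_crypto_header followed by one group descriptor per chunk;
+ * each descriptor records the padding that precedes its 842 data (for
+ * group 0 this includes the header itself), its compressed length and its
+ * uncompressed length.
+ */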
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/vmalloc.h>
+#include <linux/sw842.h>
+#include <linux/spinlock.h>
+
+#include "nx-842.h"
+
+/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
+ * template (see lib/842/842.h), so this magic number will never appear at
+ * the start of a raw 842 compressed buffer.  That is important, as any buffer
+ * passed to us without this magic is assumed to be a raw 842 compressed
+ * buffer, and passed directly to the hardware to decompress.
+ */
+#define NX842_CRYPTO_MAGIC	(0xf842)
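+/* 0xf842 is stored big-endian (cpu_to_be16) in the header, so the leading
+ * byte on the wire is 0xf8 = 0b11111000, whose top five bits are indeed
+ * the invalid 0x1f template noted above.
+ */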
+#define NX842_CRYPTO_HEADER_SIZE(g)				\
+	(sizeof(struct nx842_crypto_header) +			\
+	 sizeof(struct nx842_crypto_header_group) * (g))
+#define NX842_CRYPTO_HEADER_MAX_SIZE				\
+	NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
+
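+/* With the __packed layouts in nx-842.h the header itself is 5 bytes
+ * (2 + 2 + 1) and each group entry is 10 bytes (2 + 4 + 4), so
+ * NX842_CRYPTO_HEADER_SIZE(g) works out to 5 + 10 * g bytes and the
+ * largest header (NX842_CRYPTO_GROUP_MAX = 0x20 groups) to 325 bytes.
+ */
+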
+/* bounce buffer size */
+#define BOUNCE_BUFFER_ORDER	(2)
+#define BOUNCE_BUFFER_SIZE					\
+	((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
+
+/* try longer on comp because we can fallback to sw decomp if hw is busy */
+#define COMP_BUSY_TIMEOUT	(250) /* ms */
+#define DECOMP_BUSY_TIMEOUT	(50) /* ms */
+
+struct nx842_crypto_param {
+	u8 *in;
+	unsigned int iremain;
+	u8 *out;
+	unsigned int oremain;
+	unsigned int ototal;
+};
+
+static int update_param(struct nx842_crypto_param *p,
+			unsigned int slen, unsigned int dlen)
+{
+	if (p->iremain < slen)
+		return -EOVERFLOW;
+	if (p->oremain < dlen)
+		return -ENOSPC;
+
+	p->in += slen;
+	p->iremain -= slen;
+	p->out += dlen;
+	p->oremain -= dlen;
+	p->ototal += dlen;
+
+	return 0;
+}
+
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
+{
+	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	spin_lock_init(&ctx->lock);
+	ctx->driver = driver;
+	ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
+	ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+	ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+	if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
+		kfree(ctx->wmem);
+		free_page((unsigned long)ctx->sbounce);
+		free_page((unsigned long)ctx->dbounce);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_init);
+
+void nx842_crypto_exit(struct crypto_tfm *tfm)
+{
+	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	kfree(ctx->wmem);
+	free_page((unsigned long)ctx->sbounce);
+	free_page((unsigned long)ctx->dbounce);
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_exit);
+
+static void check_constraints(struct nx842_constraints *c)
+{
+	/* limit maximum, to always have enough bounce buffer to decompress */
+	if (c->maximum > BOUNCE_BUFFER_SIZE)
+		c->maximum = BOUNCE_BUFFER_SIZE;
+}
+
+static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
+{
+	int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
+
+	/* compress should have added space for header */
+	if (s > be16_to_cpu(hdr->group[0].padding)) {
+		pr_err("Internal error: no space for header\n");
+		return -EINVAL;
+	}
+
+	memcpy(buf, hdr, s);
+
+	print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
+
+	return 0;
+}
+
+static int compress(struct nx842_crypto_ctx *ctx,
+		    struct nx842_crypto_param *p,
+		    struct nx842_crypto_header_group *g,
+		    struct nx842_constraints *c,
+		    u16 *ignore,
+		    unsigned int hdrsize)
+{
+	unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
+	unsigned int adj_slen = slen;
+	u8 *src = p->in, *dst = p->out;
+	int ret, dskip = 0;
+	ktime_t timeout;
+
+	if (p->iremain == 0)
+		return -EOVERFLOW;
+
+	if (p->oremain == 0 || hdrsize + c->minimum > dlen)
+		return -ENOSPC;
+
+	if (slen % c->multiple)
+		adj_slen = round_up(slen, c->multiple);
+	if (slen < c->minimum)
+		adj_slen = c->minimum;
+	if (slen > c->maximum)
+		adj_slen = slen = c->maximum;
+	if (adj_slen > slen || (u64)src % c->alignment) {
+		adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
+		slen = min(slen, BOUNCE_BUFFER_SIZE);
+		if (adj_slen > slen)
+			memset(ctx->sbounce + slen, 0, adj_slen - slen);
+		memcpy(ctx->sbounce, src, slen);
+		src = ctx->sbounce;
+		slen = adj_slen;
+		pr_debug("using comp sbounce buffer, len %x\n", slen);
+	}
+
+	dst += hdrsize;
+	dlen -= hdrsize;
+
+	if ((u64)dst % c->alignment) {
+		dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
+		dst += dskip;
+		dlen -= dskip;
+	}
+	if (dlen % c->multiple)
+		dlen = round_down(dlen, c->multiple);
+	if (dlen < c->minimum) {
+nospc:
+		dst = ctx->dbounce;
+		dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
+		dlen = round_down(dlen, c->multiple);
+		dskip = 0;
+		pr_debug("using comp dbounce buffer, len %x\n", dlen);
+	}
+	if (dlen > c->maximum)
+		dlen = c->maximum;
+
+	tmplen = dlen;
+	timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
+	do {
+		dlen = tmplen; /* reset dlen, if we're retrying */
+		ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem);
+		/* possibly we should reduce the slen here, instead of
+		 * retrying with the dbounce buffer?
+		 */
+		if (ret == -ENOSPC && dst != ctx->dbounce)
+			goto nospc;
+	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+	if (ret)
+		return ret;
+
+	dskip += hdrsize;
+
+	if (dst == ctx->dbounce)
+		memcpy(p->out + dskip, dst, dlen);
+
+	g->padding = cpu_to_be16(dskip);
+	g->compressed_length = cpu_to_be32(dlen);
+	g->uncompressed_length = cpu_to_be32(slen);
+
+	if (p->iremain < slen) {
+		*ignore = slen - p->iremain;
+		slen = p->iremain;
+	}
+
+	pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
+		 slen, *ignore, dlen, dskip);
+
+	return update_param(p, slen, dskip + dlen);
+}
+
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+			  const u8 *src, unsigned int slen,
+			  u8 *dst, unsigned int *dlen)
+{
+	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct nx842_crypto_header *hdr = &ctx->header;
+	struct nx842_crypto_param p;
+	struct nx842_constraints c = *ctx->driver->constraints;
+	unsigned int groups, hdrsize, h;
+	int ret, n;
+	bool add_header;
+	u16 ignore = 0;
+
+	check_constraints(&c);
+
+	p.in = (u8 *)src;
+	p.iremain = slen;
+	p.out = dst;
+	p.oremain = *dlen;
+	p.ototal = 0;
+
+	*dlen = 0;
+
+	groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
+		       DIV_ROUND_UP(p.iremain, c.maximum));
+	hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
+
+	spin_lock_bh(&ctx->lock);
+
+	/* skip adding header if the buffers meet all constraints */
+	add_header = (p.iremain % c.multiple	||
+		      p.iremain < c.minimum	||
+		      p.iremain > c.maximum	||
+		      (u64)p.in % c.alignment	||
+		      p.oremain % c.multiple	||
+		      p.oremain < c.minimum	||
+		      p.oremain > c.maximum	||
+		      (u64)p.out % c.alignment);
+
+	hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
+	hdr->groups = 0;
+	hdr->ignore = 0;
+
+	while (p.iremain > 0) {
+		n = hdr->groups++;
+		ret = -ENOSPC;
+		if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
+			goto unlock;
+
+		/* header goes before first group */
+		h = !n && add_header ? hdrsize : 0;
+
+		if (ignore)
+			pr_warn("internal error, ignore is set %x\n", ignore);
+
+		ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
+		if (ret)
+			goto unlock;
+	}
+
+	if (!add_header && hdr->groups > 1) {
+		pr_err("Internal error: No header but multiple groups\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	/* ignore indicates the input stream needed to be padded */
+	hdr->ignore = cpu_to_be16(ignore);
+	if (ignore)
+		pr_debug("marked %d bytes as ignore\n", ignore);
+
+	if (add_header)
+		ret = nx842_crypto_add_header(hdr, dst);
+	if (ret)
+		goto unlock;
+
+	*dlen = p.ototal;
+
+	pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
+
+unlock:
+	spin_unlock_bh(&ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_compress);
+
+static int decompress(struct nx842_crypto_ctx *ctx,
+		      struct nx842_crypto_param *p,
+		      struct nx842_crypto_header_group *g,
+		      struct nx842_constraints *c,
+		      u16 ignore)
+{
+	unsigned int slen = be32_to_cpu(g->compressed_length);
+	unsigned int required_len = be32_to_cpu(g->uncompressed_length);
+	unsigned int dlen = p->oremain, tmplen;
+	unsigned int adj_slen = slen;
+	u8 *src = p->in, *dst = p->out;
+	u16 padding = be16_to_cpu(g->padding);
+	int ret, spadding = 0, dpadding = 0;
+	ktime_t timeout;
+
+	if (!slen || !required_len)
+		return -EINVAL;
+
+	if (p->iremain <= 0 || padding + slen > p->iremain)
+		return -EOVERFLOW;
+
+	if (p->oremain <= 0 || required_len - ignore > p->oremain)
+		return -ENOSPC;
+
+	src += padding;
+
+	if (slen % c->multiple)
+		adj_slen = round_up(slen, c->multiple);
+	if (slen < c->minimum)
+		adj_slen = c->minimum;
+	if (slen > c->maximum)
+		goto usesw;
+	if (slen < adj_slen || (u64)src % c->alignment) {
+		/* we can append padding bytes because the 842 format defines
+		 * an "end" template (see lib/842/842_decompress.c) and will
+		 * ignore any bytes following it.
+		 */
+		if (slen < adj_slen)
+			memset(ctx->sbounce + slen, 0, adj_slen - slen);
+		memcpy(ctx->sbounce, src, slen);
+		src = ctx->sbounce;
+		spadding = adj_slen - slen;
+		slen = adj_slen;
+		pr_debug("using decomp sbounce buffer, len %x\n", slen);
+	}
+
+	if (dlen % c->multiple)
+		dlen = round_down(dlen, c->multiple);
+	if (dlen < required_len || (u64)dst % c->alignment) {
+		dst = ctx->dbounce;
+		dlen = min(required_len, BOUNCE_BUFFER_SIZE);
+		pr_debug("using decomp dbounce buffer, len %x\n", dlen);
+	}
+	if (dlen < c->minimum)
+		goto usesw;
+	if (dlen > c->maximum)
+		dlen = c->maximum;
+
+	tmplen = dlen;
+	timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
+	do {
+		dlen = tmplen; /* reset dlen, if we're retrying */
+		ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem);
+	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+	if (ret) {
+usesw:
+		/* reset everything, sw doesn't have constraints */
+		src = p->in + padding;
+		slen = be32_to_cpu(g->compressed_length);
+		spadding = 0;
+		dst = p->out;
+		dlen = p->oremain;
+		dpadding = 0;
+		if (dlen < required_len) { /* have ignore bytes */
+			dst = ctx->dbounce;
+			dlen = BOUNCE_BUFFER_SIZE;
+		}
+		pr_info_ratelimited("using software 842 decompression\n");
+		ret = sw842_decompress(src, slen, dst, &dlen);
+	}
+	if (ret)
+		return ret;
+
+	slen -= spadding;
+
+	dlen -= ignore;
+	if (ignore)
+		pr_debug("ignoring last %x bytes\n", ignore);
+
+	if (dst == ctx->dbounce)
+		memcpy(p->out, dst, dlen);
+
+	pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
+		 slen, padding, dlen, ignore);
+
+	return update_param(p, slen + padding, dlen);
+}
+
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+			    const u8 *src, unsigned int slen,
+			    u8 *dst, unsigned int *dlen)
+{
+	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct nx842_crypto_header *hdr;
+	struct nx842_crypto_param p;
+	struct nx842_constraints c = *ctx->driver->constraints;
+	int n, ret, hdr_len;
+	u16 ignore = 0;
+
+	check_constraints(&c);
+
+	p.in = (u8 *)src;
+	p.iremain = slen;
+	p.out = dst;
+	p.oremain = *dlen;
+	p.ototal = 0;
+
+	*dlen = 0;
+
+	hdr = (struct nx842_crypto_header *)src;
+
+	spin_lock_bh(&ctx->lock);
+
+	/* If it doesn't start with our header magic number, assume it's a raw
+	 * 842 compressed buffer and pass it directly to the hardware driver
+	 */
+	if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
+		struct nx842_crypto_header_group g = {
+			.padding =		0,
+			.compressed_length =	cpu_to_be32(p.iremain),
+			.uncompressed_length =	cpu_to_be32(p.oremain),
+		};
+
+		ret = decompress(ctx, &p, &g, &c, 0);
+		if (ret)
+			goto unlock;
+
+		goto success;
+	}
+
+	if (!hdr->groups) {
+		pr_err("header has no groups\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
+		pr_err("header has too many groups %x, max %x\n",
+		       hdr->groups, NX842_CRYPTO_GROUP_MAX);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
+	if (hdr_len > slen) {
+		ret = -EOVERFLOW;
+		goto unlock;
+	}
+
+	memcpy(&ctx->header, src, hdr_len);
+	hdr = &ctx->header;
+
+	for (n = 0; n < hdr->groups; n++) {
+		/* ignore applies to last group */
+		if (n + 1 == hdr->groups)
+			ignore = be16_to_cpu(hdr->ignore);
+
+		ret = decompress(ctx, &p, &hdr->group[n], &c, ignore);
+		if (ret)
+			goto unlock;
+	}
+
+success:
+	*dlen = p.ototal;
+
+	pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
+
+	ret = 0;
+
+unlock:
+	spin_unlock_bh(&ctx->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_decompress);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Driver");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
new file mode 100644
index 0000000..b66f19a
--- /dev/null
+++ b/drivers/crypto/nx/nx-842.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NX_842_H__
+#define __NX_842_H__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/ratelimit.h>
+
+/* Restrictions on Data Descriptor List (DDL) and Entry (DDE) buffers
+ *
+ * From NX P8 workbook, sec 4.9.1 "842 details"
+ *   Each DDE buffer is 128 byte aligned
+ *   Each DDE buffer size is a multiple of 32 bytes (except the last)
+ *   The last DDE buffer size is a multiple of 8 bytes
+ */
+#define DDE_BUFFER_ALIGN	(128)
+#define DDE_BUFFER_SIZE_MULT	(32)
+#define DDE_BUFFER_LAST_MULT	(8)
+
+/* Arbitrary DDL length limit
+ * Allows max buffer size of MAX-1 to MAX pages
+ * (depending on alignment)
+ */
+#define DDL_LEN_MAX		(17)
+
+/* CCW 842 CI/FC masks
+ * NX P8 workbook, section 4.3.1, figure 4-6
+ * "CI/FC Boundary by NX CT type"
+ */
+#define CCW_CI_842		(0x00003ff8)
+#define CCW_FC_842		(0x00000007)
+
+/* CCW Function Codes (FC) for 842
+ * NX P8 workbook, section 4.9, table 4-28
+ * "Function Code Definitions for 842 Memory Compression"
+ */
+#define CCW_FC_842_COMP_NOCRC	(0)
+#define CCW_FC_842_COMP_CRC	(1)
+#define CCW_FC_842_DECOMP_NOCRC	(2)
+#define CCW_FC_842_DECOMP_CRC	(3)
+#define CCW_FC_842_MOVE		(4)
+
+/* CSB CC Error Types for 842
+ * NX P8 workbook, section 4.10.3, table 4-30
+ * "Reported Error Types Summary Table"
+ */
+/* These are all duplicates of existing codes defined in icswx.h. */
+#define CSB_CC_TRANSLATION_DUP1	(80)
+#define CSB_CC_TRANSLATION_DUP2	(82)
+#define CSB_CC_TRANSLATION_DUP3	(84)
+#define CSB_CC_TRANSLATION_DUP4	(86)
+#define CSB_CC_TRANSLATION_DUP5	(92)
+#define CSB_CC_TRANSLATION_DUP6	(94)
+#define CSB_CC_PROTECTION_DUP1	(81)
+#define CSB_CC_PROTECTION_DUP2	(83)
+#define CSB_CC_PROTECTION_DUP3	(85)
+#define CSB_CC_PROTECTION_DUP4	(87)
+#define CSB_CC_PROTECTION_DUP5	(93)
+#define CSB_CC_PROTECTION_DUP6	(95)
+#define CSB_CC_RD_EXTERNAL_DUP1	(89)
+#define CSB_CC_RD_EXTERNAL_DUP2	(90)
+#define CSB_CC_RD_EXTERNAL_DUP3	(91)
+/* These are specific to NX */
+/* 842 codes */
+#define CSB_CC_TPBC_GT_SPBC	(64) /* no error, but >1 comp ratio */
+#define CSB_CC_CRC_MISMATCH	(65) /* decomp crc mismatch */
+#define CSB_CC_TEMPL_INVALID	(66) /* decomp invalid template value */
+#define CSB_CC_TEMPL_OVERFLOW	(67) /* decomp template shows data after end */
+/* sym crypt codes */
+#define CSB_CC_DECRYPT_OVERFLOW	(64)
+/* asym crypt codes */
+#define CSB_CC_MINV_OVERFLOW	(128)
+/*
+ * HW error - Job did not finish in the maximum time allowed.
+ * Job terminated.
+ */
+#define CSB_CC_HW_EXPIRED_TIMER		(224)
+/* These are reserved for hypervisor use */
+#define CSB_CC_HYP_RESERVE_START	(240)
+#define CSB_CC_HYP_RESERVE_END		(253)
+#define CSB_CC_HYP_RESERVE_P9_END	(251)
+/* No valid interrupt server (P9 or later). */
+#define CSB_CC_HYP_RESERVE_NO_INTR_SERVER	(252)
+#define CSB_CC_HYP_NO_HW		(254)
+#define CSB_CC_HYP_HANG_ABORTED		(255)
+
+/* CCB Completion Modes (CM) for 842
+ * NX P8 workbook, section 4.3, figure 4-5
+ * "CRB Details - Normal Cop_Req (CL=00, C=1)"
+ */
+#define CCB_CM_EXTRA_WRITE	(CCB_CM0_ALL_COMPLETIONS & CCB_CM12_STORE)
+#define CCB_CM_INTERRUPT	(CCB_CM0_ALL_COMPLETIONS & CCB_CM12_INTERRUPT)
+
+#define LEN_ON_SIZE(pa, size)	((size) - ((pa) & ((size) - 1)))
+#define LEN_ON_PAGE(pa)		LEN_ON_SIZE(pa, PAGE_SIZE)
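+/* For example, LEN_ON_SIZE(pa, 4096) with a physical address ending in
+ * 0xf80 gives 0x1000 - 0xf80 = 0x80 bytes left before the next 4K boundary.
+ */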
+
+static inline unsigned long nx842_get_pa(void *addr)
+{
+	if (!is_vmalloc_addr(addr))
+		return __pa(addr);
+
+	return page_to_phys(vmalloc_to_page(addr)) + offset_in_page(addr);
+}
+
+/**
+ * This provides the driver's constraints.  Different nx842 implementations
+ * may have varying requirements.  The constraints are:
+ *   @alignment:	All buffers should be aligned to this
+ *   @multiple:		All buffer lengths should be a multiple of this
+ *   @minimum:		Buffer lengths must not be less than this amount
+ *   @maximum:		Buffer lengths must not be more than this amount
+ *
+ * The constraints apply to all buffers and lengths, both input and output,
+ * for both compression and decompression, except for the minimum which
+ * only applies to compression input and decompression output; the
+ * compressed data can be less than the minimum constraint.  It can be
+ * assumed that compressed data will always adhere to the multiple
+ * constraint.
+ *
+ * The driver may succeed even if these constraints are violated;
+ * however the driver can return failure or suffer reduced performance
+ * if any constraint is not met.
+ */
+struct nx842_constraints {
+	int alignment;
+	int multiple;
+	int minimum;
+	int maximum;
+};
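+
+/*
+ * Illustrative sketch only, not part of the driver API: one way a caller
+ * could pre-check a request against these constraints before resorting to
+ * bounce buffers, mirroring the add_header test in nx842_crypto_compress().
+ * The helper name is hypothetical.
+ */
+static inline bool nx842_example_fits_constraints(
+				const struct nx842_constraints *c,
+				const void *buf, unsigned int len)
+{
+	return !((unsigned long)buf % c->alignment) &&	/* aligned */
+	       !(len % c->multiple) &&			/* whole multiples */
+	       len >= c->minimum &&			/* not too short */
+	       len <= c->maximum;			/* not too long */
+}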
+
+struct nx842_driver {
+	char *name;
+	struct module *owner;
+	size_t workmem_size;
+
+	struct nx842_constraints *constraints;
+
+	int (*compress)(const unsigned char *in, unsigned int in_len,
+			unsigned char *out, unsigned int *out_len,
+			void *wrkmem);
+	int (*decompress)(const unsigned char *in, unsigned int in_len,
+			  unsigned char *out, unsigned int *out_len,
+			  void *wrkmem);
+};
+
+struct nx842_crypto_header_group {
+	__be16 padding;			/* unused bytes at start of group */
+	__be32 compressed_length;	/* compressed bytes in group */
+	__be32 uncompressed_length;	/* bytes after decompression */
+} __packed;
+
+struct nx842_crypto_header {
+	__be16 magic;		/* NX842_CRYPTO_MAGIC */
+	__be16 ignore;		/* decompressed end bytes to ignore */
+	u8 groups;		/* total groups in this header */
+	struct nx842_crypto_header_group group[];
+} __packed;
+
+#define NX842_CRYPTO_GROUP_MAX	(0x20)
+
+struct nx842_crypto_ctx {
+	spinlock_t lock;
+
+	u8 *wmem;
+	u8 *sbounce, *dbounce;
+
+	struct nx842_crypto_header header;
+	struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
+
+	struct nx842_driver *driver;
+};
+
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver);
+void nx842_crypto_exit(struct crypto_tfm *tfm);
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+			  const u8 *src, unsigned int slen,
+			  u8 *dst, unsigned int *dlen);
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+			    const u8 *src, unsigned int slen,
+			    u8 *dst, unsigned int *dlen);
+
+#endif /* __NX_842_H__ */
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
new file mode 100644
index 0000000..a066cc3
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -0,0 +1,150 @@
+/**
+ * AES CBC routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int cbc_aes_nx_set_key(struct crypto_tfm *tfm,
+			      const u8          *in_key,
+			      unsigned int       key_len)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+
+	nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+	switch (key_len) {
+	case AES_KEYSIZE_128:
+		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+		break;
+	case AES_KEYSIZE_192:
+		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
+		break;
+	case AES_KEYSIZE_256:
+		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_CBC;
+	memcpy(csbcpb->cpb.aes_cbc.key, in_key, key_len);
+
+	return 0;
+}
+
+static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
+			    struct scatterlist    *dst,
+			    struct scatterlist    *src,
+			    unsigned int           nbytes,
+			    int                    enc)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	unsigned long irq_flags;
+	unsigned int processed = 0, to_process;
+	int rc;
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+	if (enc)
+		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+	else
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+	do {
+		to_process = nbytes - processed;
+
+		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
+				       processed, csbcpb->cpb.aes_cbc.iv);
+		if (rc)
+			goto out;
+
+		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
+
+		memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(csbcpb->csb.processed_byte_count,
+			     &(nx_ctx->stats->aes_bytes));
+
+		processed += to_process;
+	} while (processed < nbytes);
+out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+	return rc;
+}
+
+static int cbc_aes_nx_encrypt(struct blkcipher_desc *desc,
+			      struct scatterlist    *dst,
+			      struct scatterlist    *src,
+			      unsigned int           nbytes)
+{
+	return cbc_aes_nx_crypt(desc, dst, src, nbytes, 1);
+}
+
+static int cbc_aes_nx_decrypt(struct blkcipher_desc *desc,
+			      struct scatterlist    *dst,
+			      struct scatterlist    *src,
+			      unsigned int           nbytes)
+{
+	return cbc_aes_nx_crypt(desc, dst, src, nbytes, 0);
+}
+
+struct crypto_alg nx_cbc_aes_alg = {
+	.cra_name        = "cbc(aes)",
+	.cra_driver_name = "cbc-aes-nx",
+	.cra_priority    = 300,
+	.cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize   = AES_BLOCK_SIZE,
+	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+	.cra_type        = &crypto_blkcipher_type,
+	.cra_alignmask   = 0xf,
+	.cra_module      = THIS_MODULE,
+	.cra_init        = nx_crypto_ctx_aes_cbc_init,
+	.cra_exit        = nx_crypto_ctx_exit,
+	.cra_blkcipher = {
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize      = AES_BLOCK_SIZE,
+		.setkey      = cbc_aes_nx_set_key,
+		.encrypt     = cbc_aes_nx_encrypt,
+		.decrypt     = cbc_aes_nx_decrypt,
+	}
+};
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
new file mode 100644
index 0000000..7038f36
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -0,0 +1,595 @@
+/**
+ * AES CCM routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
+			      const u8           *in_key,
+			      unsigned int        key_len)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
+
+	nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+	switch (key_len) {
+	case AES_KEYSIZE_128:
+		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
+	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);
+
+	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
+	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);
+
+	return 0;
+
+}
+
+static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
+				  const u8           *in_key,
+				  unsigned int        key_len)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+
+	if (key_len < 3)
+		return -EINVAL;
+
+	key_len -= 3;
+
+	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);
+
+	return ccm_aes_nx_set_key(tfm, in_key, key_len);
+}
+
+static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
+				  unsigned int authsize)
+{
+	switch (authsize) {
+	case 4:
+	case 6:
+	case 8:
+	case 10:
+	case 12:
+	case 14:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
+				      unsigned int authsize)
+{
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* taken from crypto/ccm.c */
+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+	__be32 data;
+
+	memset(block, 0, csize);
+	block += csize;
+
+	if (csize >= 4)
+		csize = 4;
+	else if (msglen > (unsigned int)(1 << (8 * csize)))
+		return -EOVERFLOW;
+
+	data = cpu_to_be32(msglen);
+	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+	return 0;
+}
+
+/* taken from crypto/ccm.c */
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+	/* 2 <= L <= 8, so 1 <= L' <= 7. */
+	if (1 > iv[0] || iv[0] > 7)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* based on code from crypto/ccm.c */
+static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
+		       unsigned int cryptlen, u8 *b0)
+{
+	unsigned int l, lp, m = authsize;
+	int rc;
+
+	memcpy(b0, iv, 16);
+
+	lp = b0[0];
+	l = lp + 1;
+
+	/* set m, bits 3-5 */
+	*b0 |= (8 * ((m - 2) / 2));
+
+	/* set adata, bit 6, if associated data is used */
+	if (assoclen)
+		*b0 |= 64;
+
+	rc = set_msg_len(b0 + 16 - l, cryptlen, l);
+
+	return rc;
+}
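+
+/* Worked example (values chosen for illustration): with iv[0] = L' = 3
+ * (i.e. L = 4 length octets), authsize m = 16 and associated data present,
+ * the flags octet built above becomes 64 | 8 * ((16 - 2) / 2) | 3 =
+ * 0x40 | 0x38 | 0x03 = 0x7b, matching the B0 flags layout of RFC 3610
+ * (Adata, M' = (M - 2) / 2, L' = L - 1).
+ */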
+
+static int generate_pat(u8                   *iv,
+			struct aead_request  *req,
+			struct nx_crypto_ctx *nx_ctx,
+			unsigned int          authsize,
+			unsigned int          nbytes,
+			unsigned int	      assoclen,
+			u8                   *out)
+{
+	struct nx_sg *nx_insg = nx_ctx->in_sg;
+	struct nx_sg *nx_outsg = nx_ctx->out_sg;
+	unsigned int iauth_len = 0;
+	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
+	int rc;
+	unsigned int max_sg_len;
+
+	/* zero the ctr value */
+	memset(iv + 15 - iv[0], 0, iv[0] + 1);
+
+	/* page 78 of nx_wb.pdf has,
+	 * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes
+	 * in length. If a full message is used, the AES CCA implementation
+	 * restricts the maximum AAD length to 2^32 -1 bytes.
+	 * If partial messages are used, the implementation supports
+	 * 2^64 -1 bytes maximum AAD length.
+	 *
+	 * However, in the cryptoapi's aead_request structure,
+	 * assoclen is an unsigned int, thus it cannot hold a length
+	 * value greater than 2^32 - 1.
+	 * Thus the AAD is further constrained by this and is never
+	 * greater than 2^32.
+	 */
+
+	if (!assoclen) {
+		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
+	} else if (assoclen <= 14) {
+		/* if associated data is 14 bytes or less, we do 1 GCM
+		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
+		 * which is fed in through the source buffers here */
+		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
+		b1 = nx_ctx->priv.ccm.iauth_tag;
+		iauth_len = assoclen;
+	} else if (assoclen <= 65280) {
+		/* if associated data is less than (2^16 - 2^8), we construct
+		 * B1 differently and feed in the associated data to a CCA
+		 * operation */
+		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
+		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
+		iauth_len = 14;
+	} else {
+		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
+		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
+		iauth_len = 10;
+	}
+
+	/* generate B0 */
+	rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
+	if (rc)
+		return rc;
+
+	/* generate B1:
+	 * add control info for associated data
+	 * RFC 3610 and NIST Special Publication 800-38C
+	 */
+	if (b1) {
+		memset(b1, 0, 16);
+		if (assoclen <= 65280) {
+			*(u16 *)b1 = assoclen;
+			scatterwalk_map_and_copy(b1 + 2, req->src, 0,
+					 iauth_len, SCATTERWALK_FROM_SG);
+		} else {
+			*(u16 *)b1 = (u16)(0xfffe);
+			*(u32 *)&b1[2] = assoclen;
+			scatterwalk_map_and_copy(b1 + 6, req->src, 0,
+					 iauth_len, SCATTERWALK_FROM_SG);
+		}
+	}
+
+	/* now copy any remaining AAD to scatterlist and call nx... */
+	if (!assoclen) {
+		return rc;
+	} else if (assoclen <= 14) {
+		unsigned int len = 16;
+
+		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);
+
+		if (len != 16)
+			return -EINVAL;
+
+		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
+					    nx_ctx->ap->sglen);
+
+		if (len != 16)
+			return -EINVAL;
+
+		/* inlen should be negative, indicating to pHyp that it's a
+		 * pointer to an sg list */
+		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
+					sizeof(struct nx_sg);
+		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
+					sizeof(struct nx_sg);
+
+		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;
+
+		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			return rc;
+
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
+
+	} else {
+		unsigned int processed = 0, to_process;
+
+		processed += iauth_len;
+
+		/* page_limit: number of sg entries that fit on one page */
+		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+				nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+		max_sg_len = min_t(u64, max_sg_len,
+				nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+		do {
+			to_process = min_t(u32, assoclen - processed,
+					   nx_ctx->ap->databytelen);
+
+			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
+						    nx_ctx->ap->sglen,
+						    req->src, processed,
+						    &to_process);
+
+			if ((to_process + processed) < assoclen) {
+				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
+					NX_FDM_INTERMEDIATE;
+			} else {
+				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
+					~NX_FDM_INTERMEDIATE;
+			}
+
+
+			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
+						sizeof(struct nx_sg);
+
+			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
+
+			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
+				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+			if (rc)
+				return rc;
+
+			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
+				nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
+				AES_BLOCK_SIZE);
+
+			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;
+
+			atomic_inc(&(nx_ctx->stats->aes_ops));
+			atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
+
+			processed += to_process;
+		} while (processed < assoclen);
+
+		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
+	}
+
+	memcpy(out, result, AES_BLOCK_SIZE);
+
+	return rc;
+}
+
+static int ccm_nx_decrypt(struct aead_request   *req,
+			  struct blkcipher_desc *desc,
+			  unsigned int assoclen)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	unsigned int nbytes = req->cryptlen;
+	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
+	unsigned long irq_flags;
+	unsigned int processed = 0, to_process;
+	int rc = -1;
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+	nbytes -= authsize;
+
+	/* copy out the auth tag to compare with later */
+	scatterwalk_map_and_copy(priv->oauth_tag,
+				 req->src, nbytes + req->assoclen, authsize,
+				 SCATTERWALK_FROM_SG);
+
+	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
+	if (rc)
+		goto out;
+
+	do {
+
+		/* to_process: the data chunk to process in this update.
+		 * This value is bounded by sg list limits.
+		 */
+		to_process = nbytes - processed;
+
+		if ((to_process + processed) < nbytes)
+			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+		else
+			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+				       &to_process, processed + req->assoclen,
+				       csbcpb->cpb.aes_ccm.iv_or_ctr);
+		if (rc)
+			goto out;
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
+
+		/* for partial completion, copy following for next
+		 * entry into loop...
+		 */
+		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
+			csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_ccm.in_s0,
+			csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
+
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+		/* update stats */
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(csbcpb->csb.processed_byte_count,
+			     &(nx_ctx->stats->aes_bytes));
+
+		processed += to_process;
+	} while (processed < nbytes);
+
+	rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
+		    authsize) ? -EBADMSG : 0;
+out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+	return rc;
+}
+
+static int ccm_nx_encrypt(struct aead_request   *req,
+			  struct blkcipher_desc *desc,
+			  unsigned int assoclen)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	unsigned int nbytes = req->cryptlen;
+	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+	unsigned long irq_flags;
+	unsigned int processed = 0, to_process;
+	int rc = -1;
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
+	if (rc)
+		goto out;
+
+	do {
+		/* to_process: the data chunk to process in this update.
+		 * This value is bounded by sg list limits.
+		 */
+		to_process = nbytes - processed;
+
+		if ((to_process + processed) < nbytes)
+			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+		else
+			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+
+		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+				       &to_process, processed + req->assoclen,
+				       csbcpb->cpb.aes_ccm.iv_or_ctr);
+		if (rc)
+			goto out;
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
+
+		/* for partial completion, copy following for next
+		 * entry into loop...
+		 */
+		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
+			csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_ccm.in_s0,
+			csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
+
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+		/* update stats */
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(csbcpb->csb.processed_byte_count,
+			     &(nx_ctx->stats->aes_bytes));
+
+		processed += to_process;
+
+	} while (processed < nbytes);
+
+	/* copy out the auth tag */
+	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
+				 req->dst, nbytes + req->assoclen, authsize,
+				 SCATTERWALK_TO_SG);
+
+out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+	return rc;
+}
+
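+/* RFC 4309 template: the 11-byte CCM nonce is built from the 3-byte salt
+ * kept with the key plus the 8-byte per-request IV, and iv[0] = 3 selects
+ * the 4-byte length field (L' = L - 1). In this AEAD interface the 8 IV
+ * bytes ride at the tail of the associated data, hence "req->assoclen - 8"
+ * below.
+ */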
+static int ccm4309_aes_nx_encrypt(struct aead_request *req)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+	struct blkcipher_desc desc;
+	u8 *iv = rctx->iv;
+
+	iv[0] = 3;
+	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
+	memcpy(iv + 4, req->iv, 8);
+
+	desc.info = iv;
+
+	return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
+}
+
+static int ccm_aes_nx_encrypt(struct aead_request *req)
+{
+	struct blkcipher_desc desc;
+	int rc;
+
+	desc.info = req->iv;
+
+	rc = crypto_ccm_check_iv(desc.info);
+	if (rc)
+		return rc;
+
+	return ccm_nx_encrypt(req, &desc, req->assoclen);
+}
+
+static int ccm4309_aes_nx_decrypt(struct aead_request *req)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+	struct blkcipher_desc desc;
+	u8 *iv = rctx->iv;
+
+	iv[0] = 3;
+	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
+	memcpy(iv + 4, req->iv, 8);
+
+	desc.info = iv;
+
+	return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
+}
+
+static int ccm_aes_nx_decrypt(struct aead_request *req)
+{
+	struct blkcipher_desc desc;
+	int rc;
+
+	desc.info = req->iv;
+
+	rc = crypto_ccm_check_iv(desc.info);
+	if (rc)
+		return rc;
+
+	return ccm_nx_decrypt(req, &desc, req->assoclen);
+}
+
+/* tell the block cipher walk routines that this is a stream cipher by
+ * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
+ * during encrypt/decrypt doesn't solve this problem, because it calls
+ * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
+ * but instead uses this tfm->blocksize. */
+struct aead_alg nx_ccm_aes_alg = {
+	.base = {
+		.cra_name        = "ccm(aes)",
+		.cra_driver_name = "ccm-aes-nx",
+		.cra_priority    = 300,
+		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize   = 1,
+		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+		.cra_module      = THIS_MODULE,
+	},
+	.init        = nx_crypto_ctx_aes_ccm_init,
+	.exit        = nx_crypto_ctx_aead_exit,
+	.ivsize      = AES_BLOCK_SIZE,
+	.maxauthsize = AES_BLOCK_SIZE,
+	.setkey      = ccm_aes_nx_set_key,
+	.setauthsize = ccm_aes_nx_setauthsize,
+	.encrypt     = ccm_aes_nx_encrypt,
+	.decrypt     = ccm_aes_nx_decrypt,
+};
+
+struct aead_alg nx_ccm4309_aes_alg = {
+	.base = {
+		.cra_name        = "rfc4309(ccm(aes))",
+		.cra_driver_name = "rfc4309-ccm-aes-nx",
+		.cra_priority    = 300,
+		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize   = 1,
+		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+		.cra_module      = THIS_MODULE,
+	},
+	.init        = nx_crypto_ctx_aes_ccm_init,
+	.exit        = nx_crypto_ctx_aead_exit,
+	.ivsize      = 8,
+	.maxauthsize = AES_BLOCK_SIZE,
+	.setkey      = ccm4309_aes_nx_set_key,
+	.setauthsize = ccm4309_aes_nx_setauthsize,
+	.encrypt     = ccm4309_aes_nx_encrypt,
+	.decrypt     = ccm4309_aes_nx_decrypt,
+};
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
new file mode 100644
index 0000000..898c0a2
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -0,0 +1,167 @@
+/**
+ * AES CTR routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int ctr_aes_nx_set_key(struct crypto_tfm *tfm,
+			      const u8          *in_key,
+			      unsigned int       key_len)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+
+	nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+	switch (key_len) {
+	case AES_KEYSIZE_128:
+		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+		break;
+	case AES_KEYSIZE_192:
+		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
+		break;
+	case AES_KEYSIZE_256:
+		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_CTR;
+	memcpy(csbcpb->cpb.aes_ctr.key, in_key, key_len);
+
+	return 0;
+}
+
+static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
+				  const u8          *in_key,
+				  unsigned int       key_len)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+
+	if (key_len < CTR_RFC3686_NONCE_SIZE)
+		return -EINVAL;
+
+	memcpy(nx_ctx->priv.ctr.nonce,
+	       in_key + key_len - CTR_RFC3686_NONCE_SIZE,
+	       CTR_RFC3686_NONCE_SIZE);
+
+	key_len -= CTR_RFC3686_NONCE_SIZE;
+
+	return ctr_aes_nx_set_key(tfm, in_key, key_len);
+}
+
+static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
+			    struct scatterlist    *dst,
+			    struct scatterlist    *src,
+			    unsigned int           nbytes)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	unsigned long irq_flags;
+	unsigned int processed = 0, to_process;
+	int rc;
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+	do {
+		to_process = nbytes - processed;
+
+		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
+				       processed, csbcpb->cpb.aes_ctr.iv);
+		if (rc)
+			goto out;
+
+		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
+
+		memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
+
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(csbcpb->csb.processed_byte_count,
+			     &(nx_ctx->stats->aes_bytes));
+
+		processed += to_process;
+	} while (processed < nbytes);
+out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+	return rc;
+}
+
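+/* RFC 3686 counter block: 4-byte nonce || 8-byte per-request IV || 4-byte
+ * block counter initialized to 1, assembled on the stack before falling
+ * through to the plain CTR path.
+ */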
+static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
+				struct scatterlist    *dst,
+				struct scatterlist    *src,
+				unsigned int           nbytes)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+	u8 iv[16];
+
+	memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
+	memcpy(iv + CTR_RFC3686_NONCE_SIZE,
+	       desc->info, CTR_RFC3686_IV_SIZE);
+	iv[12] = iv[13] = iv[14] = 0;
+	iv[15] = 1;
+
+	desc->info = iv;
+
+	return ctr_aes_nx_crypt(desc, dst, src, nbytes);
+}
+
+struct crypto_alg nx_ctr3686_aes_alg = {
+	.cra_name        = "rfc3686(ctr(aes))",
+	.cra_driver_name = "rfc3686-ctr-aes-nx",
+	.cra_priority    = 300,
+	.cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize   = 1,
+	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+	.cra_type        = &crypto_blkcipher_type,
+	.cra_module      = THIS_MODULE,
+	.cra_init        = nx_crypto_ctx_aes_ctr_init,
+	.cra_exit        = nx_crypto_ctx_exit,
+	.cra_blkcipher = {
+		.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+		.ivsize      = CTR_RFC3686_IV_SIZE,
+		.geniv       = "seqiv",
+		.setkey      = ctr3686_aes_nx_set_key,
+		.encrypt     = ctr3686_aes_nx_crypt,
+		.decrypt     = ctr3686_aes_nx_crypt,
+	}
+};
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
new file mode 100644
index 0000000..cfdde8b
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -0,0 +1,149 @@
+/**
+ * AES ECB routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int ecb_aes_nx_set_key(struct crypto_tfm *tfm,
+			      const u8          *in_key,
+			      unsigned int       key_len)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+
+	nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+	switch (key_len) {
+	case AES_KEYSIZE_128:
+		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+		break;
+	case AES_KEYSIZE_192:
+		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
+		break;
+	case AES_KEYSIZE_256:
+		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
+	memcpy(csbcpb->cpb.aes_ecb.key, in_key, key_len);
+
+	return 0;
+}
+
+static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
+			    struct scatterlist    *dst,
+			    struct scatterlist    *src,
+			    unsigned int           nbytes,
+			    int                    enc)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	unsigned long irq_flags;
+	unsigned int processed = 0, to_process;
+	int rc;
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+	if (enc)
+		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+	else
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+	do {
+		to_process = nbytes - processed;
+
+		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
+				processed, NULL);
+		if (rc)
+			goto out;
+
+		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
+
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(csbcpb->csb.processed_byte_count,
+			     &(nx_ctx->stats->aes_bytes));
+
+		processed += to_process;
+	} while (processed < nbytes);
+
+out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+	return rc;
+}
+
+static int ecb_aes_nx_encrypt(struct blkcipher_desc *desc,
+			      struct scatterlist    *dst,
+			      struct scatterlist    *src,
+			      unsigned int           nbytes)
+{
+	return ecb_aes_nx_crypt(desc, dst, src, nbytes, 1);
+}
+
+static int ecb_aes_nx_decrypt(struct blkcipher_desc *desc,
+			      struct scatterlist    *dst,
+			      struct scatterlist    *src,
+			      unsigned int           nbytes)
+{
+	return ecb_aes_nx_crypt(desc, dst, src, nbytes, 0);
+}
+
+struct crypto_alg nx_ecb_aes_alg = {
+	.cra_name        = "ecb(aes)",
+	.cra_driver_name = "ecb-aes-nx",
+	.cra_priority    = 300,
+	.cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize   = AES_BLOCK_SIZE,
+	.cra_alignmask   = 0xf,
+	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+	.cra_type        = &crypto_blkcipher_type,
+	.cra_module      = THIS_MODULE,
+	.cra_init        = nx_crypto_ctx_aes_ecb_init,
+	.cra_exit        = nx_crypto_ctx_exit,
+	.cra_blkcipher = {
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey      = ecb_aes_nx_set_key,
+		.encrypt     = ecb_aes_nx_encrypt,
+		.decrypt     = ecb_aes_nx_decrypt,
+	}
+};
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
new file mode 100644
index 0000000..a810596
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -0,0 +1,526 @@
+/**
+ * AES GCM routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/gcm.h>
+#include <crypto/scatterwalk.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
+			      const u8           *in_key,
+			      unsigned int        key_len)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
+
+	nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+	switch (key_len) {
+	case AES_KEYSIZE_128:
+		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+		break;
+	case AES_KEYSIZE_192:
+		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
+		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
+		break;
+	case AES_KEYSIZE_256:
+		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
+		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
+	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);
+
+	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
+	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);
+
+	return 0;
+}
+
+static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
+				  const u8           *in_key,
+				  unsigned int        key_len)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
+	char *nonce = nx_ctx->priv.gcm.nonce;
+	int rc;
+
+	if (key_len < 4)
+		return -EINVAL;
+
+	key_len -= 4;
+
+	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
+	if (rc)
+		goto out;
+
+	memcpy(nonce, in_key + key_len, 4);
+out:
+	return rc;
+}
+
+static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
+				      unsigned int authsize)
+{
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
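+/* Hash the associated data with the GCA function so its running pattern can
+ * seed the main GCM operation. AAD that fits in a single AES block is copied
+ * straight into the in_pat_or_aad field instead of being pre-hashed.
+ */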
+static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
+		  struct aead_request   *req,
+		  u8                    *out,
+		  unsigned int assoclen)
+{
+	int rc;
+	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
+	struct scatter_walk walk;
+	struct nx_sg *nx_sg = nx_ctx->in_sg;
+	unsigned int nbytes = assoclen;
+	unsigned int processed = 0, to_process;
+	unsigned int max_sg_len;
+
+	if (nbytes <= AES_BLOCK_SIZE) {
+		scatterwalk_start(&walk, req->src);
+		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
+		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
+		return 0;
+	}
+
+	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;
+
+	/* page_limit: number of sg entries that fit on one page */
+	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+			   nx_ctx->ap->sglen);
+	max_sg_len = min_t(u64, max_sg_len,
+			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+	do {
+		/*
+		 * to_process: the data chunk to process in this update.
+		 * This value is bound by sg list limits.
+		 */
+		to_process = min_t(u64, nbytes - processed,
+				   nx_ctx->ap->databytelen);
+		to_process = min_t(u64, to_process,
+				   NX_PAGE_SIZE * (max_sg_len - 1));
+
+		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
+					  req->src, processed, &to_process);
+
+		if ((to_process + processed) < nbytes)
+			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
+		else
+			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;
+
+		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
+					* sizeof(struct nx_sg);
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
+				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			return rc;
+
+		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
+				csbcpb_aead->cpb.aes_gca.out_pat,
+				AES_BLOCK_SIZE);
+		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;
+
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
+
+		processed += to_process;
+	} while (processed < nbytes);
+
+	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
+
+	return rc;
+}
+
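+/* GMAC path for requests with associated data but no payload: the CPB is
+ * temporarily switched into AES-GMAC mode, the AAD is hashed in chunks, and
+ * the mode is restored to GCM before returning.
+ */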
+static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
+		unsigned int assoclen)
+{
+	int rc;
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	struct nx_sg *nx_sg;
+	unsigned int nbytes = assoclen;
+	unsigned int processed = 0, to_process;
+	unsigned int max_sg_len;
+
+	/* Set GMAC mode */
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;
+
+	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
+
+	/* page_limit: number of sg entries that fit on one page */
+	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+			   nx_ctx->ap->sglen);
+	max_sg_len = min_t(u64, max_sg_len,
+			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+	/* Copy IV */
+	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
+
+	do {
+		/*
+		 * to_process: the data chunk to process in this update.
+		 * This value is bound by sg list limits.
+		 */
+		to_process = min_t(u64, nbytes - processed,
+				   nx_ctx->ap->databytelen);
+		to_process = min_t(u64, to_process,
+				   NX_PAGE_SIZE * (max_sg_len - 1));
+
+		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
+					  req->src, processed, &to_process);
+
+		if ((to_process + processed) < nbytes)
+			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+		else
+			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
+					* sizeof(struct nx_sg);
+
+		csbcpb->cpb.aes_gcm.bit_length_data = 0;
+		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
+
+		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
+			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_gcm.in_s0,
+			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);
+
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
+
+		processed += to_process;
+	} while (processed < nbytes);
+
+out:
+	/* Restore GCM mode */
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
+	return rc;
+}
+
+static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
+		     int enc)
+{
+	int rc;
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	char out[AES_BLOCK_SIZE];
+	struct nx_sg *in_sg, *out_sg;
+	int len;
+
+	/* For scenarios where the input message is zero length, AES CTR mode
+	 * may be used. Set the source data to be a single block (16B) of all
+	 * zeros, and set the input IV value to be the same as the GMAC IV
+	 * value. - nx_wb 4.8.1.3 */
+
+	/* Change to ECB mode */
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
+	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
+			sizeof(csbcpb->cpb.aes_ecb.key));
+	if (enc)
+		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+	else
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+	len = AES_BLOCK_SIZE;
+
+	/* Encrypt the counter/IV */
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
+				 &len, nx_ctx->ap->sglen);
+
+	if (len != AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	len = sizeof(out);
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
+				  nx_ctx->ap->sglen);
+
+	if (len != sizeof(out))
+		return -EINVAL;
+
+	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+	if (rc)
+		goto out;
+	atomic_inc(&(nx_ctx->stats->aes_ops));
+
+	/* Copy out the auth tag */
+	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
+			crypto_aead_authsize(crypto_aead_reqtfm(req)));
+out:
+	/* Restore GCM mode */
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
+
+	/*
+	 * The ECB key occupies the same CPB region as the GCM AAD and
+	 * counter, so it's safe to just fill it with zeroes.
+	 */
+	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));
+
+	return rc;
+}
+
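+/* Common GCM path: zero-length requests are diverted to gcm_empty()/gmac(),
+ * otherwise the AAD is folded in via nx_gca(), the payload is processed in
+ * sg-limited chunks with the counter and pattern chained between calls, and
+ * the tag is either appended to the destination (encrypt) or checked with
+ * crypto_memneq() (decrypt).
+ */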
+static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
+			    unsigned int assoclen)
+{
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	struct blkcipher_desc desc;
+	unsigned int nbytes = req->cryptlen;
+	unsigned int processed = 0, to_process;
+	unsigned long irq_flags;
+	int rc = -EINVAL;
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+	desc.info = rctx->iv;
+	/* initialize the counter */
+	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
+
+	if (nbytes == 0) {
+		if (assoclen == 0)
+			rc = gcm_empty(req, &desc, enc);
+		else
+			rc = gmac(req, &desc, assoclen);
+		if (rc)
+			goto out;
+		else
+			goto mac;
+	}
+
+	/* Process associated data */
+	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
+	if (assoclen) {
+		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
+			    assoclen);
+		if (rc)
+			goto out;
+	}
+
+	/* Set flags for encryption */
+	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
+	if (enc) {
+		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+	} else {
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
+	}
+
+	do {
+		to_process = nbytes - processed;
+
+		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
+		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
+				       req->src, &to_process,
+				       processed + req->assoclen,
+				       csbcpb->cpb.aes_gcm.iv_or_cnt);
+
+		if (rc)
+			goto out;
+
+		if ((to_process + processed) < nbytes)
+			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+		else
+			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
+
+		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
+			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
+		memcpy(csbcpb->cpb.aes_gcm.in_s0,
+			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);
+
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+		atomic64_add(csbcpb->csb.processed_byte_count,
+			     &(nx_ctx->stats->aes_bytes));
+
+		processed += to_process;
+	} while (processed < nbytes);
+
+mac:
+	if (enc) {
+		/* copy out the auth tag */
+		scatterwalk_map_and_copy(
+			csbcpb->cpb.aes_gcm.out_pat_or_mac,
+			req->dst, req->assoclen + nbytes,
+			crypto_aead_authsize(crypto_aead_reqtfm(req)),
+			SCATTERWALK_TO_SG);
+	} else {
+		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
+		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;
+
+		scatterwalk_map_and_copy(
+			itag, req->src, req->assoclen + nbytes,
+			crypto_aead_authsize(crypto_aead_reqtfm(req)),
+			SCATTERWALK_FROM_SG);
+		rc = crypto_memneq(itag, otag,
+			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
+		     -EBADMSG : 0;
+	}
+out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+	return rc;
+}
+
+static int gcm_aes_nx_encrypt(struct aead_request *req)
+{
+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+	char *iv = rctx->iv;
+
+	memcpy(iv, req->iv, GCM_AES_IV_SIZE);
+
+	return gcm_aes_nx_crypt(req, 1, req->assoclen);
+}
+
+static int gcm_aes_nx_decrypt(struct aead_request *req)
+{
+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+	char *iv = rctx->iv;
+
+	memcpy(iv, req->iv, GCM_AES_IV_SIZE);
+
+	return gcm_aes_nx_crypt(req, 0, req->assoclen);
+}
+
+static int gcm4106_aes_nx_encrypt(struct aead_request *req)
+{
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+	char *iv = rctx->iv;
+	char *nonce = nx_ctx->priv.gcm.nonce;
+
+	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
+	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
+
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
+}
+
+static int gcm4106_aes_nx_decrypt(struct aead_request *req)
+{
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+	char *iv = rctx->iv;
+	char *nonce = nx_ctx->priv.gcm.nonce;
+
+	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
+	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
+
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
+}
+
+/* tell the block cipher walk routines that this is a stream cipher by
+ * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
+ * during encrypt/decrypt doesn't solve this problem, because it calls
+ * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
+ * but instead uses this tfm->blocksize. */
+struct aead_alg nx_gcm_aes_alg = {
+	.base = {
+		.cra_name        = "gcm(aes)",
+		.cra_driver_name = "gcm-aes-nx",
+		.cra_priority    = 300,
+		.cra_blocksize   = 1,
+		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+		.cra_module      = THIS_MODULE,
+	},
+	.init        = nx_crypto_ctx_aes_gcm_init,
+	.exit        = nx_crypto_ctx_aead_exit,
+	.ivsize      = GCM_AES_IV_SIZE,
+	.maxauthsize = AES_BLOCK_SIZE,
+	.setkey      = gcm_aes_nx_set_key,
+	.encrypt     = gcm_aes_nx_encrypt,
+	.decrypt     = gcm_aes_nx_decrypt,
+};
+
+struct aead_alg nx_gcm4106_aes_alg = {
+	.base = {
+		.cra_name        = "rfc4106(gcm(aes))",
+		.cra_driver_name = "rfc4106-gcm-aes-nx",
+		.cra_priority    = 300,
+		.cra_blocksize   = 1,
+		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+		.cra_module      = THIS_MODULE,
+	},
+	.init        = nx_crypto_ctx_aes_gcm_init,
+	.exit        = nx_crypto_ctx_aead_exit,
+	.ivsize      = GCM_RFC4106_IV_SIZE,
+	.maxauthsize = AES_BLOCK_SIZE,
+	.setkey      = gcm4106_aes_nx_set_key,
+	.setauthsize = gcm4106_aes_nx_setauthsize,
+	.encrypt     = gcm4106_aes_nx_encrypt,
+	.decrypt     = gcm4106_aes_nx_decrypt,
+};
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
new file mode 100644
index 0000000..ad3358e
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -0,0 +1,395 @@
+/**
+ * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
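+/* Per-request software state: "state" receives the coprocessor output,
+ * while "buffer" holds up to one AES block ("count" bytes) of input that is
+ * held back so the final block can be folded in at nx_xcbc_final() time.
+ */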
+struct xcbc_state {
+	u8 state[AES_BLOCK_SIZE];
+	unsigned int count;
+	u8 buffer[AES_BLOCK_SIZE];
+};
+
+static int nx_xcbc_set_key(struct crypto_shash *desc,
+			   const u8            *in_key,
+			   unsigned int         key_len)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+
+	switch (key_len) {
+	case AES_KEYSIZE_128:
+		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
+
+	return 0;
+}
+
+/*
+ * Based on RFC 3566, for a zero-length message:
+ *
+ * n = 1
+ * K1 = E(K, 0x01010101010101010101010101010101)
+ * K3 = E(K, 0x03030303030303030303030303030303)
+ * E[0] = 0x00000000000000000000000000000000
+ * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
+ * E[1] = E(K1, M[1] ^ E[0] ^ K3)
+ * Tag = E[1]
+ */
+static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	struct nx_sg *in_sg, *out_sg;
+	u8 keys[2][AES_BLOCK_SIZE];
+	u8 key[32];
+	int rc = 0;
+	int len;
+
+	/* Change to ECB mode */
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
+	memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
+	memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
+	NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+
+	/* K1 and K3 base patterns */
+	memset(keys[0], 0x01, sizeof(keys[0]));
+	memset(keys[1], 0x03, sizeof(keys[1]));
+
+	len = sizeof(keys);
+	/* Generate K1 and K3 encrypting the patterns */
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
+				 nx_ctx->ap->sglen);
+
+	if (len != sizeof(keys))
+		return -EINVAL;
+
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
+				  nx_ctx->ap->sglen);
+
+	if (len != sizeof(keys))
+		return -EINVAL;
+
+	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+	if (rc)
+		goto out;
+	atomic_inc(&(nx_ctx->stats->aes_ops));
+
+	/* XOR K3 with the padding for a 0 length message */
+	keys[1][0] ^= 0x80;
+
+	len = sizeof(keys[1]);
+
+	/* Encrypt the final result */
+	memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
+				 nx_ctx->ap->sglen);
+
+	if (len != sizeof(keys[1]))
+		return -EINVAL;
+
+	len = AES_BLOCK_SIZE;
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
+				  nx_ctx->ap->sglen);
+
+	if (len != AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+	if (rc)
+		goto out;
+	atomic_inc(&(nx_ctx->stats->aes_ops));
+
+out:
+	/* Restore XCBC mode */
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
+	memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
+	NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+	return rc;
+}
+
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	int err;
+
+	err = nx_crypto_ctx_aes_xcbc_init(tfm);
+	if (err)
+		return err;
+
+	nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
+
+	return 0;
+}
+
+static int nx_xcbc_init(struct shash_desc *desc)
+{
+	struct xcbc_state *sctx = shash_desc_ctx(desc);
+
+	memset(sctx, 0, sizeof *sctx);
+
+	return 0;
+}
+
+static int nx_xcbc_update(struct shash_desc *desc,
+			  const u8          *data,
+			  unsigned int       len)
+{
+	struct xcbc_state *sctx = shash_desc_ctx(desc);
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	struct nx_sg *in_sg;
+	struct nx_sg *out_sg;
+	u32 to_process = 0, leftover, total;
+	unsigned int max_sg_len;
+	unsigned long irq_flags;
+	int rc = 0;
+	int data_len;
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+
+	total = sctx->count + len;
+
+	/* 2 cases for total data len:
+	 *  1: <= AES_BLOCK_SIZE: copy into state, return 0
+	 *  2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
+	 */
+	if (total <= AES_BLOCK_SIZE) {
+		memcpy(sctx->buffer + sctx->count, data, len);
+		sctx->count += len;
+		goto out;
+	}
+
+	in_sg = nx_ctx->in_sg;
+	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+				nx_ctx->ap->sglen);
+	max_sg_len = min_t(u64, max_sg_len,
+				nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+	data_len = AES_BLOCK_SIZE;
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+				  &data_len, nx_ctx->ap->sglen);
+
+	if (data_len != AES_BLOCK_SIZE) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+	do {
+		to_process = total - to_process;
+		to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+
+		leftover = total - to_process;
+
+		/* the hardware will not accept a 0 byte operation for this
+		 * algorithm and the operation MUST be finalized to be correct.
+		 * So if we happen to get an update that falls on a block sized
+		 * boundary, we must save off the last block to finalize with
+		 * later. */
+		if (!leftover) {
+			to_process -= AES_BLOCK_SIZE;
+			leftover = AES_BLOCK_SIZE;
+		}
+
+		if (sctx->count) {
+			data_len = sctx->count;
+			in_sg = nx_build_sg_list(nx_ctx->in_sg,
+						(u8 *) sctx->buffer,
+						&data_len,
+						max_sg_len);
+			if (data_len != sctx->count) {
+				rc = -EINVAL;
+				goto out;
+			}
+		}
+
+		data_len = to_process - sctx->count;
+		in_sg = nx_build_sg_list(in_sg,
+					(u8 *) data,
+					&data_len,
+					max_sg_len);
+
+		if (data_len != to_process - sctx->count) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
+					sizeof(struct nx_sg);
+
+		/* we've hit the nx chip previously and we're updating again,
+		 * so copy over the partial digest */
+		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+			memcpy(csbcpb->cpb.aes_xcbc.cv,
+				csbcpb->cpb.aes_xcbc.out_cv_mac,
+				AES_BLOCK_SIZE);
+		}
+
+		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
+
+		atomic_inc(&(nx_ctx->stats->aes_ops));
+
+		/* everything after the first update is continuation */
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+		total -= to_process;
+		data += to_process - sctx->count;
+		sctx->count = 0;
+		in_sg = nx_ctx->in_sg;
+	} while (leftover > AES_BLOCK_SIZE);
+
+	/* copy the leftover back into the state struct */
+	memcpy(sctx->buffer, data, leftover);
+	sctx->count = leftover;
+
+out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+	return rc;
+}
+
+static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
+{
+	struct xcbc_state *sctx = shash_desc_ctx(desc);
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+	struct nx_sg *in_sg, *out_sg;
+	unsigned long irq_flags;
+	int rc = 0;
+	int len;
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+		/* we've hit the nx chip previously, now we're finalizing,
+		 * so copy over the partial digest */
+		memcpy(csbcpb->cpb.aes_xcbc.cv,
+		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
+	} else if (sctx->count == 0) {
+		/*
+		 * we've never seen an update, so this is a 0 byte op. The
+		 * hardware cannot handle a 0 byte op, so just use the ECB
+		 * path to generate the MAC.
+		 */
+		rc = nx_xcbc_empty(desc, out);
+		goto out;
+	}
+
+	/* final is represented by continuing the operation and indicating that
+	 * this is not an intermediate operation */
+	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+	len = sctx->count;
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
+				 &len, nx_ctx->ap->sglen);
+
+	if (len != sctx->count) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	len = AES_BLOCK_SIZE;
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
+				  nx_ctx->ap->sglen);
+
+	if (len != AES_BLOCK_SIZE) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+	if (!nx_ctx->op.outlen) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+	if (rc)
+		goto out;
+
+	atomic_inc(&(nx_ctx->stats->aes_ops));
+
+	memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
+out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+	return rc;
+}
+
+struct shash_alg nx_shash_aes_xcbc_alg = {
+	.digestsize = AES_BLOCK_SIZE,
+	.init       = nx_xcbc_init,
+	.update     = nx_xcbc_update,
+	.final      = nx_xcbc_final,
+	.setkey     = nx_xcbc_set_key,
+	.descsize   = sizeof(struct xcbc_state),
+	.statesize  = sizeof(struct xcbc_state),
+	.base       = {
+		.cra_name        = "xcbc(aes)",
+		.cra_driver_name = "xcbc-aes-nx",
+		.cra_priority    = 300,
+		.cra_blocksize   = AES_BLOCK_SIZE,
+		.cra_module      = THIS_MODULE,
+		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+		.cra_init        = nx_crypto_ctx_aes_xcbc_init2,
+		.cra_exit        = nx_crypto_ctx_exit,
+	}
+};
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
new file mode 100644
index 0000000..a6764af
--- /dev/null
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -0,0 +1,297 @@
+/**
+ * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/module.h>
+#include <asm/vio.h>
+#include <asm/byteorder.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+	int err;
+
+	err = nx_crypto_ctx_sha_init(tfm);
+	if (err)
+		return err;
+
+	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+
+	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
+
+	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
+
+	return 0;
+}
+
+static int nx_sha256_init(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	memset(sctx, 0, sizeof *sctx);
+
+	sctx->state[0] = __cpu_to_be32(SHA256_H0);
+	sctx->state[1] = __cpu_to_be32(SHA256_H1);
+	sctx->state[2] = __cpu_to_be32(SHA256_H2);
+	sctx->state[3] = __cpu_to_be32(SHA256_H3);
+	sctx->state[4] = __cpu_to_be32(SHA256_H4);
+	sctx->state[5] = __cpu_to_be32(SHA256_H5);
+	sctx->state[6] = __cpu_to_be32(SHA256_H6);
+	sctx->state[7] = __cpu_to_be32(SHA256_H7);
+	sctx->count = 0;
+
+	return 0;
+}
+
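+/* The descriptor context is a plain struct sha256_state, so export/import
+ * are simple memcpys of that state; the digest words are kept big-endian
+ * (see nx_sha256_init) and copied to and from the CPB verbatim.
+ */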
+static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
+			    unsigned int len)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+	struct nx_sg *out_sg;
+	u64 to_process = 0, leftover, total;
+	unsigned long irq_flags;
+	int rc = 0;
+	int data_len;
+	u32 max_sg_len;
+	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+	/* 2 cases for total data len:
+	 *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
+	 *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
+	 */
+	total = (sctx->count % SHA256_BLOCK_SIZE) + len;
+	if (total < SHA256_BLOCK_SIZE) {
+		memcpy(sctx->buf + buf_len, data, len);
+		sctx->count += len;
+		goto out;
+	}
+
+	memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
+	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+	data_len = SHA256_DIGEST_SIZE;
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+				  &data_len, max_sg_len);
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+	if (data_len != SHA256_DIGEST_SIZE) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	do {
+		int used_sgs = 0;
+		struct nx_sg *in_sg = nx_ctx->in_sg;
+
+		if (buf_len) {
+			data_len = buf_len;
+			in_sg = nx_build_sg_list(in_sg,
+						 (u8 *) sctx->buf,
+						 &data_len,
+						 max_sg_len);
+
+			if (data_len != buf_len) {
+				rc = -EINVAL;
+				goto out;
+			}
+			used_sgs = in_sg - nx_ctx->in_sg;
+		}
+
+		/* to_process: SHA256_BLOCK_SIZE aligned chunk to be
+		 * processed in this iteration. This value is restricted
+		 * by sg list limits and number of sgs we already used
+		 * for leftover data. (see above)
+		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+		 * but because data may not be aligned, we need to account
+		 * for that too. */
+		to_process = min_t(u64, total,
+			(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
+		data_len = to_process - buf_len;
+		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+					 &data_len, max_sg_len);
+
+		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+
+		to_process = data_len + buf_len;
+		leftover = total - to_process;
+
+		/*
+		 * we've hit the nx chip previously and we're updating
+		 * again, so copy over the partial digest.
+		 */
+		memcpy(csbcpb->cpb.sha256.input_partial_digest,
+			       csbcpb->cpb.sha256.message_digest,
+			       SHA256_DIGEST_SIZE);
+
+		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
+
+		atomic_inc(&(nx_ctx->stats->sha256_ops));
+
+		total -= to_process;
+		data += to_process - buf_len;
+		buf_len = 0;
+
+	} while (leftover >= SHA256_BLOCK_SIZE);
+
+	/* copy the leftover back into the state struct */
+	if (leftover)
+		memcpy(sctx->buf, data, leftover);
+
+	sctx->count += len;
+	memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
+out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+	return rc;
+}
+
+static int nx_sha256_final(struct shash_desc *desc, u8 *out)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+	struct nx_sg *in_sg, *out_sg;
+	unsigned long irq_flags;
+	u32 max_sg_len;
+	int rc = 0;
+	int len;
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+	/* final is represented by continuing the operation and indicating that
+	 * this is not an intermediate operation */
+	if (sctx->count >= SHA256_BLOCK_SIZE) {
+		/* we've hit the nx chip previously, now we're finalizing,
+		 * so copy over the partial digest */
+		memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+	} else {
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
+	}
+
+	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
+
+	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
+				 &len, max_sg_len);
+
+	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	len = SHA256_DIGEST_SIZE;
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);
+
+	if (len != SHA256_DIGEST_SIZE) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+	if (!nx_ctx->op.outlen) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+	if (rc)
+		goto out;
+
+	atomic_inc(&(nx_ctx->stats->sha256_ops));
+
+	atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
+	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
+out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+	return rc;
+}
+
+static int nx_sha256_export(struct shash_desc *desc, void *out)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(out, sctx, sizeof(*sctx));
+
+	return 0;
+}
+
+static int nx_sha256_import(struct shash_desc *desc, const void *in)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(sctx, in, sizeof(*sctx));
+
+	return 0;
+}
+
+struct shash_alg nx_shash_sha256_alg = {
+	.digestsize = SHA256_DIGEST_SIZE,
+	.init       = nx_sha256_init,
+	.update     = nx_sha256_update,
+	.final      = nx_sha256_final,
+	.export     = nx_sha256_export,
+	.import     = nx_sha256_import,
+	.descsize   = sizeof(struct sha256_state),
+	.statesize  = sizeof(struct sha256_state),
+	.base       = {
+		.cra_name        = "sha256",
+		.cra_driver_name = "sha256-nx",
+		.cra_priority    = 300,
+		.cra_blocksize   = SHA256_BLOCK_SIZE,
+		.cra_module      = THIS_MODULE,
+		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+		.cra_init        = nx_crypto_ctx_sha256_init,
+		.cra_exit        = nx_crypto_ctx_exit,
+	}
+};
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
new file mode 100644
index 0000000..92956bc
--- /dev/null
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -0,0 +1,303 @@
+/**
+ * SHA-512 routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/module.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+	int err;
+
+	err = nx_crypto_ctx_sha_init(tfm);
+	if (err)
+		return err;
+
+	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+
+	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
+
+	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
+
+	return 0;
+}
+
+static int nx_sha512_init(struct shash_desc *desc)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+
+	memset(sctx, 0, sizeof *sctx);
+
+	sctx->state[0] = __cpu_to_be64(SHA512_H0);
+	sctx->state[1] = __cpu_to_be64(SHA512_H1);
+	sctx->state[2] = __cpu_to_be64(SHA512_H2);
+	sctx->state[3] = __cpu_to_be64(SHA512_H3);
+	sctx->state[4] = __cpu_to_be64(SHA512_H4);
+	sctx->state[5] = __cpu_to_be64(SHA512_H5);
+	sctx->state[6] = __cpu_to_be64(SHA512_H6);
+	sctx->state[7] = __cpu_to_be64(SHA512_H7);
+	sctx->count[0] = 0;
+
+	return 0;
+}
+
+static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
+			    unsigned int len)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+	struct nx_sg *out_sg;
+	u64 to_process, leftover = 0, total;
+	unsigned long irq_flags;
+	int rc = 0;
+	int data_len;
+	u32 max_sg_len;
+	u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+	/* 2 cases for total data len:
+	 *  1: < SHA512_BLOCK_SIZE: copy into state, return 0
+	 *  2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
+	 */
+	total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
+	if (total < SHA512_BLOCK_SIZE) {
+		memcpy(sctx->buf + buf_len, data, len);
+		sctx->count[0] += len;
+		goto out;
+	}
+
+	memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
+	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+	data_len = SHA512_DIGEST_SIZE;
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+				  &data_len, max_sg_len);
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+	if (data_len != SHA512_DIGEST_SIZE) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	do {
+		int used_sgs = 0;
+		struct nx_sg *in_sg = nx_ctx->in_sg;
+
+		if (buf_len) {
+			data_len = buf_len;
+			in_sg = nx_build_sg_list(in_sg,
+						 (u8 *) sctx->buf,
+						 &data_len, max_sg_len);
+
+			if (data_len != buf_len) {
+				rc = -EINVAL;
+				goto out;
+			}
+			used_sgs = in_sg - nx_ctx->in_sg;
+		}
+
+		/* to_process: SHA512_BLOCK_SIZE aligned chunk to be
+		 * processed in this iteration. This value is restricted
+		 * by sg list limits and number of sgs we already used
+		 * for leftover data. (see above)
+		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+		 * but because data may not be aligned, we need to account
+		 * for that too. */
+		to_process = min_t(u64, total,
+			(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
+		data_len = to_process - buf_len;
+		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+					 &data_len, max_sg_len);
+
+		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+
+		if (data_len != (to_process - buf_len)) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		to_process = data_len + buf_len;
+		leftover = total - to_process;
+
+		/*
+		 * we've hit the nx chip previously and we're updating
+		 * again, so copy over the partial digest.
+		 */
+		memcpy(csbcpb->cpb.sha512.input_partial_digest,
+			       csbcpb->cpb.sha512.message_digest,
+			       SHA512_DIGEST_SIZE);
+
+		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		if (rc)
+			goto out;
+
+		atomic_inc(&(nx_ctx->stats->sha512_ops));
+
+		total -= to_process;
+		data += to_process - buf_len;
+		buf_len = 0;
+
+	} while (leftover >= SHA512_BLOCK_SIZE);
+
+	/* copy the leftover back into the state struct */
+	if (leftover)
+		memcpy(sctx->buf, data, leftover);
+	sctx->count[0] += len;
+	memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
+out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+	return rc;
+}
+
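+/* The bit length handed to the coprocessor is taken from sctx->count[0]
+ * alone (message_bit_length_lo); the high word of the 128-bit SHA-512
+ * length is left at its initial value by this driver.
+ */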
+static int nx_sha512_final(struct shash_desc *desc, u8 *out)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+	struct nx_sg *in_sg, *out_sg;
+	u32 max_sg_len;
+	u64 count0;
+	unsigned long irq_flags;
+	int rc = 0;
+	int len;
+
+	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+	/* final is represented by continuing the operation and indicating that
+	 * this is not an intermediate operation */
+	if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
+		/* we've hit the nx chip previously, now we're finalizing,
+		 * so copy over the partial digest */
+		memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
+							SHA512_DIGEST_SIZE);
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+	} else {
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
+	}
+
+	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+	count0 = sctx->count[0] * 8;
+
+	csbcpb->cpb.sha512.message_bit_length_lo = count0;
+
+	len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
+				 max_sg_len);
+
+	if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	len = SHA512_DIGEST_SIZE;
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
+				 max_sg_len);
+
+	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+	if (!nx_ctx->op.outlen) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+	if (rc)
+		goto out;
+
+	atomic_inc(&(nx_ctx->stats->sha512_ops));
+	atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
+
+	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
+out:
+	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+	return rc;
+}
+
+static int nx_sha512_export(struct shash_desc *desc, void *out)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(out, sctx, sizeof(*sctx));
+
+	return 0;
+}
+
+static int nx_sha512_import(struct shash_desc *desc, const void *in)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(sctx, in, sizeof(*sctx));
+
+	return 0;
+}
+
+struct shash_alg nx_shash_sha512_alg = {
+	.digestsize = SHA512_DIGEST_SIZE,
+	.init       = nx_sha512_init,
+	.update     = nx_sha512_update,
+	.final      = nx_sha512_final,
+	.export     = nx_sha512_export,
+	.import     = nx_sha512_import,
+	.descsize   = sizeof(struct sha512_state),
+	.statesize  = sizeof(struct sha512_state),
+	.base       = {
+		.cra_name        = "sha512",
+		.cra_driver_name = "sha512-nx",
+		.cra_priority    = 300,
+		.cra_blocksize   = SHA512_BLOCK_SIZE,
+		.cra_module      = THIS_MODULE,
+		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+		.cra_init        = nx_crypto_ctx_sha512_init,
+		.cra_exit        = nx_crypto_ctx_exit,
+	}
+};
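+
+/*
+ * Illustrative usage sketch (not taken from any in-tree caller, error
+ * handling omitted): once the shash above is registered, kernel users
+ * reach it through the generic hash API, e.g.
+ *
+ *	struct crypto_shash *tfm = crypto_alloc_shash("sha512", 0, 0);
+ *	SHASH_DESC_ON_STACK(desc, tfm);
+ *	u8 digest[SHA512_DIGEST_SIZE];
+ *
+ *	desc->tfm = tfm;
+ *	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ *	crypto_shash_digest(desc, data, len, digest);
+ *	crypto_free_shash(tfm);
+ *
+ * which exercises nx_sha512_init/update/final above via the shash ops;
+ * the implementation is chosen by cra_priority, 300 here.
+ */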
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
new file mode 100644
index 0000000..3a5e31b
--- /dev/null
+++ b/drivers/crypto/nx/nx.c
@@ -0,0 +1,858 @@
+/**
+ * Routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <asm/hvcall.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+/**
+ * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
+ *
+ * @nx_ctx: the crypto context handle
+ * @op: PFO operation struct to pass in
+ * @may_sleep: flag indicating the request can sleep
+ *
+ * Make the hcall, retrying while the hardware is busy. If we cannot yield
+ * the thread, limit the number of retries to 10 here.
+ */
+int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
+		  struct vio_pfo_op    *op,
+		  u32                   may_sleep)
+{
+	int rc, retries = 10;
+	struct vio_dev *viodev = nx_driver.viodev;
+
+	atomic_inc(&(nx_ctx->stats->sync_ops));
+
+	do {
+		rc = vio_h_cop_sync(viodev, op);
+	} while (rc == -EBUSY && !may_sleep && retries--);
+
+	if (rc) {
+		dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
+			"hcall rc: %ld\n", rc, op->hcall_err);
+		atomic_inc(&(nx_ctx->stats->errors));
+		atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
+		atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
+	}
+
+	return rc;
+}
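+
+/*
+ * Usage note (illustrative): callers in this driver derive @may_sleep from
+ * the crypto request flags, e.g.
+ *
+ *	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ *			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ *
+ * so atomic-context requests get the bounded busy-retry loop above, while
+ * sleepable requests get -EBUSY back after a single attempt and can decide
+ * how to wait.
+ */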
+
+/**
+ * nx_build_sg_list - build an NX scatter list describing a single buffer
+ *
+ * @sg_head: pointer to the first scatter list element to build
+ * @start_addr: pointer to the linear buffer
+ * @len: length of the data at @start_addr
+ * @sgmax: the largest number of scatter list elements we're allowed to create
+ *
+ * This function will start writing nx_sg elements at @sg_head and keep
+ * writing them until all of the data from @start_addr is described or
+ * until sgmax elements have been written. Scatter list elements will be
+ * created such that none of the elements describes a buffer that crosses a 4K
+ * boundary.
+ */
+struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
+			       u8           *start_addr,
+			       unsigned int *len,
+			       u32           sgmax)
+{
+	unsigned int sg_len = 0;
+	struct nx_sg *sg;
+	u64 sg_addr = (u64)start_addr;
+	u64 end_addr;
+
+	/* determine the start and end for this address range - slightly
+	 * different if this is in VMALLOC_REGION */
+	if (is_vmalloc_addr(start_addr))
+		sg_addr = page_to_phys(vmalloc_to_page(start_addr))
+			  + offset_in_page(sg_addr);
+	else
+		sg_addr = __pa(sg_addr);
+
+	end_addr = sg_addr + *len;
+
+	/* each iteration will write one struct nx_sg element and add the
+	 * length of data described by that element to sg_len. Once @len bytes
+	 * have been described (or @sgmax elements have been written), the
+	 * loop ends. min_t is used to ensure @end_addr falls on the same page
+	 * as sg_addr; if not, we need to create another nx_sg element for the
+	 * data on the next page.
+	 *
+	 * Also when using vmalloc'ed data, every time that a system page
+	 * boundary is crossed the physical address needs to be re-calculated.
+	 */
+	for (sg = sg_head; sg_len < *len; sg++) {
+		u64 next_page;
+
+		sg->addr = sg_addr;
+		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
+				end_addr);
+
+		next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
+		sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
+		sg_len += sg->len;
+
+		if (sg_addr >= next_page &&
+				is_vmalloc_addr(start_addr + sg_len)) {
+			sg_addr = page_to_phys(vmalloc_to_page(
+						start_addr + sg_len));
+			end_addr = sg_addr + *len - sg_len;
+		}
+
+		if ((sg - sg_head) == sgmax) {
+			pr_err("nx: scatter/gather list overflow, pid: %d\n",
+			       current->pid);
+			sg++;
+			break;
+		}
+	}
+	*len = sg_len;
+
+	/* return the moved sg_head pointer */
+	return sg;
+}
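+
+/*
+ * Example (illustrative, mirroring the hash paths in this driver):
+ * describing one linear buffer and recording the resulting list length
+ * as a negative byte count, which is how phyp is told the parameter is a
+ * scatterlist rather than a linear buffer:
+ *
+ *	unsigned int len = buf_len;
+ *	struct nx_sg *end;
+ *
+ *	end = nx_build_sg_list(nx_ctx->in_sg, buf, &len, nx_ctx->ap->sglen);
+ *	if (len != buf_len)
+ *		return -EINVAL;
+ *	nx_ctx->op.inlen = (nx_ctx->in_sg - end) * sizeof(struct nx_sg);
+ *
+ * The len != buf_len check catches the case where the list was clamped at
+ * @sgmax entries and not all of the data could be described.
+ */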
+
+/**
+ * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
+ *
+ * @nx_dst: pointer to the first nx_sg element to write
+ * @sglen: max number of nx_sg entries we're allowed to write
+ * @sg_src: pointer to the source linux scatterlist to walk
+ * @start: number of bytes to fast-forward past at the beginning of @sg_src
+ * @src_len: number of bytes to walk in @sg_src
+ */
+struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
+				unsigned int        sglen,
+				struct scatterlist *sg_src,
+				unsigned int        start,
+				unsigned int       *src_len)
+{
+	struct scatter_walk walk;
+	struct nx_sg *nx_sg = nx_dst;
+	unsigned int n, offset = 0, len = *src_len;
+	char *dst;
+
+	/* we need to fast forward through @start bytes first */
+	for (;;) {
+		scatterwalk_start(&walk, sg_src);
+
+		if (start < offset + sg_src->length)
+			break;
+
+		offset += sg_src->length;
+		sg_src = sg_next(sg_src);
+	}
+
+	/* start - offset is the number of bytes to advance in the scatterlist
+	 * element we're currently looking at */
+	scatterwalk_advance(&walk, start - offset);
+
+	while (len && (nx_sg - nx_dst) < sglen) {
+		n = scatterwalk_clamp(&walk, len);
+		if (!n) {
+			/* In cases where we have a scatterlist chain,
+			 * sg_next handles it properly */
+			scatterwalk_start(&walk, sg_next(walk.sg));
+			n = scatterwalk_clamp(&walk, len);
+		}
+		dst = scatterwalk_map(&walk);
+
+		nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
+		len -= n;
+
+		scatterwalk_unmap(dst);
+		scatterwalk_advance(&walk, n);
+		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
+	}
+	/* update @src_len to the number of bytes actually walked */
+	*src_len -= len;
+
+	/* return the moved destination pointer */
+	return nx_sg;
+}
+
+/**
+ * trim_sg_list - trim an sg list so that it stays within its bound
+ * @sg: sg list head
+ * @end: sg list end
+ * @delta: the amount we need to crop in order to bound the list
+ * @nbytes: length of the data being processed; reduced by any bytes that
+ *          have to be put back to be processed in a later pass
+ *
+ */
+static long int trim_sg_list(struct nx_sg *sg,
+			     struct nx_sg *end,
+			     unsigned int delta,
+			     unsigned int *nbytes)
+{
+	long int oplen;
+	long int data_back;
+	unsigned int is_delta = delta;
+
+	while (delta && end > sg) {
+		struct nx_sg *last = end - 1;
+
+		if (last->len > delta) {
+			last->len -= delta;
+			delta = 0;
+		} else {
+			end--;
+			delta -= last->len;
+		}
+	}
+
+	/* There are cases where we need to crop the list in order to make it
+	 * a block size multiple, but we also need to align the data. In order
+	 * to do that, we need to calculate how much data has to be put back
+	 * to be processed
+	 */
+	oplen = (sg - end) * sizeof(struct nx_sg);
+	if (is_delta) {
+		data_back = (abs(oplen) / AES_BLOCK_SIZE) *  sg->len;
+		data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
+		*nbytes -= data_back;
+	}
+
+	return oplen;
+}
+
+/**
+ * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
+ *                     scatterlists based on them.
+ *
+ * @nx_ctx: NX crypto context for the lists we're building
+ * @desc: the block cipher descriptor for the operation
+ * @dst: destination scatterlist
+ * @src: source scatterlist
+ * @nbytes: length of data described in the scatterlists
+ * @offset: number of bytes to fast-forward past at the beginning of
+ *          scatterlists.
+ * @iv: destination for the iv data, if the algorithm requires it
+ *
+ * This is common code shared by all the AES algorithms. It uses the block
+ * cipher walk routines to traverse input and output scatterlists, building
+ * corresponding NX scatterlists
+ */
+int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
+		      struct blkcipher_desc *desc,
+		      struct scatterlist    *dst,
+		      struct scatterlist    *src,
+		      unsigned int          *nbytes,
+		      unsigned int           offset,
+		      u8                    *iv)
+{
+	unsigned int delta = 0;
+	unsigned int total = *nbytes;
+	struct nx_sg *nx_insg = nx_ctx->in_sg;
+	struct nx_sg *nx_outsg = nx_ctx->out_sg;
+	unsigned int max_sg_len;
+
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+	if (iv)
+		memcpy(iv, desc->info, AES_BLOCK_SIZE);
+
+	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+
+	nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
+					offset, nbytes);
+	nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
+					offset, nbytes);
+
+	if (*nbytes < total)
+		delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));
+
+	/* these lengths should be negative, which will indicate to phyp that
+	 * the input and output parameters are scatterlists, not linear
+	 * buffers */
+	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
+	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);
+
+	return 0;
+}
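+
+/*
+ * Worked example for the delta handling above (illustrative numbers):
+ * with an original total of 100 bytes, if the sg limits cut the walks
+ * short so that *nbytes drops to 70, then
+ *
+ *	delta = 70 - (70 & ~(AES_BLOCK_SIZE - 1)) = 70 - 64 = 6
+ *
+ * and trim_sg_list() crops those 6 trailing bytes from both lists so that
+ * phyp only ever sees whole AES blocks, adjusting *nbytes so the cropped
+ * data is processed in a later pass.
+ */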
+
+/**
+ * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct
+ *
+ * @nx_ctx: the nx context to initialize
+ * @function: the function code for the op
+ */
+void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
+{
+	spin_lock_init(&nx_ctx->lock);
+	memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
+	nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;
+
+	nx_ctx->op.flags = function;
+	nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
+	nx_ctx->op.in = __pa(nx_ctx->in_sg);
+	nx_ctx->op.out = __pa(nx_ctx->out_sg);
+
+	if (nx_ctx->csbcpb_aead) {
+		nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;
+
+		nx_ctx->op_aead.flags = function;
+		nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
+		nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
+		nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
+	}
+}
+
+static void nx_of_update_status(struct device   *dev,
+			       struct property *p,
+			       struct nx_of    *props)
+{
+	if (!strncmp(p->value, "okay", p->length)) {
+		props->status = NX_WAITING;
+		props->flags |= NX_OF_FLAG_STATUS_SET;
+	} else {
+		dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
+			 (char *)p->value);
+	}
+}
+
+static void nx_of_update_sglen(struct device   *dev,
+			       struct property *p,
+			       struct nx_of    *props)
+{
+	if (p->length != sizeof(props->max_sg_len)) {
+		dev_err(dev, "%s: unexpected format for "
+			"ibm,max-sg-len property\n", __func__);
+		dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
+			"long, expected %zd bytes\n", __func__,
+			p->length, sizeof(props->max_sg_len));
+		return;
+	}
+
+	props->max_sg_len = *(u32 *)p->value;
+	props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
+}
+
+static void nx_of_update_msc(struct device   *dev,
+			     struct property *p,
+			     struct nx_of    *props)
+{
+	struct msc_triplet *trip;
+	struct max_sync_cop *msc;
+	unsigned int bytes_so_far, i, lenp;
+
+	msc = (struct max_sync_cop *)p->value;
+	lenp = p->length;
+
+	/* You can't tell if the data read in for this property is sane by its
+	 * size alone. This is because there are sizes embedded in the data
+	 * structure. The best we can do is check lengths as we parse and bail
+	 * as soon as a length error is detected. */
+	bytes_so_far = 0;
+
+	while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
+		bytes_so_far += sizeof(struct max_sync_cop);
+
+		trip = msc->trip;
+
+		for (i = 0;
+		     ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
+		     i < msc->triplets;
+		     i++) {
+			if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
+				dev_err(dev, "unknown function code/mode "
+					"combo: %d/%d (ignored)\n", msc->fc,
+					msc->mode);
+				goto next_loop;
+			}
+
+			if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) {
+				dev_warn(dev, "bogus sglen/databytelen: "
+					 "%u/%u (ignored)\n", trip->sglen,
+					 trip->databytelen);
+				goto next_loop;
+			}
+
+			switch (trip->keybitlen) {
+			case 128:
+			case 160:
+				props->ap[msc->fc][msc->mode][0].databytelen =
+					trip->databytelen;
+				props->ap[msc->fc][msc->mode][0].sglen =
+					trip->sglen;
+				break;
+			case 192:
+				props->ap[msc->fc][msc->mode][1].databytelen =
+					trip->databytelen;
+				props->ap[msc->fc][msc->mode][1].sglen =
+					trip->sglen;
+				break;
+			case 256:
+				if (msc->fc == NX_FC_AES) {
+					props->ap[msc->fc][msc->mode][2].
+						databytelen = trip->databytelen;
+					props->ap[msc->fc][msc->mode][2].sglen =
+						trip->sglen;
+				} else if (msc->fc == NX_FC_AES_HMAC ||
+					   msc->fc == NX_FC_SHA) {
+					props->ap[msc->fc][msc->mode][1].
+						databytelen = trip->databytelen;
+					props->ap[msc->fc][msc->mode][1].sglen =
+						trip->sglen;
+				} else {
+					dev_warn(dev, "unknown function "
+						"code/key bit len combo"
+						": (%u/256)\n", msc->fc);
+				}
+				break;
+			case 512:
+				props->ap[msc->fc][msc->mode][2].databytelen =
+					trip->databytelen;
+				props->ap[msc->fc][msc->mode][2].sglen =
+					trip->sglen;
+				break;
+			default:
+				dev_warn(dev, "unknown function code/key bit "
+					 "len combo: (%u/%u)\n", msc->fc,
+					 trip->keybitlen);
+				break;
+			}
+next_loop:
+			bytes_so_far += sizeof(struct msc_triplet);
+			trip++;
+		}
+
+		msc = (struct max_sync_cop *)trip;
+	}
+
+	props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
+}
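+
+/*
+ * Layout sketch of ibm,max-sync-cop (illustrative values): the property is
+ * a sequence of max_sync_cop headers, each immediately followed by its
+ * triplets, e.g.
+ *
+ *	{ .fc = NX_FC_AES, .mode = NX_MODE_AES_CBC, .triplets = 3 }
+ *		{ .keybitlen = 128, .databytelen = ..., .sglen = ... }
+ *		{ .keybitlen = 192, .databytelen = ..., .sglen = ... }
+ *		{ .keybitlen = 256, .databytelen = ..., .sglen = ... }
+ *	{ .fc = NX_FC_SHA, .mode = NX_MODE_SHA, .triplets = 2 }
+ *		...
+ *
+ * The loop above copies each triplet into props->ap[fc][mode][slot], with
+ * the slot picked by key/digest size: 128/160 -> 0, 192 -> 1, 256 -> 2 for
+ * AES (slot 1 for SHA and AES-HMAC), 512 -> 2.
+ */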
+
+/**
+ * nx_of_init - read openFirmware values from the device tree
+ *
+ * @dev: device handle
+ * @props: pointer to struct to hold the properties values
+ *
+ * Called once at driver probe time, this function will read out the
+ * openFirmware properties we use at runtime. If all the OF properties are
+ * acceptable, when we exit this function props->flags will indicate that
+ * we're ready to register our crypto algorithms.
+ */
+static void nx_of_init(struct device *dev, struct nx_of *props)
+{
+	struct device_node *base_node = dev->of_node;
+	struct property *p;
+
+	p = of_find_property(base_node, "status", NULL);
+	if (!p)
+		dev_info(dev, "%s: property 'status' not found\n", __func__);
+	else
+		nx_of_update_status(dev, p, props);
+
+	p = of_find_property(base_node, "ibm,max-sg-len", NULL);
+	if (!p)
+		dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
+			 __func__);
+	else
+		nx_of_update_sglen(dev, p, props);
+
+	p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
+	if (!p)
+		dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
+			 __func__);
+	else
+		nx_of_update_msc(dev, p, props);
+}
+
+static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot)
+{
+	struct alg_props *props = &nx_driver.of.ap[fc][mode][slot];
+
+	if (!props->sglen || props->databytelen < NX_PAGE_SIZE) {
+		if (dev)
+			dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: "
+				 "%u/%u (ignored)\n", fc, mode, slot,
+				 props->sglen, props->databytelen);
+		return false;
+	}
+
+	return true;
+}
+
+static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
+{
+	int i;
+
+	for (i = 0; i < 3; i++)
+		if (!nx_check_prop(dev, fc, mode, i))
+			return false;
+
+	return true;
+}
+
+static int nx_register_alg(struct crypto_alg *alg, u32 fc, u32 mode)
+{
+	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
+	       crypto_register_alg(alg) : 0;
+}
+
+static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
+{
+	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
+	       crypto_register_aead(alg) : 0;
+}
+
+static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
+{
+	return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev,
+					  fc, mode, slot) :
+			    nx_check_props(&nx_driver.viodev->dev, fc, mode)) ?
+	       crypto_register_shash(alg) : 0;
+}
+
+static void nx_unregister_alg(struct crypto_alg *alg, u32 fc, u32 mode)
+{
+	if (nx_check_props(NULL, fc, mode))
+		crypto_unregister_alg(alg);
+}
+
+static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
+{
+	if (nx_check_props(NULL, fc, mode))
+		crypto_unregister_aead(alg);
+}
+
+static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode,
+				int slot)
+{
+	if (slot >= 0 ? nx_check_prop(NULL, fc, mode, slot) :
+			nx_check_props(NULL, fc, mode))
+		crypto_unregister_shash(alg);
+}
+
+/**
+ * nx_register_algs - register algorithms with the crypto API
+ *
+ * Called from nx_probe()
+ *
+ * If all OF properties are in an acceptable state, the driver flags will
+ * indicate that we're ready and we'll create our debugfs files and register
+ * our crypto algorithms.
+ */
+static int nx_register_algs(void)
+{
+	int rc = -1;
+
+	if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
+		goto out;
+
+	memset(&nx_driver.stats, 0, sizeof(struct nx_stats));
+
+	rc = NX_DEBUGFS_INIT(&nx_driver);
+	if (rc)
+		goto out;
+
+	nx_driver.of.status = NX_OKAY;
+
+	rc = nx_register_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+	if (rc)
+		goto out;
+
+	rc = nx_register_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
+	if (rc)
+		goto out_unreg_ecb;
+
+	rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
+	if (rc)
+		goto out_unreg_cbc;
+
+	rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
+	if (rc)
+		goto out_unreg_ctr3686;
+
+	rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
+	if (rc)
+		goto out_unreg_gcm;
+
+	rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+	if (rc)
+		goto out_unreg_gcm4106;
+
+	rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+	if (rc)
+		goto out_unreg_ccm;
+
+	rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
+			       NX_PROPS_SHA256);
+	if (rc)
+		goto out_unreg_ccm4309;
+
+	rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
+			       NX_PROPS_SHA512);
+	if (rc)
+		goto out_unreg_s256;
+
+	rc = nx_register_shash(&nx_shash_aes_xcbc_alg,
+			       NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
+	if (rc)
+		goto out_unreg_s512;
+
+	goto out;
+
+out_unreg_s512:
+	nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
+			    NX_PROPS_SHA512);
+out_unreg_s256:
+	nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
+			    NX_PROPS_SHA256);
+out_unreg_ccm4309:
+	nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+out_unreg_ccm:
+	nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+out_unreg_gcm4106:
+	nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
+out_unreg_gcm:
+	nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
+out_unreg_ctr3686:
+	nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
+out_unreg_cbc:
+	nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
+out_unreg_ecb:
+	nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+out:
+	return rc;
+}
+
+/**
+ * nx_crypto_ctx_init - create and initialize a crypto api context
+ *
+ * @nx_ctx: the crypto api context
+ * @fc: function code for the context
+ * @mode: the function code specific mode for this context
+ */
+static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
+{
+	if (nx_driver.of.status != NX_OKAY) {
+		pr_err("Attempt to initialize NX crypto context while device "
+		       "is not available!\n");
+		return -ENODEV;
+	}
+
+	/* we need an extra page for csbcpb_aead for these modes */
+	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
+		nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
+				   sizeof(struct nx_csbcpb);
+	else
+		nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
+				   sizeof(struct nx_csbcpb);
+
+	nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
+	if (!nx_ctx->kmem)
+		return -ENOMEM;
+
+	/* the csbcpb and scatterlists must be 4K aligned pages */
+	nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
+						       (u64)NX_PAGE_SIZE));
+	nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
+	nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);
+
+	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
+		nx_ctx->csbcpb_aead =
+			(struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
+					     NX_PAGE_SIZE);
+
+	/* give each context a pointer to global stats and their OF
+	 * properties */
+	nx_ctx->stats = &nx_driver.stats;
+	memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
+	       sizeof(struct alg_props) * 3);
+
+	return 0;
+}
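+
+/*
+ * Layout sketch of the kmem allocation above (GCM/CCM case shown):
+ *
+ *	kmem                                   unaligned kmalloc'd buffer
+ *	csbcpb      = round_up(kmem, 4K)       control/parameter block page
+ *	in_sg       = csbcpb + NX_PAGE_SIZE    one page of input nx_sg entries
+ *	out_sg      = in_sg  + NX_PAGE_SIZE    one page of output nx_sg entries
+ *	csbcpb_aead = out_sg + NX_PAGE_SIZE    GCM/CCM only
+ *
+ * kmem_len adds roughly an extra page over what the aligned regions
+ * themselves occupy so that the initial round-up always fits.
+ */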
+
+/* entry points from the crypto tfm initializers */
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
+{
+	crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
+	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
+				  NX_MODE_AES_CCM);
+}
+
+int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
+{
+	crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
+	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
+				  NX_MODE_AES_GCM);
+}
+
+int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm)
+{
+	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+				  NX_MODE_AES_CTR);
+}
+
+int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm)
+{
+	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+				  NX_MODE_AES_CBC);
+}
+
+int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm)
+{
+	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+				  NX_MODE_AES_ECB);
+}
+
+int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
+{
+	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
+}
+
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
+{
+	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+				  NX_MODE_AES_XCBC_MAC);
+}
+
+/**
+ * nx_crypto_ctx_exit - destroy a crypto api context
+ *
+ * @tfm: the crypto transform pointer for the context
+ *
+ * As crypto API contexts are destroyed, this exit hook is called to free the
+ * memory associated with it.
+ */
+void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+
+	kzfree(nx_ctx->kmem);
+	nx_ctx->csbcpb = NULL;
+	nx_ctx->csbcpb_aead = NULL;
+	nx_ctx->in_sg = NULL;
+	nx_ctx->out_sg = NULL;
+}
+
+void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
+{
+	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
+
+	kzfree(nx_ctx->kmem);
+}
+
+static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
+{
+	dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
+		viodev->name, viodev->resource_id);
+
+	if (nx_driver.viodev) {
+		dev_err(&viodev->dev, "%s: Attempt to register more than one "
+			"instance of the hardware\n", __func__);
+		return -EINVAL;
+	}
+
+	nx_driver.viodev = viodev;
+
+	nx_of_init(&viodev->dev, &nx_driver.of);
+
+	return nx_register_algs();
+}
+
+static int nx_remove(struct vio_dev *viodev)
+{
+	dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
+		viodev->unit_address);
+
+	if (nx_driver.of.status == NX_OKAY) {
+		NX_DEBUGFS_FINI(&nx_driver);
+
+		nx_unregister_shash(&nx_shash_aes_xcbc_alg,
+				    NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
+		nx_unregister_shash(&nx_shash_sha512_alg,
+				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
+		nx_unregister_shash(&nx_shash_sha256_alg,
+				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
+		nx_unregister_aead(&nx_ccm4309_aes_alg,
+				   NX_FC_AES, NX_MODE_AES_CCM);
+		nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+		nx_unregister_aead(&nx_gcm4106_aes_alg,
+				   NX_FC_AES, NX_MODE_AES_GCM);
+		nx_unregister_aead(&nx_gcm_aes_alg,
+				   NX_FC_AES, NX_MODE_AES_GCM);
+		nx_unregister_alg(&nx_ctr3686_aes_alg,
+				  NX_FC_AES, NX_MODE_AES_CTR);
+		nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
+		nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+	}
+
+	return 0;
+}
+
+
+/* module wide initialization/cleanup */
+static int __init nx_init(void)
+{
+	return vio_register_driver(&nx_driver.viodriver);
+}
+
+static void __exit nx_fini(void)
+{
+	vio_unregister_driver(&nx_driver.viodriver);
+}
+
+static const struct vio_device_id nx_crypto_driver_ids[] = {
+	{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
+	{ "", "" }
+};
+MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);
+
+/* driver state structure */
+struct nx_crypto_driver nx_driver = {
+	.viodriver = {
+		.id_table = nx_crypto_driver_ids,
+		.probe = nx_probe,
+		.remove = nx_remove,
+		.name  = NX_NAME,
+	},
+};
+
+module_init(nx_init);
+module_exit(nx_fini);
+
+MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
+MODULE_DESCRIPTION(NX_STRING);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(NX_VERSION);
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
new file mode 100644
index 0000000..c3e54af
--- /dev/null
+++ b/drivers/crypto/nx/nx.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NX_H__
+#define __NX_H__
+
+#include <crypto/ctr.h>
+
+#define NX_NAME		"nx-crypto"
+#define NX_STRING	"IBM Power7+ Nest Accelerator Crypto Driver"
+#define NX_VERSION	"1.0"
+
+static const char nx_driver_string[] = NX_STRING;
+static const char nx_driver_version[] = NX_VERSION;
+
+/* a scatterlist in the format PHYP is expecting */
+struct nx_sg {
+	u64 addr;
+	u32 rsvd;
+	u32 len;
+} __packed;
+
+#define NX_PAGE_SIZE		(4096)
+#define NX_MAX_SG_ENTRIES	(NX_PAGE_SIZE/(sizeof(struct nx_sg)))
+
+enum nx_status {
+	NX_DISABLED,
+	NX_WAITING,
+	NX_OKAY
+};
+
+/* msc_triplet and max_sync_cop are used only to assist in parsing the
+ * openFirmware property */
+struct msc_triplet {
+	u32 keybitlen;
+	u32 databytelen;
+	u32 sglen;
+} __packed;
+
+struct max_sync_cop {
+	u32 fc;
+	u32 mode;
+	u32 triplets;
+	struct msc_triplet trip[0];
+} __packed;
+
+struct alg_props {
+	u32 databytelen;
+	u32 sglen;
+};
+
+#define NX_OF_FLAG_MAXSGLEN_SET		(1)
+#define NX_OF_FLAG_STATUS_SET		(2)
+#define NX_OF_FLAG_MAXSYNCCOP_SET	(4)
+#define NX_OF_FLAG_MASK_READY		(NX_OF_FLAG_MAXSGLEN_SET | \
+					 NX_OF_FLAG_STATUS_SET |   \
+					 NX_OF_FLAG_MAXSYNCCOP_SET)
+struct nx_of {
+	u32 flags;
+	u32 max_sg_len;
+	enum nx_status status;
+	struct alg_props ap[NX_MAX_FC][NX_MAX_MODE][3];
+};
+
+struct nx_stats {
+	atomic_t aes_ops;
+	atomic64_t aes_bytes;
+	atomic_t sha256_ops;
+	atomic64_t sha256_bytes;
+	atomic_t sha512_ops;
+	atomic64_t sha512_bytes;
+
+	atomic_t sync_ops;
+
+	atomic_t errors;
+	atomic_t last_error;
+	atomic_t last_error_pid;
+};
+
+struct nx_debugfs {
+	struct dentry *dfs_root;
+	struct dentry *dfs_aes_ops, *dfs_aes_bytes;
+	struct dentry *dfs_sha256_ops, *dfs_sha256_bytes;
+	struct dentry *dfs_sha512_ops, *dfs_sha512_bytes;
+	struct dentry *dfs_errors, *dfs_last_error, *dfs_last_error_pid;
+};
+
+struct nx_crypto_driver {
+	struct nx_stats    stats;
+	struct nx_of       of;
+	struct vio_dev    *viodev;
+	struct vio_driver  viodriver;
+	struct nx_debugfs  dfs;
+};
+
+#define NX_GCM4106_NONCE_LEN		(4)
+#define NX_GCM_CTR_OFFSET		(12)
+struct nx_gcm_rctx {
+	u8 iv[16];
+};
+
+struct nx_gcm_priv {
+	u8 iauth_tag[16];
+	u8 nonce[NX_GCM4106_NONCE_LEN];
+};
+
+#define NX_CCM_AES_KEY_LEN		(16)
+#define NX_CCM4309_AES_KEY_LEN		(19)
+#define NX_CCM4309_NONCE_LEN		(3)
+struct nx_ccm_rctx {
+	u8 iv[16];
+};
+
+struct nx_ccm_priv {
+	u8 b0[16];
+	u8 iauth_tag[16];
+	u8 oauth_tag[16];
+	u8 nonce[NX_CCM4309_NONCE_LEN];
+};
+
+struct nx_xcbc_priv {
+	u8 key[16];
+};
+
+struct nx_ctr_priv {
+	u8 nonce[CTR_RFC3686_NONCE_SIZE];
+};
+
+struct nx_crypto_ctx {
+	spinlock_t lock;	  /* synchronize access to the context */
+	void *kmem;		  /* unaligned, kmalloc'd buffer */
+	size_t kmem_len;	  /* length of kmem */
+	struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */
+	struct vio_pfo_op op;     /* operation struct with hcall parameters */
+	struct nx_csbcpb *csbcpb_aead; /* secondary csbcpb used by AEAD algs */
+	struct vio_pfo_op op_aead;/* operation struct for csbcpb_aead */
+
+	struct nx_sg *in_sg;      /* aligned pointer into kmem to an sg list */
+	struct nx_sg *out_sg;     /* aligned pointer into kmem to an sg list */
+
+	struct alg_props *ap;	  /* pointer into props based on our key size */
+	struct alg_props props[3];/* openFirmware properties for requests */
+	struct nx_stats *stats;   /* pointer into an nx_crypto_driver for stats
+				     reporting */
+
+	union {
+		struct nx_gcm_priv gcm;
+		struct nx_ccm_priv ccm;
+		struct nx_xcbc_priv xcbc;
+		struct nx_ctr_priv ctr;
+	} priv;
+};
+
+struct crypto_aead;
+
+/* prototypes */
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
+int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
+void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
+void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm);
+void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
+int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
+		  u32 may_sleep);
+struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
+int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
+		      struct scatterlist *, struct scatterlist *, unsigned int *,
+		      unsigned int, u8 *);
+struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
+				struct scatterlist *, unsigned int,
+				unsigned int *);
+
+#ifdef CONFIG_DEBUG_FS
+#define NX_DEBUGFS_INIT(drv)	nx_debugfs_init(drv)
+#define NX_DEBUGFS_FINI(drv)	nx_debugfs_fini(drv)
+
+int nx_debugfs_init(struct nx_crypto_driver *);
+void nx_debugfs_fini(struct nx_crypto_driver *);
+#else
+#define NX_DEBUGFS_INIT(drv)	(0)
+#define NX_DEBUGFS_FINI(drv)	(0)
+#endif
+
+#define NX_PAGE_NUM(x)		((u64)(x) & 0xfffffffffffff000ULL)
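+/* e.g. NX_PAGE_NUM(0x1234567) == 0x1234000, the 4K page containing x */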
+
+extern struct crypto_alg nx_cbc_aes_alg;
+extern struct crypto_alg nx_ecb_aes_alg;
+extern struct aead_alg nx_gcm_aes_alg;
+extern struct aead_alg nx_gcm4106_aes_alg;
+extern struct crypto_alg nx_ctr3686_aes_alg;
+extern struct aead_alg nx_ccm_aes_alg;
+extern struct aead_alg nx_ccm4309_aes_alg;
+extern struct shash_alg nx_shash_aes_xcbc_alg;
+extern struct shash_alg nx_shash_sha512_alg;
+extern struct shash_alg nx_shash_sha256_alg;
+
+extern struct nx_crypto_driver nx_driver;
+
+#define SCATTERWALK_TO_SG	1
+#define SCATTERWALK_FROM_SG	0
+
+#endif
diff --git a/drivers/crypto/nx/nx_csbcpb.h b/drivers/crypto/nx/nx_csbcpb.h
new file mode 100644
index 0000000..493f849
--- /dev/null
+++ b/drivers/crypto/nx/nx_csbcpb.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NX_CSBCPB_H__
+#define __NX_CSBCPB_H__
+
+struct cop_symcpb_aes_ecb {
+	u8 key[32];
+	u8 __rsvd[80];
+} __packed;
+
+struct cop_symcpb_aes_cbc {
+	u8 iv[16];
+	u8 key[32];
+	u8 cv[16];
+	u32 spbc;
+	u8 __rsvd[44];
+} __packed;
+
+struct cop_symcpb_aes_gca {
+	u8 in_pat[16];
+	u8 key[32];
+	u8 out_pat[16];
+	u32 spbc;
+	u8 __rsvd[44];
+} __packed;
+
+struct cop_symcpb_aes_gcm {
+	u8 in_pat_or_aad[16];
+	u8 iv_or_cnt[16];
+	u64 bit_length_aad;
+	u64 bit_length_data;
+	u8 in_s0[16];
+	u8 key[32];
+	u8 __rsvd1[16];
+	u8 out_pat_or_mac[16];
+	u8 out_s0[16];
+	u8 out_cnt[16];
+	u32 spbc;
+	u8 __rsvd2[12];
+} __packed;
+
+struct cop_symcpb_aes_ctr {
+	u8 iv[16];
+	u8 key[32];
+	u8 cv[16];
+	u32 spbc;
+	u8 __rsvd2[44];
+} __packed;
+
+struct cop_symcpb_aes_cca {
+	u8 b0[16];
+	u8 b1[16];
+	u8 key[16];
+	u8 out_pat_or_b0[16];
+	u32 spbc;
+	u8 __rsvd[44];
+} __packed;
+
+struct cop_symcpb_aes_ccm {
+	u8 in_pat_or_b0[16];
+	u8 iv_or_ctr[16];
+	u8 in_s0[16];
+	u8 key[16];
+	u8 __rsvd1[48];
+	u8 out_pat_or_mac[16];
+	u8 out_s0[16];
+	u8 out_ctr[16];
+	u32 spbc;
+	u8 __rsvd2[12];
+} __packed;
+
+struct cop_symcpb_aes_xcbc {
+	u8 cv[16];
+	u8 key[16];
+	u8 __rsvd1[16];
+	u8 out_cv_mac[16];
+	u32 spbc;
+	u8 __rsvd2[44];
+} __packed;
+
+struct cop_symcpb_sha256 {
+	u64 message_bit_length;
+	u64 __rsvd1;
+	u8 input_partial_digest[32];
+	u8 message_digest[32];
+	u32 spbc;
+	u8 __rsvd2[44];
+} __packed;
+
+struct cop_symcpb_sha512 {
+	u64 message_bit_length_hi;
+	u64 message_bit_length_lo;
+	u8 input_partial_digest[64];
+	u8 __rsvd1[32];
+	u8 message_digest[64];
+	u32 spbc;
+	u8 __rsvd2[76];
+} __packed;
+
+#define NX_FDM_INTERMEDIATE		0x01
+#define NX_FDM_CONTINUATION		0x02
+#define NX_FDM_ENDE_ENCRYPT		0x80
+
+#define NX_CPB_FDM(c)			((c)->cpb.hdr.fdm)
+#define NX_CPB_KS_DS(c)			((c)->cpb.hdr.ks_ds)
+
+#define NX_CPB_KEY_SIZE(c)		(NX_CPB_KS_DS(c) >> 4)
+#define NX_CPB_SET_KEY_SIZE(c, x)	NX_CPB_KS_DS(c) |= ((x) << 4)
+#define NX_CPB_SET_DIGEST_SIZE(c, x)	NX_CPB_KS_DS(c) |= (x)
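+
+/*
+ * Illustrative use of the ks_ds helpers above: the key size sits in the
+ * high nibble and the digest size in the low nibble, so a hypothetical
+ * caller would set up an AES-256 CPB with
+ *
+ *	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
+ *
+ * and a SHA-512 CPB with
+ *
+ *	NX_CPB_SET_DIGEST_SIZE(csbcpb, NX_DS_SHA512);
+ *
+ * (NX_KS_* and NX_DS_* are defined further down in this header.)
+ */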
+
+struct cop_symcpb_header {
+	u8 mode;
+	u8 fdm;
+	u8 ks_ds;
+	u8 pad_byte;
+	u8 __rsvd[12];
+} __packed;
+
+struct cop_parameter_block {
+	struct cop_symcpb_header hdr;
+	union {
+		struct cop_symcpb_aes_ecb  aes_ecb;
+		struct cop_symcpb_aes_cbc  aes_cbc;
+		struct cop_symcpb_aes_gca  aes_gca;
+		struct cop_symcpb_aes_gcm  aes_gcm;
+		struct cop_symcpb_aes_cca  aes_cca;
+		struct cop_symcpb_aes_ccm  aes_ccm;
+		struct cop_symcpb_aes_ctr  aes_ctr;
+		struct cop_symcpb_aes_xcbc aes_xcbc;
+		struct cop_symcpb_sha256   sha256;
+		struct cop_symcpb_sha512   sha512;
+	};
+} __packed;
+
+#define NX_CSB_VALID_BIT	0x80
+
+/* co-processor status block */
+struct cop_status_block {
+	u8 valid;
+	u8 crb_seq_number;
+	u8 completion_code;
+	u8 completion_extension;
+	u32 processed_byte_count;
+	u64 address;
+} __packed;
+
+/* Nest accelerator workbook section 4.4 */
+struct nx_csbcpb {
+	unsigned char __rsvd[112];
+	struct cop_status_block csb;
+	struct cop_parameter_block cpb;
+} __packed;
+
+/* nx_csbcpb related definitions */
+#define NX_MODE_AES_ECB			0
+#define NX_MODE_AES_CBC			1
+#define NX_MODE_AES_GMAC		2
+#define NX_MODE_AES_GCA			3
+#define NX_MODE_AES_GCM			4
+#define NX_MODE_AES_CCA			5
+#define NX_MODE_AES_CCM			6
+#define NX_MODE_AES_CTR			7
+#define NX_MODE_AES_XCBC_MAC		20
+#define NX_MODE_SHA			0
+#define NX_MODE_SHA_HMAC		1
+#define NX_MODE_AES_CBC_HMAC_ETA	8
+#define NX_MODE_AES_CBC_HMAC_ATE	9
+#define NX_MODE_AES_CBC_HMAC_EAA	10
+#define NX_MODE_AES_CTR_HMAC_ETA	12
+#define NX_MODE_AES_CTR_HMAC_ATE	13
+#define NX_MODE_AES_CTR_HMAC_EAA	14
+
+#define NX_FDM_CI_FULL		0
+#define NX_FDM_CI_FIRST		1
+#define NX_FDM_CI_LAST		2
+#define NX_FDM_CI_MIDDLE	3
+
+#define NX_FDM_PR_NONE		0
+#define NX_FDM_PR_PAD		1
+
+#define NX_KS_AES_128		1
+#define NX_KS_AES_192		2
+#define NX_KS_AES_256		3
+
+#define NX_DS_SHA256		2
+#define NX_DS_SHA512		3
+
+#define NX_FC_AES		0
+#define NX_FC_SHA		2
+#define NX_FC_AES_HMAC		6
+
+#define NX_MAX_FC		(NX_FC_AES_HMAC + 1)
+#define NX_MAX_MODE		(NX_MODE_AES_XCBC_MAC + 1)
+
+#define HCOP_FC_AES          NX_FC_AES
+#define HCOP_FC_SHA          NX_FC_SHA
+#define HCOP_FC_AES_HMAC     NX_FC_AES_HMAC
+
+/* indices into the array of algorithm properties */
+#define NX_PROPS_AES_128		0
+#define NX_PROPS_AES_192		1
+#define NX_PROPS_AES_256		2
+#define NX_PROPS_SHA256			1
+#define NX_PROPS_SHA512			2
+
+#endif
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
new file mode 100644
index 0000000..7ab2e8d
--- /dev/null
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -0,0 +1,103 @@
+/**
+ * debugfs routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * debugfs
+ *
+ * For documentation on these attributes, please see:
+ *
+ * Documentation/ABI/testing/debugfs-pfo-nx-crypto
+ */
+
+int nx_debugfs_init(struct nx_crypto_driver *drv)
+{
+	struct nx_debugfs *dfs = &drv->dfs;
+
+	dfs->dfs_root = debugfs_create_dir(NX_NAME, NULL);
+
+	dfs->dfs_aes_ops =
+		debugfs_create_u32("aes_ops",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   dfs->dfs_root, (u32 *)&drv->stats.aes_ops);
+	dfs->dfs_sha256_ops =
+		debugfs_create_u32("sha256_ops",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   dfs->dfs_root,
+				   (u32 *)&drv->stats.sha256_ops);
+	dfs->dfs_sha512_ops =
+		debugfs_create_u32("sha512_ops",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   dfs->dfs_root,
+				   (u32 *)&drv->stats.sha512_ops);
+	dfs->dfs_aes_bytes =
+		debugfs_create_u64("aes_bytes",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   dfs->dfs_root,
+				   (u64 *)&drv->stats.aes_bytes);
+	dfs->dfs_sha256_bytes =
+		debugfs_create_u64("sha256_bytes",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   dfs->dfs_root,
+				   (u64 *)&drv->stats.sha256_bytes);
+	dfs->dfs_sha512_bytes =
+		debugfs_create_u64("sha512_bytes",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   dfs->dfs_root,
+				   (u64 *)&drv->stats.sha512_bytes);
+	dfs->dfs_errors =
+		debugfs_create_u32("errors",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   dfs->dfs_root, (u32 *)&drv->stats.errors);
+	dfs->dfs_last_error =
+		debugfs_create_u32("last_error",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   dfs->dfs_root,
+				   (u32 *)&drv->stats.last_error);
+	dfs->dfs_last_error_pid =
+		debugfs_create_u32("last_error_pid",
+				   S_IRUSR | S_IRGRP | S_IROTH,
+				   dfs->dfs_root,
+				   (u32 *)&drv->stats.last_error_pid);
+	return 0;
+}
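+
+/*
+ * With debugfs mounted at its usual /sys/kernel/debug location, the
+ * counters above appear as read-only files such as
+ * /sys/kernel/debug/nx-crypto/aes_ops and
+ * /sys/kernel/debug/nx-crypto/errors (the directory name comes from
+ * NX_NAME); see the ABI document referenced above for their meaning.
+ */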
+
+void
+nx_debugfs_fini(struct nx_crypto_driver *drv)
+{
+	debugfs_remove_recursive(drv->dfs.dfs_root);
+}
+
+#endif
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
new file mode 100644
index 0000000..0cc3b65
--- /dev/null
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -0,0 +1,409 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP AES GCM HW acceleration.
+ *
+ * Copyright (c) 2016 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
+#include <linux/interrupt.h>
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <crypto/internal/aead.h>
+
+#include "omap-crypto.h"
+#include "omap-aes.h"
+
+static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
+				     struct aead_request *req);
+
+static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
+{
+	struct aead_request *req = dd->aead_req;
+
+	dd->flags &= ~FLAGS_BUSY;
+	dd->in_sg = NULL;
+	dd->out_sg = NULL;
+
+	req->base.complete(&req->base, ret);
+}
+
+static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
+{
+	u8 *tag;
+	int alen, clen, i, ret = 0, nsg;
+	struct omap_aes_reqctx *rctx;
+
+	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
+	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
+	rctx = aead_request_ctx(dd->aead_req);
+
+	nsg = !!(dd->assoc_len && dd->total);
+
+	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
+			       DMA_FROM_DEVICE);
+	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
+	omap_aes_crypt_dma_stop(dd);
+
+	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
+			    dd->aead_req->assoclen, dd->total,
+			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+
+	if (dd->flags & FLAGS_ENCRYPT)
+		scatterwalk_map_and_copy(rctx->auth_tag,
+					 dd->aead_req->dst,
+					 dd->total + dd->aead_req->assoclen,
+					 dd->authsize, 1);
+
+	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
+			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);
+
+	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
+			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);
+
+	if (!(dd->flags & FLAGS_ENCRYPT)) {
+		tag = (u8 *)rctx->auth_tag;
+		for (i = 0; i < dd->authsize; i++) {
+			if (tag[i]) {
+				dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
+				ret = -EBADMSG;
+			}
+		}
+	}
+
+	omap_aes_gcm_finish_req(dd, ret);
+	omap_aes_gcm_handle_queue(dd, NULL);
+}
+
+static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
+				     struct aead_request *req)
+{
+	int alen, clen, cryptlen, assoclen, ret;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	unsigned int authlen = crypto_aead_authsize(aead);
+	struct scatterlist *tmp, sg_arr[2];
+	int nsg;
+	u16 flags;
+
+	assoclen = req->assoclen;
+	cryptlen = req->cryptlen;
+
+	if (dd->flags & FLAGS_RFC4106_GCM)
+		assoclen -= 8;
+
+	if (!(dd->flags & FLAGS_ENCRYPT))
+		cryptlen -= authlen;
+
+	alen = ALIGN(assoclen, AES_BLOCK_SIZE);
+	clen = ALIGN(cryptlen, AES_BLOCK_SIZE);
+
+	nsg = !!(assoclen && cryptlen);
+
+	omap_aes_clear_copy_flags(dd);
+
+	sg_init_table(dd->in_sgl, nsg + 1);
+	if (assoclen) {
+		tmp = req->src;
+		ret = omap_crypto_align_sg(&tmp, assoclen,
+					   AES_BLOCK_SIZE, dd->in_sgl,
+					   OMAP_CRYPTO_COPY_DATA |
+					   OMAP_CRYPTO_ZERO_BUF |
+					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
+					   FLAGS_ASSOC_DATA_ST_SHIFT,
+					   &dd->flags);
+	}
+
+	if (cryptlen) {
+		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);
+
+		ret = omap_crypto_align_sg(&tmp, cryptlen,
+					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
+					   OMAP_CRYPTO_COPY_DATA |
+					   OMAP_CRYPTO_ZERO_BUF |
+					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
+					   FLAGS_IN_DATA_ST_SHIFT,
+					   &dd->flags);
+	}
+
+	dd->in_sg = dd->in_sgl;
+	dd->total = cryptlen;
+	dd->assoc_len = assoclen;
+	dd->authsize = authlen;
+
+	dd->out_sg = req->dst;
+	dd->orig_out = req->dst;
+
+	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);
+
+	flags = 0;
+	if (req->src == req->dst || dd->out_sg == sg_arr)
+		flags |= OMAP_CRYPTO_FORCE_COPY;
+
+	ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
+				   AES_BLOCK_SIZE, &dd->out_sgl,
+				   flags,
+				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
+	if (ret)
+		return ret;
+
+	dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
+	dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);
+
+	return 0;
+}
+
+static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
+{
+	struct omap_aes_gcm_result *res = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	res->err = err;
+	complete(&res->completion);
+}
+
+static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
+{
+	struct scatterlist iv_sg, tag_sg;
+	struct skcipher_request *sk_req;
+	struct omap_aes_gcm_result result;
+	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	int ret = 0;
+
+	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
+	if (!sk_req) {
+		pr_err("skcipher: Failed to allocate request\n");
+		return -ENOMEM;
+	}
+
+	init_completion(&result.completion);
+
+	sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
+	sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
+	skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      omap_aes_gcm_complete, &result);
+	ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
+	skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
+				   NULL);
+	ret = crypto_skcipher_encrypt(sk_req);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINPROGRESS:
+	case -EBUSY:
+		ret = wait_for_completion_interruptible(&result.completion);
+		if (!ret) {
+			ret = result.err;
+			if (!ret) {
+				reinit_completion(&result.completion);
+				break;
+			}
+		}
+		/* fall through */
+	default:
+		pr_err("Encryption of IV failed for GCM mode\n");
+		break;
+	}
+
+	skcipher_request_free(sk_req);
+	return ret;
+}
+
+void omap_aes_gcm_dma_out_callback(void *data)
+{
+	struct omap_aes_dev *dd = data;
+	struct omap_aes_reqctx *rctx;
+	int i, val;
+	u32 *auth_tag, tag[4];
+
+	if (!(dd->flags & FLAGS_ENCRYPT))
+		scatterwalk_map_and_copy(tag, dd->aead_req->src,
+					 dd->total + dd->aead_req->assoclen,
+					 dd->authsize, 0);
+
+	rctx = aead_request_ctx(dd->aead_req);
+	auth_tag = (u32 *)rctx->auth_tag;
+	for (i = 0; i < 4; i++) {
+		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
+		auth_tag[i] = val ^ auth_tag[i];
+		if (!(dd->flags & FLAGS_ENCRYPT))
+			auth_tag[i] = auth_tag[i] ^ tag[i];
+	}
+
+	omap_aes_gcm_done_task(dd);
+}
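+
+/*
+ * Tag handling sketch (illustrative, assuming the AES_REG_TAG_N registers
+ * hold the GHASH portion of the tag): the callback above computes
+ *
+ *	tag = AES_REG_TAG_N ^ E_K(J0)
+ *
+ * where E_K(J0), the encrypted initial counter block, was stashed in
+ * rctx->auth_tag by do_encrypt_iv(). For decryption the tag read from the
+ * source buffer is XORed in as well, so omap_aes_gcm_done_task() only has
+ * to check that the result is all zero bytes to accept the message.
+ */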
+
+static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
+				     struct aead_request *req)
+{
+	struct omap_aes_ctx *ctx;
+	struct aead_request *backlog;
+	struct omap_aes_reqctx *rctx;
+	unsigned long flags;
+	int err, ret = 0;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = aead_enqueue_request(&dd->aead_queue, req);
+	if (dd->flags & FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
+
+	backlog = aead_get_backlog(&dd->aead_queue);
+	req = aead_dequeue_request(&dd->aead_queue);
+	if (req)
+		dd->flags |= FLAGS_BUSY;
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!req)
+		return ret;
+
+	if (backlog)
+		backlog->base.complete(&backlog->base, -EINPROGRESS);
+
+	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	rctx = aead_request_ctx(req);
+
+	dd->ctx = ctx;
+	rctx->dd = dd;
+	dd->aead_req = req;
+
+	rctx->mode &= FLAGS_MODE_MASK;
+	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+	err = omap_aes_gcm_copy_buffers(dd, req);
+	if (err)
+		return err;
+
+	err = omap_aes_write_ctrl(dd);
+	if (!err)
+		err = omap_aes_crypt_dma_start(dd);
+
+	if (err) {
+		omap_aes_gcm_finish_req(dd, err);
+		omap_aes_gcm_handle_queue(dd, NULL);
+	}
+
+	return ret;
+}
+
+static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
+{
+	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	unsigned int authlen = crypto_aead_authsize(aead);
+	struct omap_aes_dev *dd;
+	__be32 counter = cpu_to_be32(1);
+	int err, assoclen;
+
+	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
+	memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);
+
+	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
+	if (err)
+		return err;
+
+	if (mode & FLAGS_RFC4106_GCM)
+		assoclen = req->assoclen - 8;
+	else
+		assoclen = req->assoclen;
+	if (assoclen + req->cryptlen == 0) {
+		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
+					 1);
+		return 0;
+	}
+
+	dd = omap_aes_find_dev(rctx);
+	if (!dd)
+		return -ENODEV;
+	rctx->mode = mode;
+
+	return omap_aes_gcm_handle_queue(dd, req);
+}
+
+int omap_aes_gcm_encrypt(struct aead_request *req)
+{
+	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+
+	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
+	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
+}
+
+int omap_aes_gcm_decrypt(struct aead_request *req)
+{
+	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+
+	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
+	return omap_aes_gcm_crypt(req, FLAGS_GCM);
+}
+
+int omap_aes_4106gcm_encrypt(struct aead_request *req)
+{
+	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+
+	memcpy(rctx->iv, ctx->nonce, 4);
+	memcpy(rctx->iv + 4, req->iv, 8);
+	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
+				  FLAGS_RFC4106_GCM);
+}
+
+int omap_aes_4106gcm_decrypt(struct aead_request *req)
+{
+	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+
+	memcpy(rctx->iv, ctx->nonce, 4);
+	memcpy(rctx->iv + 4, req->iv, 8);
+	return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
+}
+
+int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256)
+		return -EINVAL;
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+			    unsigned int keylen)
+{
+	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (keylen < 4)
+		return -EINVAL;
+
+	keylen -= 4;
+	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256)
+		return -EINVAL;
+
+	memcpy(ctx->key, key, keylen);
+	memcpy(ctx->nonce, key + keylen, 4);
+	ctx->keylen = keylen;
+
+	return 0;
+}
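+
+/*
+ * Key layout note (illustrative): an rfc4106(gcm(aes)) key is the AES key
+ * followed by a 4-byte salt, so a 20-byte key K splits as
+ *
+ *	ctx->key   = K[0..15]    AES-128 key
+ *	ctx->nonce = K[16..19]   first 4 bytes of every IV
+ *
+ * omap_aes_4106gcm_encrypt()/decrypt() then complete the 12-byte GCM IV
+ * with the 8 explicit bytes supplied in req->iv.
+ */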
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
new file mode 100644
index 0000000..9019f6b
--- /dev/null
+++ b/drivers/crypto/omap-aes.c
@@ -0,0 +1,1351 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP AES HW acceleration.
+ *
+ * Copyright (c) 2010 Nokia Corporation
+ * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ * Copyright (c) 2011 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "%20s: " fmt, __func__
+#define prn(num) pr_debug(#num "=%d\n", num)
+#define prx(num) pr_debug(#num "=%x\n", num)
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+#include <crypto/engine.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/aead.h>
+
+#include "omap-crypto.h"
+#include "omap-aes.h"
+
+/* keep registered devices data here */
+static LIST_HEAD(dev_list);
+static DEFINE_SPINLOCK(list_lock);
+
+static int aes_fallback_sz = 200;
+
+#ifdef DEBUG
+#define omap_aes_read(dd, offset)				\
+({								\
+	int _read_ret;						\
+	_read_ret = __raw_readl(dd->io_base + offset);		\
+	pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",	\
+		 offset, _read_ret);				\
+	_read_ret;						\
+})
+#else
+inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
+{
+	return __raw_readl(dd->io_base + offset);
+}
+#endif
+
+#ifdef DEBUG
+#define omap_aes_write(dd, offset, value)				\
+	do {								\
+		pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n",	\
+			 offset, value);				\
+		__raw_writel(value, dd->io_base + offset);		\
+	} while (0)
+#else
+inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
+				  u32 value)
+{
+	__raw_writel(value, dd->io_base + offset);
+}
+#endif
+
+static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
+					u32 value, u32 mask)
+{
+	u32 val;
+
+	val = omap_aes_read(dd, offset);
+	val &= ~mask;
+	val |= value;
+	omap_aes_write(dd, offset, val);
+}
+
+static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
+					u32 *value, int count)
+{
+	for (; count--; value++, offset += 4)
+		omap_aes_write(dd, offset, *value);
+}
+
+static int omap_aes_hw_init(struct omap_aes_dev *dd)
+{
+	int err;
+
+	if (!(dd->flags & FLAGS_INIT)) {
+		dd->flags |= FLAGS_INIT;
+		dd->err = 0;
+	}
+
+	err = pm_runtime_get_sync(dd->dev);
+	if (err < 0) {
+		dev_err(dd->dev, "failed to get sync: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+void omap_aes_clear_copy_flags(struct omap_aes_dev *dd)
+{
+	dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_IN_DATA_ST_SHIFT);
+	dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_OUT_DATA_ST_SHIFT);
+	dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_ASSOC_DATA_ST_SHIFT);
+}
+
+int omap_aes_write_ctrl(struct omap_aes_dev *dd)
+{
+	struct omap_aes_reqctx *rctx;
+	unsigned int key32;
+	int i, err;
+	u32 val;
+
+	err = omap_aes_hw_init(dd);
+	if (err)
+		return err;
+
+	key32 = dd->ctx->keylen / sizeof(u32);
+
+	/* RESET the key as previous HASH keys should not get affected */
+	if (dd->flags & FLAGS_GCM)
+		for (i = 0; i < 0x40; i = i + 4)
+			omap_aes_write(dd, i, 0x0);
+
+	for (i = 0; i < key32; i++) {
+		omap_aes_write(dd, AES_REG_KEY(dd, i),
+			__le32_to_cpu(dd->ctx->key[i]));
+	}
+
+	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
+		omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
+
+	if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv) {
+		rctx = aead_request_ctx(dd->aead_req);
+		omap_aes_write_n(dd, AES_REG_IV(dd, 0), (u32 *)rctx->iv, 4);
+	}
+
+	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
+	if (dd->flags & FLAGS_CBC)
+		val |= AES_REG_CTRL_CBC;
+
+	if (dd->flags & (FLAGS_CTR | FLAGS_GCM))
+		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
+
+	if (dd->flags & FLAGS_GCM)
+		val |= AES_REG_CTRL_GCM;
+
+	if (dd->flags & FLAGS_ENCRYPT)
+		val |= AES_REG_CTRL_DIRECTION;
+
+	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
+
+	return 0;
+}
+
+static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
+{
+	u32 mask, val;
+
+	val = dd->pdata->dma_start;
+
+	if (dd->dma_lch_out != NULL)
+		val |= dd->pdata->dma_enable_out;
+	if (dd->dma_lch_in != NULL)
+		val |= dd->pdata->dma_enable_in;
+
+	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
+	       dd->pdata->dma_start;
+
+	omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
+
+}
+
+static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
+{
+	omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
+	omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
+	if (dd->flags & FLAGS_GCM)
+		omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len);
+
+	omap_aes_dma_trigger_omap2(dd, length);
+}
+
+static void omap_aes_dma_stop(struct omap_aes_dev *dd)
+{
+	u32 mask;
+
+	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
+	       dd->pdata->dma_start;
+
+	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
+}
+
+struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_reqctx *rctx)
+{
+	struct omap_aes_dev *dd;
+
+	spin_lock_bh(&list_lock);
+	dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
+	list_move_tail(&dd->list, &dev_list);
+	rctx->dd = dd;
+	spin_unlock_bh(&list_lock);
+
+	return dd;
+}
+
+static void omap_aes_dma_out_callback(void *data)
+{
+	struct omap_aes_dev *dd = data;
+
+	/* dma_lch_out - completed */
+	tasklet_schedule(&dd->done_task);
+}
+
+static int omap_aes_dma_init(struct omap_aes_dev *dd)
+{
+	int err;
+
+	dd->dma_lch_out = NULL;
+	dd->dma_lch_in = NULL;
+
+	dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
+	if (IS_ERR(dd->dma_lch_in)) {
+		dev_err(dd->dev, "Unable to request in DMA channel\n");
+		return PTR_ERR(dd->dma_lch_in);
+	}
+
+	dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
+	if (IS_ERR(dd->dma_lch_out)) {
+		dev_err(dd->dev, "Unable to request out DMA channel\n");
+		err = PTR_ERR(dd->dma_lch_out);
+		goto err_dma_out;
+	}
+
+	return 0;
+
+err_dma_out:
+	dma_release_channel(dd->dma_lch_in);
+
+	return err;
+}
+
+static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
+{
+	if (dd->pio_only)
+		return;
+
+	dma_release_channel(dd->dma_lch_out);
+	dma_release_channel(dd->dma_lch_in);
+}
+
+static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
+			      struct scatterlist *in_sg,
+			      struct scatterlist *out_sg,
+			      int in_sg_len, int out_sg_len)
+{
+	struct dma_async_tx_descriptor *tx_in, *tx_out;
+	struct dma_slave_config cfg;
+	int ret;
+
+	if (dd->pio_only) {
+		scatterwalk_start(&dd->in_walk, dd->in_sg);
+		scatterwalk_start(&dd->out_walk, dd->out_sg);
+
+		/*
+		 * Enable the DATAIN interrupt and let it take
+		 * care of the rest.
+		 */
+		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
+		return 0;
+	}
+
+	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);
+
+	memset(&cfg, 0, sizeof(cfg));
+
+	cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
+	cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
+	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.src_maxburst = DST_MAXBURST;
+	cfg.dst_maxburst = DST_MAXBURST;
+
+	/* IN */
+	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
+	if (ret) {
+		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
+			ret);
+		return ret;
+	}
+
+	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
+					DMA_MEM_TO_DEV,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!tx_in) {
+		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
+		return -EINVAL;
+	}
+
+	/* No callback necessary */
+	tx_in->callback_param = dd;
+
+	/* OUT */
+	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
+	if (ret) {
+		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+			ret);
+		return ret;
+	}
+
+	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
+					DMA_DEV_TO_MEM,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!tx_out) {
+		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
+		return -EINVAL;
+	}
+
+	if (dd->flags & FLAGS_GCM)
+		tx_out->callback = omap_aes_gcm_dma_out_callback;
+	else
+		tx_out->callback = omap_aes_dma_out_callback;
+	tx_out->callback_param = dd;
+
+	dmaengine_submit(tx_in);
+	dmaengine_submit(tx_out);
+
+	dma_async_issue_pending(dd->dma_lch_in);
+	dma_async_issue_pending(dd->dma_lch_out);
+
+	/* start DMA */
+	dd->pdata->trigger(dd, dd->total);
+
+	return 0;
+}
+
+int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
+{
+	int err;
+
+	pr_debug("total: %d\n", dd->total);
+
+	if (!dd->pio_only) {
+		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
+				 DMA_TO_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			return -EINVAL;
+		}
+
+		err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+				 DMA_FROM_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			return -EINVAL;
+		}
+	}
+
+	err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len,
+				 dd->out_sg_len);
+	if (err && !dd->pio_only) {
+		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+			     DMA_FROM_DEVICE);
+	}
+
+	return err;
+}
+
+static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
+{
+	struct ablkcipher_request *req = dd->req;
+
+	pr_debug("err: %d\n", err);
+
+	crypto_finalize_ablkcipher_request(dd->engine, req, err);
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+}
+
+int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
+{
+	pr_debug("total: %d\n", dd->total);
+
+	omap_aes_dma_stop(dd);
+
+	return 0;
+}
+
+static int omap_aes_handle_queue(struct omap_aes_dev *dd,
+				 struct ablkcipher_request *req)
+{
+	if (req)
+		return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
+
+	return 0;
+}
+
+static int omap_aes_prepare_req(struct crypto_engine *engine,
+				void *areq)
+{
+	struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
+	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
+			crypto_ablkcipher_reqtfm(req));
+	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct omap_aes_dev *dd = rctx->dd;
+	int ret;
+	u16 flags;
+
+	if (!dd)
+		return -ENODEV;
+
+	/* assign new request to device */
+	dd->req = req;
+	dd->total = req->nbytes;
+	dd->total_save = req->nbytes;
+	dd->in_sg = req->src;
+	dd->out_sg = req->dst;
+	dd->orig_out = req->dst;
+
+	flags = OMAP_CRYPTO_COPY_DATA;
+	if (req->src == req->dst)
+		flags |= OMAP_CRYPTO_FORCE_COPY;
+
+	ret = omap_crypto_align_sg(&dd->in_sg, dd->total, AES_BLOCK_SIZE,
+				   dd->in_sgl, flags,
+				   FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
+	if (ret)
+		return ret;
+
+	ret = omap_crypto_align_sg(&dd->out_sg, dd->total, AES_BLOCK_SIZE,
+				   &dd->out_sgl, 0,
+				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
+	if (ret)
+		return ret;
+
+	dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
+	if (dd->in_sg_len < 0)
+		return dd->in_sg_len;
+
+	dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
+	if (dd->out_sg_len < 0)
+		return dd->out_sg_len;
+
+	rctx->mode &= FLAGS_MODE_MASK;
+	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+	dd->ctx = ctx;
+	rctx->dd = dd;
+
+	return omap_aes_write_ctrl(dd);
+}
+
+static int omap_aes_crypt_req(struct crypto_engine *engine,
+			      void *areq)
+{
+	struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
+	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct omap_aes_dev *dd = rctx->dd;
+
+	if (!dd)
+		return -ENODEV;
+
+	return omap_aes_crypt_dma_start(dd);
+}
+
+static void omap_aes_done_task(unsigned long data)
+{
+	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+
+	pr_debug("enter done_task\n");
+
+	if (!dd->pio_only) {
+		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
+				       DMA_FROM_DEVICE);
+		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+			     DMA_FROM_DEVICE);
+		omap_aes_crypt_dma_stop(dd);
+	}
+
+	omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save,
+			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);
+
+	omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
+			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+
+	omap_aes_finish_req(dd, 0);
+
+	pr_debug("exit\n");
+}
+
+static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
+			crypto_ablkcipher_reqtfm(req));
+	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct omap_aes_dev *dd;
+	int ret;
+
+	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+		  !!(mode & FLAGS_ENCRYPT),
+		  !!(mode & FLAGS_CBC));
+
+	if (req->nbytes < aes_fallback_sz) {
+		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+		skcipher_request_set_tfm(subreq, ctx->fallback);
+		skcipher_request_set_callback(subreq, req->base.flags, NULL,
+					      NULL);
+		skcipher_request_set_crypt(subreq, req->src, req->dst,
+					   req->nbytes, req->info);
+
+		if (mode & FLAGS_ENCRYPT)
+			ret = crypto_skcipher_encrypt(subreq);
+		else
+			ret = crypto_skcipher_decrypt(subreq);
+
+		skcipher_request_zero(subreq);
+		return ret;
+	}
+	dd = omap_aes_find_dev(rctx);
+	if (!dd)
+		return -ENODEV;
+
+	rctx->mode = mode;
+
+	return omap_aes_handle_queue(dd, req);
+}
+
+/* ********************** ALG API ************************************ */
+
+static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	int ret;
+
+	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+		   keylen != AES_KEYSIZE_256)
+		return -EINVAL;
+
+	pr_debug("enter, keylen: %d\n", keylen);
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+						 CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return omap_aes_crypt(req, FLAGS_ENCRYPT);
+}
+
+static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return omap_aes_crypt(req, 0);
+}
+
+static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
+}
+
+static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return omap_aes_crypt(req, FLAGS_CBC);
+}
+
+static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
+}
+
+static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+	return omap_aes_crypt(req, FLAGS_CTR);
+}
+
+static int omap_aes_prepare_req(struct crypto_engine *engine,
+				void *req);
+static int omap_aes_crypt_req(struct crypto_engine *engine,
+			      void *req);
+
+static int omap_aes_cra_init(struct crypto_tfm *tfm)
+{
+	const char *name = crypto_tfm_alg_name(tfm);
+	const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
+	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_skcipher *blk;
+
+	blk = crypto_alloc_skcipher(name, 0, flags);
+	if (IS_ERR(blk))
+		return PTR_ERR(blk);
+
+	ctx->fallback = blk;
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
+
+	ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
+	ctx->enginectx.op.unprepare_request = NULL;
+	ctx->enginectx.op.do_one_request = omap_aes_crypt_req;
+
+	return 0;
+}
+
+static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
+{
+	struct omap_aes_dev *dd = NULL;
+	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+	int err;
+
+	/* Find AES device, currently picks the first device */
+	spin_lock_bh(&list_lock);
+	list_for_each_entry(dd, &dev_list, list) {
+		break;
+	}
+	spin_unlock_bh(&list_lock);
+
+	err = pm_runtime_get_sync(dd->dev);
+	if (err < 0) {
+		dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
+			__func__, err);
+		return err;
+	}
+
+	tfm->reqsize = sizeof(struct omap_aes_reqctx);
+	ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+	if (IS_ERR(ctx->ctr)) {
+		pr_warn("could not load aes driver for encrypting IV\n");
+		return PTR_ERR(ctx->ctr);
+	}
+
+	return 0;
+}
+
+static void omap_aes_cra_exit(struct crypto_tfm *tfm)
+{
+	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (ctx->fallback)
+		crypto_free_skcipher(ctx->fallback);
+
+	ctx->fallback = NULL;
+}
+
+static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
+{
+	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+	omap_aes_cra_exit(crypto_aead_tfm(tfm));
+
+	if (ctx->ctr)
+		crypto_free_skcipher(ctx->ctr);
+}
+
+/* ********************** ALGS ************************************ */
+
+static struct crypto_alg algs_ecb_cbc[] = {
+{
+	.cra_name		= "ecb(aes)",
+	.cra_driver_name	= "ecb-aes-omap",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY |
+				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= omap_aes_cra_init,
+	.cra_exit		= omap_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.setkey		= omap_aes_setkey,
+		.encrypt	= omap_aes_ecb_encrypt,
+		.decrypt	= omap_aes_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "cbc-aes-omap",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY |
+				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= omap_aes_cra_init,
+	.cra_exit		= omap_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= omap_aes_setkey,
+		.encrypt	= omap_aes_cbc_encrypt,
+		.decrypt	= omap_aes_cbc_decrypt,
+	}
+}
+};
+
+static struct crypto_alg algs_ctr[] = {
+{
+	.cra_name		= "ctr(aes)",
+	.cra_driver_name	= "ctr-aes-omap",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY |
+				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= omap_aes_cra_init,
+	.cra_exit		= omap_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.geniv		= "eseqiv",
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= omap_aes_setkey,
+		.encrypt	= omap_aes_ctr_encrypt,
+		.decrypt	= omap_aes_ctr_decrypt,
+	}
+},
+};
+
+static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
+	{
+		.algs_list	= algs_ecb_cbc,
+		.size		= ARRAY_SIZE(algs_ecb_cbc),
+	},
+};
+
+static struct aead_alg algs_aead_gcm[] = {
+{
+	.base = {
+		.cra_name		= "gcm(aes)",
+		.cra_driver_name	= "gcm-aes-omap",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_KERN_DRIVER_ONLY,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct omap_aes_ctx),
+		.cra_alignmask		= 0xf,
+		.cra_module		= THIS_MODULE,
+	},
+	.init		= omap_aes_gcm_cra_init,
+	.exit		= omap_aes_gcm_cra_exit,
+	.ivsize		= GCM_AES_IV_SIZE,
+	.maxauthsize	= AES_BLOCK_SIZE,
+	.setkey		= omap_aes_gcm_setkey,
+	.encrypt	= omap_aes_gcm_encrypt,
+	.decrypt	= omap_aes_gcm_decrypt,
+},
+{
+	.base = {
+		.cra_name		= "rfc4106(gcm(aes))",
+		.cra_driver_name	= "rfc4106-gcm-aes-omap",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_KERN_DRIVER_ONLY,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct omap_aes_ctx),
+		.cra_alignmask		= 0xf,
+		.cra_module		= THIS_MODULE,
+	},
+	.init		= omap_aes_gcm_cra_init,
+	.exit		= omap_aes_gcm_cra_exit,
+	.maxauthsize	= AES_BLOCK_SIZE,
+	.ivsize		= GCM_RFC4106_IV_SIZE,
+	.setkey		= omap_aes_4106gcm_setkey,
+	.encrypt	= omap_aes_4106gcm_encrypt,
+	.decrypt	= omap_aes_4106gcm_decrypt,
+},
+};
+
+static struct omap_aes_aead_algs omap_aes_aead_info = {
+	.algs_list	=	algs_aead_gcm,
+	.size		=	ARRAY_SIZE(algs_aead_gcm),
+};
+
+static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
+	.algs_info	= omap_aes_algs_info_ecb_cbc,
+	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
+	.trigger	= omap_aes_dma_trigger_omap2,
+	.key_ofs	= 0x1c,
+	.iv_ofs		= 0x20,
+	.ctrl_ofs	= 0x30,
+	.data_ofs	= 0x34,
+	.rev_ofs	= 0x44,
+	.mask_ofs	= 0x48,
+	.dma_enable_in	= BIT(2),
+	.dma_enable_out	= BIT(3),
+	.dma_start	= BIT(5),
+	.major_mask	= 0xf0,
+	.major_shift	= 4,
+	.minor_mask	= 0x0f,
+	.minor_shift	= 0,
+};
+
+#ifdef CONFIG_OF
+static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
+	{
+		.algs_list	= algs_ecb_cbc,
+		.size		= ARRAY_SIZE(algs_ecb_cbc),
+	},
+	{
+		.algs_list	= algs_ctr,
+		.size		= ARRAY_SIZE(algs_ctr),
+	},
+};
+
+static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
+	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
+	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
+	.trigger	= omap_aes_dma_trigger_omap2,
+	.key_ofs	= 0x1c,
+	.iv_ofs		= 0x20,
+	.ctrl_ofs	= 0x30,
+	.data_ofs	= 0x34,
+	.rev_ofs	= 0x44,
+	.mask_ofs	= 0x48,
+	.dma_enable_in	= BIT(2),
+	.dma_enable_out	= BIT(3),
+	.dma_start	= BIT(5),
+	.major_mask	= 0xf0,
+	.major_shift	= 4,
+	.minor_mask	= 0x0f,
+	.minor_shift	= 0,
+};
+
+static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
+	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
+	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
+	.aead_algs_info	= &omap_aes_aead_info,
+	.trigger	= omap_aes_dma_trigger_omap4,
+	.key_ofs	= 0x3c,
+	.iv_ofs		= 0x40,
+	.ctrl_ofs	= 0x50,
+	.data_ofs	= 0x60,
+	.rev_ofs	= 0x80,
+	.mask_ofs	= 0x84,
+	.irq_status_ofs = 0x8c,
+	.irq_enable_ofs = 0x90,
+	.dma_enable_in	= BIT(5),
+	.dma_enable_out	= BIT(6),
+	.major_mask	= 0x0700,
+	.major_shift	= 8,
+	.minor_mask	= 0x003f,
+	.minor_shift	= 0,
+};
+
+static irqreturn_t omap_aes_irq(int irq, void *dev_id)
+{
+	struct omap_aes_dev *dd = dev_id;
+	u32 status, i;
+	u32 *src, *dst;
+
+	status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
+	if (status & AES_REG_IRQ_DATA_IN) {
+		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
+
+		BUG_ON(!dd->in_sg);
+
+		BUG_ON(_calc_walked(in) > dd->in_sg->length);
+
+		src = sg_virt(dd->in_sg) + _calc_walked(in);
+
+		for (i = 0; i < AES_BLOCK_WORDS; i++) {
+			omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
+
+			scatterwalk_advance(&dd->in_walk, 4);
+			if (dd->in_sg->length == _calc_walked(in)) {
+				dd->in_sg = sg_next(dd->in_sg);
+				if (dd->in_sg) {
+					scatterwalk_start(&dd->in_walk,
+							  dd->in_sg);
+					src = sg_virt(dd->in_sg) +
+					      _calc_walked(in);
+				}
+			} else {
+				src++;
+			}
+		}
+
+		/* Clear IRQ status */
+		status &= ~AES_REG_IRQ_DATA_IN;
+		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
+
+		/* Enable DATA_OUT interrupt */
+		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);
+
+	} else if (status & AES_REG_IRQ_DATA_OUT) {
+		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
+
+		BUG_ON(!dd->out_sg);
+
+		BUG_ON(_calc_walked(out) > dd->out_sg->length);
+
+		dst = sg_virt(dd->out_sg) + _calc_walked(out);
+
+		for (i = 0; i < AES_BLOCK_WORDS; i++) {
+			*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
+			scatterwalk_advance(&dd->out_walk, 4);
+			if (dd->out_sg->length == _calc_walked(out)) {
+				dd->out_sg = sg_next(dd->out_sg);
+				if (dd->out_sg) {
+					scatterwalk_start(&dd->out_walk,
+							  dd->out_sg);
+					dst = sg_virt(dd->out_sg) +
+					      _calc_walked(out);
+				}
+			} else {
+				dst++;
+			}
+		}
+
+		dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);
+
+		/* Clear IRQ status */
+		status &= ~AES_REG_IRQ_DATA_OUT;
+		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
+
+		if (!dd->total)
+			/* All bytes read! */
+			tasklet_schedule(&dd->done_task);
+		else
+			/* Enable DATA_IN interrupt for next block */
+			omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static const struct of_device_id omap_aes_of_match[] = {
+	{
+		.compatible	= "ti,omap2-aes",
+		.data		= &omap_aes_pdata_omap2,
+	},
+	{
+		.compatible	= "ti,omap3-aes",
+		.data		= &omap_aes_pdata_omap3,
+	},
+	{
+		.compatible	= "ti,omap4-aes",
+		.data		= &omap_aes_pdata_omap4,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, omap_aes_of_match);
+
+static int omap_aes_get_res_of(struct omap_aes_dev *dd,
+		struct device *dev, struct resource *res)
+{
+	struct device_node *node = dev->of_node;
+	int err = 0;
+
+	dd->pdata = of_device_get_match_data(dev);
+	if (!dd->pdata) {
+		dev_err(dev, "no compatible OF match\n");
+		err = -EINVAL;
+		goto err;
+	}
+
+	err = of_address_to_resource(node, 0, res);
+	if (err < 0) {
+		dev_err(dev, "can't translate OF node address\n");
+		err = -EINVAL;
+		goto err;
+	}
+
+err:
+	return err;
+}
+#else
+static const struct of_device_id omap_aes_of_match[] = {
+	{},
+};
+
+static int omap_aes_get_res_of(struct omap_aes_dev *dd,
+		struct device *dev, struct resource *res)
+{
+	return -EINVAL;
+}
+#endif
+
+static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
+		struct platform_device *pdev, struct resource *res)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *r;
+	int err = 0;
+
+	/* Get the base address */
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		dev_err(dev, "no MEM resource info\n");
+		err = -ENODEV;
+		goto err;
+	}
+	memcpy(res, r, sizeof(*res));
+
+	/* Only OMAP2/3 can be non-DT */
+	dd->pdata = &omap_aes_pdata_omap2;
+
+err:
+	return err;
+}
+
+static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	return sprintf(buf, "%d\n", aes_fallback_sz);
+}
+
+static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	ssize_t status;
+	long value;
+
+	status = kstrtol(buf, 0, &value);
+	if (status)
+		return status;
+
+	/* HW accelerator only works with buffers of at least 9 bytes */
+	if (value < 9) {
+		dev_err(dev, "minimum fallback size is 9\n");
+		return -EINVAL;
+	}
+
+	aes_fallback_sz = value;
+
+	return size;
+}
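+
+/*
+ * Usage sketch (the sysfs path is illustrative; the exact device name is
+ * platform dependent):
+ *   echo 512 > /sys/bus/platform/devices/<omap-aes>/fallback
+ * sets the size threshold below which requests are handled by the software
+ * fallback cipher instead of the accelerator.
+ */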
+
+static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct omap_aes_dev *dd = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
+}
+
+static ssize_t queue_len_store(struct device *dev,
+			       struct device_attribute *attr, const char *buf,
+			       size_t size)
+{
+	struct omap_aes_dev *dd;
+	ssize_t status;
+	long value;
+	unsigned long flags;
+
+	status = kstrtol(buf, 0, &value);
+	if (status)
+		return status;
+
+	if (value < 1)
+		return -EINVAL;
+
+	/*
+	 * Changing the queue size on the fly is safe: if the new size is
+	 * smaller than the current occupancy, the queue simply stops
+	 * accepting new entries until it has shrunk enough.
+	 */
+	spin_lock_bh(&list_lock);
+	list_for_each_entry(dd, &dev_list, list) {
+		spin_lock_irqsave(&dd->lock, flags);
+		dd->engine->queue.max_qlen = value;
+		dd->aead_queue.base.max_qlen = value;
+		spin_unlock_irqrestore(&dd->lock, flags);
+	}
+	spin_unlock_bh(&list_lock);
+
+	return size;
+}
+
+static DEVICE_ATTR_RW(queue_len);
+static DEVICE_ATTR_RW(fallback);
+
+static struct attribute *omap_aes_attrs[] = {
+	&dev_attr_queue_len.attr,
+	&dev_attr_fallback.attr,
+	NULL,
+};
+
+static struct attribute_group omap_aes_attr_group = {
+	.attrs = omap_aes_attrs,
+};
+
+static int omap_aes_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct omap_aes_dev *dd;
+	struct crypto_alg *algp;
+	struct aead_alg *aalg;
+	struct resource res;
+	int err = -ENOMEM, i, j, irq = -1;
+	u32 reg;
+
+	dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
+	if (dd == NULL) {
+		dev_err(dev, "unable to alloc data struct.\n");
+		goto err_data;
+	}
+	dd->dev = dev;
+	platform_set_drvdata(pdev, dd);
+
+	aead_init_queue(&dd->aead_queue, OMAP_AES_QUEUE_LENGTH);
+
+	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
+			       omap_aes_get_res_pdev(dd, pdev, &res);
+	if (err)
+		goto err_res;
+
+	dd->io_base = devm_ioremap_resource(dev, &res);
+	if (IS_ERR(dd->io_base)) {
+		err = PTR_ERR(dd->io_base);
+		goto err_res;
+	}
+	dd->phys_base = res.start;
+
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
+
+	pm_runtime_enable(dev);
+	err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		dev_err(dev, "%s: failed to get_sync(%d)\n",
+			__func__, err);
+		goto err_res;
+	}
+
+	omap_aes_dma_stop(dd);
+
+	reg = omap_aes_read(dd, AES_REG_REV(dd));
+
+	pm_runtime_put_sync(dev);
+
+	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
+		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
+		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
+
+	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
+
+	err = omap_aes_dma_init(dd);
+	if (err == -EPROBE_DEFER) {
+		goto err_irq;
+	} else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
+		dd->pio_only = 1;
+
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			dev_err(dev, "can't get IRQ resource\n");
+			err = irq;
+			goto err_irq;
+		}
+
+		err = devm_request_irq(dev, irq, omap_aes_irq, 0,
+				dev_name(dev), dd);
+		if (err) {
+			dev_err(dev, "Unable to grab omap-aes IRQ\n");
+			goto err_irq;
+		}
+	}
+
+	spin_lock_init(&dd->lock);
+
+	INIT_LIST_HEAD(&dd->list);
+	spin_lock(&list_lock);
+	list_add_tail(&dd->list, &dev_list);
+	spin_unlock(&list_lock);
+
+	/* Initialize crypto engine */
+	dd->engine = crypto_engine_alloc_init(dev, 1);
+	if (!dd->engine) {
+		err = -ENOMEM;
+		goto err_engine;
+	}
+
+	err = crypto_engine_start(dd->engine);
+	if (err)
+		goto err_engine;
+
+	for (i = 0; i < dd->pdata->algs_info_size; i++) {
+		if (!dd->pdata->algs_info[i].registered) {
+			for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
+				algp = &dd->pdata->algs_info[i].algs_list[j];
+
+				pr_debug("reg alg: %s\n", algp->cra_name);
+				INIT_LIST_HEAD(&algp->cra_list);
+
+				err = crypto_register_alg(algp);
+				if (err)
+					goto err_algs;
+
+				dd->pdata->algs_info[i].registered++;
+			}
+		}
+	}
+
+	if (dd->pdata->aead_algs_info &&
+	    !dd->pdata->aead_algs_info->registered) {
+		for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
+			aalg = &dd->pdata->aead_algs_info->algs_list[i];
+			algp = &aalg->base;
+
+			pr_debug("reg alg: %s\n", algp->cra_name);
+			INIT_LIST_HEAD(&algp->cra_list);
+
+			err = crypto_register_aead(aalg);
+			if (err)
+				goto err_aead_algs;
+
+			dd->pdata->aead_algs_info->registered++;
+		}
+	}
+
+	err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
+	if (err) {
+		dev_err(dev, "could not create sysfs device attrs\n");
+		goto err_aead_algs;
+	}
+
+	return 0;
+err_aead_algs:
+	for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
+		aalg = &dd->pdata->aead_algs_info->algs_list[i];
+		crypto_unregister_aead(aalg);
+	}
+err_algs:
+	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+			crypto_unregister_alg(
+					&dd->pdata->algs_info[i].algs_list[j]);
+
+err_engine:
+	if (dd->engine)
+		crypto_engine_exit(dd->engine);
+
+	omap_aes_dma_cleanup(dd);
+err_irq:
+	tasklet_kill(&dd->done_task);
+	pm_runtime_disable(dev);
+err_res:
+	dd = NULL;
+err_data:
+	dev_err(dev, "initialization failed.\n");
+	return err;
+}
+
+static int omap_aes_remove(struct platform_device *pdev)
+{
+	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
+	struct aead_alg *aalg;
+	int i, j;
+
+	if (!dd)
+		return -ENODEV;
+
+	spin_lock(&list_lock);
+	list_del(&dd->list);
+	spin_unlock(&list_lock);
+
+	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+			crypto_unregister_alg(
+					&dd->pdata->algs_info[i].algs_list[j]);
+
+	for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
+		aalg = &dd->pdata->aead_algs_info->algs_list[i];
+		crypto_unregister_aead(aalg);
+	}
+
+	crypto_engine_exit(dd->engine);
+
+	tasklet_kill(&dd->done_task);
+	omap_aes_dma_cleanup(dd);
+	pm_runtime_disable(dd->dev);
+	dd = NULL;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int omap_aes_suspend(struct device *dev)
+{
+	pm_runtime_put_sync(dev);
+	return 0;
+}
+
+static int omap_aes_resume(struct device *dev)
+{
+	pm_runtime_get_sync(dev);
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);
+
+static struct platform_driver omap_aes_driver = {
+	.probe	= omap_aes_probe,
+	.remove	= omap_aes_remove,
+	.driver	= {
+		.name	= "omap-aes",
+		.pm	= &omap_aes_pm_ops,
+		.of_match_table	= omap_aes_of_match,
+	},
+};
+
+module_platform_driver(omap_aes_driver);
+
+MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Dmitry Kasatkin");
+
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
new file mode 100644
index 0000000..fc3b46a
--- /dev/null
+++ b/drivers/crypto/omap-aes.h
@@ -0,0 +1,217 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP AES HW ACCELERATOR defines
+ *
+ * Copyright (c) 2015 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef __OMAP_AES_H__
+#define __OMAP_AES_H__
+
+#include <crypto/engine.h>
+
+#define DST_MAXBURST			4
+#define DMA_MIN				(DST_MAXBURST * sizeof(u32))
+
+#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
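+/*
+ * For example, _calc_walked(in) expands to
+ * (dd->in_walk.offset - dd->in_sg->offset), i.e. how far the scatter walk
+ * has advanced within the current scatterlist entry.
+ */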
+
+/*
+ * OMAP TRM gives bitfields as start:end, where start is the higher bit
+ * number. For example 7:0
+ */
+#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
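+/*
+ * For illustration: FLD_MASK(7, 0) is 0xff, FLD_MASK(4, 3) is 0x18, and
+ * FLD_VAL(0x3, 4, 3) places the value 3 into bits 4:3, giving 0x18.
+ */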
+
+#define AES_REG_KEY(dd, x)		((dd)->pdata->key_ofs - \
+						(((x) ^ 0x01) * 0x04))
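+/*
+ * The (x ^ 0x01) term swaps each pair of key words: for example
+ * AES_REG_KEY(dd, 0) is key_ofs - 0x04 and AES_REG_KEY(dd, 1) is key_ofs.
+ */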
+#define AES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))
+
+#define AES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
+#define AES_REG_CTRL_CONTEXT_READY	BIT(31)
+#define AES_REG_CTRL_CTR_WIDTH_MASK	GENMASK(8, 7)
+#define AES_REG_CTRL_CTR_WIDTH_32	0
+#define AES_REG_CTRL_CTR_WIDTH_64	BIT(7)
+#define AES_REG_CTRL_CTR_WIDTH_96	BIT(8)
+#define AES_REG_CTRL_CTR_WIDTH_128	GENMASK(8, 7)
+#define AES_REG_CTRL_GCM		GENMASK(17, 16)
+#define AES_REG_CTRL_CTR		BIT(6)
+#define AES_REG_CTRL_CBC		BIT(5)
+#define AES_REG_CTRL_KEY_SIZE		GENMASK(4, 3)
+#define AES_REG_CTRL_DIRECTION		BIT(2)
+#define AES_REG_CTRL_INPUT_READY	BIT(1)
+#define AES_REG_CTRL_OUTPUT_READY	BIT(0)
+#define AES_REG_CTRL_MASK		GENMASK(24, 2)
+
+#define AES_REG_C_LEN_0			0x54
+#define AES_REG_C_LEN_1			0x58
+#define AES_REG_A_LEN			0x5C
+
+#define AES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))
+#define AES_REG_TAG_N(dd, x)		(0x70 + ((x) * 0x04))
+
+#define AES_REG_REV(dd)			((dd)->pdata->rev_ofs)
+
+#define AES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
+#define AES_REG_MASK_SIDLE		BIT(6)
+#define AES_REG_MASK_START		BIT(5)
+#define AES_REG_MASK_DMA_OUT_EN		BIT(3)
+#define AES_REG_MASK_DMA_IN_EN		BIT(2)
+#define AES_REG_MASK_SOFTRESET		BIT(1)
+#define AES_REG_AUTOIDLE		BIT(0)
+
+#define AES_REG_LENGTH_N(x)		(0x54 + ((x) * 0x04))
+
+#define AES_REG_IRQ_STATUS(dd)         ((dd)->pdata->irq_status_ofs)
+#define AES_REG_IRQ_ENABLE(dd)         ((dd)->pdata->irq_enable_ofs)
+#define AES_REG_IRQ_DATA_IN            BIT(1)
+#define AES_REG_IRQ_DATA_OUT           BIT(2)
+#define DEFAULT_TIMEOUT		(5 * HZ)
+
+#define DEFAULT_AUTOSUSPEND_DELAY	1000
+
+#define FLAGS_MODE_MASK		0x001f
+#define FLAGS_ENCRYPT		BIT(0)
+#define FLAGS_CBC		BIT(1)
+#define FLAGS_CTR		BIT(2)
+#define FLAGS_GCM		BIT(3)
+#define FLAGS_RFC4106_GCM	BIT(4)
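+/*
+ * The mode bits above are combined by the request entry points, e.g.
+ * omap_aes_cbc_encrypt() runs with (FLAGS_ENCRYPT | FLAGS_CBC).
+ */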
+
+#define FLAGS_INIT		BIT(5)
+#define FLAGS_FAST		BIT(6)
+#define FLAGS_BUSY		BIT(7)
+
+#define FLAGS_IN_DATA_ST_SHIFT	8
+#define FLAGS_OUT_DATA_ST_SHIFT	10
+#define FLAGS_ASSOC_DATA_ST_SHIFT	12
+
+#define AES_BLOCK_WORDS		(AES_BLOCK_SIZE >> 2)
+
+struct omap_aes_gcm_result {
+	struct completion completion;
+	int err;
+};
+
+struct omap_aes_ctx {
+	struct crypto_engine_ctx enginectx;
+	int		keylen;
+	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
+	u8		nonce[4];
+	struct crypto_skcipher	*fallback;
+	struct crypto_skcipher	*ctr;
+};
+
+struct omap_aes_reqctx {
+	struct omap_aes_dev *dd;
+	unsigned long mode;
+	u8 iv[AES_BLOCK_SIZE];
+	u32 auth_tag[AES_BLOCK_SIZE / sizeof(u32)];
+};
+
+#define OMAP_AES_QUEUE_LENGTH	1
+#define OMAP_AES_CACHE_SIZE	0
+
+struct omap_aes_algs_info {
+	struct crypto_alg	*algs_list;
+	unsigned int		size;
+	unsigned int		registered;
+};
+
+struct omap_aes_aead_algs {
+	struct aead_alg	*algs_list;
+	unsigned int	size;
+	unsigned int	registered;
+};
+
+struct omap_aes_pdata {
+	struct omap_aes_algs_info	*algs_info;
+	unsigned int	algs_info_size;
+	struct omap_aes_aead_algs	*aead_algs_info;
+
+	void		(*trigger)(struct omap_aes_dev *dd, int length);
+
+	u32		key_ofs;
+	u32		iv_ofs;
+	u32		ctrl_ofs;
+	u32		data_ofs;
+	u32		rev_ofs;
+	u32		mask_ofs;
+	u32             irq_enable_ofs;
+	u32             irq_status_ofs;
+
+	u32		dma_enable_in;
+	u32		dma_enable_out;
+	u32		dma_start;
+
+	u32		major_mask;
+	u32		major_shift;
+	u32		minor_mask;
+	u32		minor_shift;
+};
+
+struct omap_aes_dev {
+	struct list_head	list;
+	unsigned long		phys_base;
+	void __iomem		*io_base;
+	struct omap_aes_ctx	*ctx;
+	struct device		*dev;
+	unsigned long		flags;
+	int			err;
+
+	struct tasklet_struct	done_task;
+	struct aead_queue	aead_queue;
+	spinlock_t		lock;
+
+	struct ablkcipher_request	*req;
+	struct aead_request		*aead_req;
+	struct crypto_engine		*engine;
+
+	/*
+	 * total is consumed by PIO mode for bookkeeping, so the original
+	 * request length is kept in total_save; it is needed to calculate
+	 * the page order during cleanup.
+	 */
+	size_t				total;
+	size_t				total_save;
+	size_t				assoc_len;
+	size_t				authsize;
+
+	struct scatterlist		*in_sg;
+	struct scatterlist		*out_sg;
+
+	/* Buffers for copying for unaligned cases */
+	struct scatterlist		in_sgl[2];
+	struct scatterlist		out_sgl;
+	struct scatterlist		*orig_out;
+
+	struct scatter_walk		in_walk;
+	struct scatter_walk		out_walk;
+	struct dma_chan		*dma_lch_in;
+	struct dma_chan		*dma_lch_out;
+	int			in_sg_len;
+	int			out_sg_len;
+	int			pio_only;
+	const struct omap_aes_pdata	*pdata;
+};
+
+u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset);
+void omap_aes_write(struct omap_aes_dev *dd, u32 offset, u32 value);
+struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_reqctx *rctx);
+int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+			unsigned int keylen);
+int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+			    unsigned int keylen);
+int omap_aes_gcm_encrypt(struct aead_request *req);
+int omap_aes_gcm_decrypt(struct aead_request *req);
+int omap_aes_4106gcm_encrypt(struct aead_request *req);
+int omap_aes_4106gcm_decrypt(struct aead_request *req);
+int omap_aes_write_ctrl(struct omap_aes_dev *dd);
+int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
+int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
+void omap_aes_gcm_dma_out_callback(void *data);
+void omap_aes_clear_copy_flags(struct omap_aes_dev *dd);
+
+#endif
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
new file mode 100644
index 0000000..2c42e4b
--- /dev/null
+++ b/drivers/crypto/omap-crypto.c
@@ -0,0 +1,188 @@
+/*
+ * OMAP Crypto driver common support routines.
+ *
+ * Copyright (c) 2017 Texas Instruments Incorporated
+ *   Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+
+#include "omap-crypto.h"
+
+static int omap_crypto_copy_sg_lists(int total, int bs,
+				     struct scatterlist **sg,
+				     struct scatterlist *new_sg, u16 flags)
+{
+	int n = sg_nents(*sg);
+	struct scatterlist *tmp;
+
+	if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) {
+		new_sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
+		if (!new_sg)
+			return -ENOMEM;
+
+		sg_init_table(new_sg, n);
+	}
+
+	tmp = new_sg;
+
+	while (*sg && total) {
+		int len = (*sg)->length;
+
+		if (total < len)
+			len = total;
+
+		if (len > 0) {
+			total -= len;
+			sg_set_page(tmp, sg_page(*sg), len, (*sg)->offset);
+			if (total <= 0)
+				sg_mark_end(tmp);
+			tmp = sg_next(tmp);
+		}
+
+		*sg = sg_next(*sg);
+	}
+
+	*sg = new_sg;
+
+	return 0;
+}
+
+static int omap_crypto_copy_sgs(int total, int bs, struct scatterlist **sg,
+				struct scatterlist *new_sg, u16 flags)
+{
+	void *buf;
+	int pages;
+	int new_len;
+
+	new_len = ALIGN(total, bs);
+	pages = get_order(new_len);
+
+	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
+	if (!buf) {
+		pr_err("%s: Couldn't allocate pages for unaligned cases.\n",
+		       __func__);
+		return -ENOMEM;
+	}
+
+	if (flags & OMAP_CRYPTO_COPY_DATA) {
+		scatterwalk_map_and_copy(buf, *sg, 0, total, 0);
+		if (flags & OMAP_CRYPTO_ZERO_BUF)
+			memset(buf + total, 0, new_len - total);
+	}
+
+	if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
+		sg_init_table(new_sg, 1);
+
+	sg_set_buf(new_sg, buf, new_len);
+
+	*sg = new_sg;
+
+	return 0;
+}
+
+static int omap_crypto_check_sg(struct scatterlist *sg, int total, int bs,
+				u16 flags)
+{
+	int len = 0;
+	int num_sg = 0;
+
+	if (!IS_ALIGNED(total, bs))
+		return OMAP_CRYPTO_NOT_ALIGNED;
+
+	while (sg) {
+		num_sg++;
+
+		if (!IS_ALIGNED(sg->offset, 4))
+			return OMAP_CRYPTO_NOT_ALIGNED;
+		if (!IS_ALIGNED(sg->length, bs))
+			return OMAP_CRYPTO_NOT_ALIGNED;
+#ifdef CONFIG_ZONE_DMA
+		if (page_zonenum(sg_page(sg)) != ZONE_DMA)
+			return OMAP_CRYPTO_NOT_ALIGNED;
+#endif
+
+		len += sg->length;
+		sg = sg_next(sg);
+
+		if (len >= total)
+			break;
+	}
+
+	if ((flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) && num_sg > 1)
+		return OMAP_CRYPTO_NOT_ALIGNED;
+
+	if (len != total)
+		return OMAP_CRYPTO_BAD_DATA_LENGTH;
+
+	return 0;
+}
+
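+/*
+ * Typical call, condensed from the omap-aes driver above:
+ *
+ *   ret = omap_crypto_align_sg(&dd->in_sg, dd->total, AES_BLOCK_SIZE,
+ *                              dd->in_sgl, OMAP_CRYPTO_COPY_DATA,
+ *                              FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
+ *
+ * On return, *sg points either at the caller's original list or at an
+ * aligned copy; the copy state is recorded in *dd_flags so that
+ * omap_crypto_cleanup() can undo it later.
+ */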
+int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
+			 struct scatterlist *new_sg, u16 flags,
+			 u8 flags_shift, unsigned long *dd_flags)
+{
+	int ret;
+
+	*dd_flags &= ~(OMAP_CRYPTO_COPY_MASK << flags_shift);
+
+	if (flags & OMAP_CRYPTO_FORCE_COPY)
+		ret = OMAP_CRYPTO_NOT_ALIGNED;
+	else
+		ret = omap_crypto_check_sg(*sg, total, bs, flags);
+
+	if (ret == OMAP_CRYPTO_NOT_ALIGNED) {
+		ret = omap_crypto_copy_sgs(total, bs, sg, new_sg, flags);
+		if (ret)
+			return ret;
+		*dd_flags |= OMAP_CRYPTO_DATA_COPIED << flags_shift;
+	} else if (ret == OMAP_CRYPTO_BAD_DATA_LENGTH) {
+		ret = omap_crypto_copy_sg_lists(total, bs, sg, new_sg, flags);
+		if (ret)
+			return ret;
+		if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
+			*dd_flags |= OMAP_CRYPTO_SG_COPIED << flags_shift;
+	} else if (flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) {
+		sg_set_buf(new_sg, sg_virt(*sg), (*sg)->length);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
+
+void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
+			 int offset, int len, u8 flags_shift,
+			 unsigned long flags)
+{
+	void *buf;
+	int pages;
+
+	flags >>= flags_shift;
+	flags &= OMAP_CRYPTO_COPY_MASK;
+
+	if (!flags)
+		return;
+
+	buf = sg_virt(sg);
+	pages = get_order(len);
+
+	if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
+		scatterwalk_map_and_copy(buf, orig, offset, len, 1);
+
+	if (flags & OMAP_CRYPTO_DATA_COPIED)
+		free_pages((unsigned long)buf, pages);
+	else if (flags & OMAP_CRYPTO_SG_COPIED)
+		kfree(sg);
+}
+EXPORT_SYMBOL_GPL(omap_crypto_cleanup);
+
+MODULE_DESCRIPTION("OMAP crypto support library.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>");
diff --git a/drivers/crypto/omap-crypto.h b/drivers/crypto/omap-crypto.h
new file mode 100644
index 0000000..36a230e
--- /dev/null
+++ b/drivers/crypto/omap-crypto.h
@@ -0,0 +1,37 @@
+/*
+ * OMAP Crypto driver common support routines.
+ *
+ * Copyright (c) 2017 Texas Instruments Incorporated
+ *   Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef __CRYPTO_OMAP_CRYPTO_H
+#define __CRYPTO_OMAP_CRYPTO_H
+
+enum {
+	OMAP_CRYPTO_NOT_ALIGNED = 1,
+	OMAP_CRYPTO_BAD_DATA_LENGTH,
+};
+
+#define OMAP_CRYPTO_DATA_COPIED		BIT(0)
+#define OMAP_CRYPTO_SG_COPIED		BIT(1)
+
+#define OMAP_CRYPTO_COPY_MASK		0x3
+
+#define OMAP_CRYPTO_COPY_DATA		BIT(0)
+#define OMAP_CRYPTO_FORCE_COPY		BIT(1)
+#define OMAP_CRYPTO_ZERO_BUF		BIT(2)
+#define OMAP_CRYPTO_FORCE_SINGLE_ENTRY	BIT(3)
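+/*
+ * OMAP_CRYPTO_DATA_COPIED and OMAP_CRYPTO_SG_COPIED are reported back through
+ * the dd_flags argument of omap_crypto_align_sg(), while OMAP_CRYPTO_COPY_DATA,
+ * OMAP_CRYPTO_FORCE_COPY, OMAP_CRYPTO_ZERO_BUF and
+ * OMAP_CRYPTO_FORCE_SINGLE_ENTRY are request flags passed in by the caller.
+ */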
+
+int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
+			 struct scatterlist *new_sg, u16 flags,
+			 u8 flags_shift, unsigned long *dd_flags);
+void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
+			 int offset, int len, u8 flags_shift,
+			 unsigned long flags);
+
+#endif
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
new file mode 100644
index 0000000..eb95b0d
--- /dev/null
+++ b/drivers/crypto/omap-des.c
@@ -0,0 +1,1168 @@
+/*
+ * Support for OMAP DES and Triple DES HW acceleration.
+ *
+ * Copyright (c) 2013 Texas Instruments Incorporated
+ * Author: Joel Fernandes <joelf@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#ifdef DEBUG
+#define prn(num) printk(#num "=%d\n", num)
+#define prx(num) printk(#num "=%x\n", num)
+#else
+#define prn(num) do { } while (0)
+#define prx(num)  do { } while (0)
+#endif
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/des.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+
+#include "omap-crypto.h"
+
+#define DST_MAXBURST			2
+
+#define DES_BLOCK_WORDS		(DES_BLOCK_SIZE >> 2)
+
+#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
+
+#define DES_REG_KEY(dd, x)		((dd)->pdata->key_ofs - \
+						((x ^ 0x01) * 0x04))
+
+#define DES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))
+
+#define DES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
+#define DES_REG_CTRL_CBC		BIT(4)
+#define DES_REG_CTRL_TDES		BIT(3)
+#define DES_REG_CTRL_DIRECTION		BIT(2)
+#define DES_REG_CTRL_INPUT_READY	BIT(1)
+#define DES_REG_CTRL_OUTPUT_READY	BIT(0)
+
+#define DES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))
+
+#define DES_REG_REV(dd)			((dd)->pdata->rev_ofs)
+
+#define DES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
+
+#define DES_REG_LENGTH_N(x)		(0x24 + ((x) * 0x04))
+
+#define DES_REG_IRQ_STATUS(dd)         ((dd)->pdata->irq_status_ofs)
+#define DES_REG_IRQ_ENABLE(dd)         ((dd)->pdata->irq_enable_ofs)
+#define DES_REG_IRQ_DATA_IN            BIT(1)
+#define DES_REG_IRQ_DATA_OUT           BIT(2)
+
+#define FLAGS_MODE_MASK		0x000f
+#define FLAGS_ENCRYPT		BIT(0)
+#define FLAGS_CBC		BIT(1)
+#define FLAGS_INIT		BIT(4)
+#define FLAGS_BUSY		BIT(6)
+
+#define DEFAULT_AUTOSUSPEND_DELAY	1000
+
+#define FLAGS_IN_DATA_ST_SHIFT	8
+#define FLAGS_OUT_DATA_ST_SHIFT	10
+
+struct omap_des_ctx {
+	struct crypto_engine_ctx enginectx;
+	struct omap_des_dev *dd;
+
+	int		keylen;
+	u32		key[(3 * DES_KEY_SIZE) / sizeof(u32)];
+	unsigned long	flags;
+};
+
+struct omap_des_reqctx {
+	unsigned long mode;
+};
+
+#define OMAP_DES_QUEUE_LENGTH	1
+#define OMAP_DES_CACHE_SIZE	0
+
+struct omap_des_algs_info {
+	struct crypto_alg	*algs_list;
+	unsigned int		size;
+	unsigned int		registered;
+};
+
+struct omap_des_pdata {
+	struct omap_des_algs_info	*algs_info;
+	unsigned int	algs_info_size;
+
+	void		(*trigger)(struct omap_des_dev *dd, int length);
+
+	u32		key_ofs;
+	u32		iv_ofs;
+	u32		ctrl_ofs;
+	u32		data_ofs;
+	u32		rev_ofs;
+	u32		mask_ofs;
+	u32             irq_enable_ofs;
+	u32             irq_status_ofs;
+
+	u32		dma_enable_in;
+	u32		dma_enable_out;
+	u32		dma_start;
+
+	u32		major_mask;
+	u32		major_shift;
+	u32		minor_mask;
+	u32		minor_shift;
+};
+
+struct omap_des_dev {
+	struct list_head	list;
+	unsigned long		phys_base;
+	void __iomem		*io_base;
+	struct omap_des_ctx	*ctx;
+	struct device		*dev;
+	unsigned long		flags;
+	int			err;
+
+	struct tasklet_struct	done_task;
+
+	struct ablkcipher_request	*req;
+	struct crypto_engine		*engine;
+	/*
+	 * total is consumed by PIO mode for bookkeeping, so the original
+	 * request length is kept in total_save; it is needed to calculate
+	 * the page order during cleanup.
+	 */
+	size_t                          total;
+	size_t                          total_save;
+
+	struct scatterlist		*in_sg;
+	struct scatterlist		*out_sg;
+
+	/* Buffers for copying for unaligned cases */
+	struct scatterlist		in_sgl;
+	struct scatterlist		out_sgl;
+	struct scatterlist		*orig_out;
+
+	struct scatter_walk		in_walk;
+	struct scatter_walk		out_walk;
+	struct dma_chan		*dma_lch_in;
+	struct dma_chan		*dma_lch_out;
+	int			in_sg_len;
+	int			out_sg_len;
+	int			pio_only;
+	const struct omap_des_pdata	*pdata;
+};
+
+/* keep registered devices data here */
+static LIST_HEAD(dev_list);
+static DEFINE_SPINLOCK(list_lock);
+
+#ifdef DEBUG
+#define omap_des_read(dd, offset)					\
+	({								\
+		int _read_ret;						\
+		_read_ret = __raw_readl(dd->io_base + offset);		\
+		pr_err("omap_des_read(" #offset "=%#x)= %#x\n",		\
+		       offset, _read_ret);				\
+		_read_ret;						\
+	})
+#else
+static inline u32 omap_des_read(struct omap_des_dev *dd, u32 offset)
+{
+	return __raw_readl(dd->io_base + offset);
+}
+#endif
+
+#ifdef DEBUG
+#define omap_des_write(dd, offset, value)                               \
+	do {                                                            \
+		pr_err("omap_des_write(" #offset "=%#x) value=%#x\n", \
+				offset, value);                                \
+		__raw_writel(value, dd->io_base + offset);              \
+	} while (0)
+#else
+static inline void omap_des_write(struct omap_des_dev *dd, u32 offset,
+		u32 value)
+{
+	__raw_writel(value, dd->io_base + offset);
+}
+#endif
+
+static inline void omap_des_write_mask(struct omap_des_dev *dd, u32 offset,
+					u32 value, u32 mask)
+{
+	u32 val;
+
+	val = omap_des_read(dd, offset);
+	val &= ~mask;
+	val |= value;
+	omap_des_write(dd, offset, val);
+}
+
+static void omap_des_write_n(struct omap_des_dev *dd, u32 offset,
+					u32 *value, int count)
+{
+	for (; count--; value++, offset += 4)
+		omap_des_write(dd, offset, *value);
+}
+
+static int omap_des_hw_init(struct omap_des_dev *dd)
+{
+	int err;
+
+	/*
+	 * Clocks are enabled when a request starts and disabled when it
+	 * finishes. There may be long delays between requests, and the
+	 * device might go to off mode to save power.
+	 */
+	err = pm_runtime_get_sync(dd->dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(dd->dev);
+		dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
+		return err;
+	}
+
+	if (!(dd->flags & FLAGS_INIT)) {
+		dd->flags |= FLAGS_INIT;
+		dd->err = 0;
+	}
+
+	return 0;
+}
+
+static int omap_des_write_ctrl(struct omap_des_dev *dd)
+{
+	unsigned int key32;
+	int i, err;
+	u32 val = 0, mask = 0;
+
+	err = omap_des_hw_init(dd);
+	if (err)
+		return err;
+
+	key32 = dd->ctx->keylen / sizeof(u32);
+
+	/* it seems a key should always be set even if it has not changed */
+	for (i = 0; i < key32; i++) {
+		omap_des_write(dd, DES_REG_KEY(dd, i),
+			       __le32_to_cpu(dd->ctx->key[i]));
+	}
+
+	if ((dd->flags & FLAGS_CBC) && dd->req->info)
+		omap_des_write_n(dd, DES_REG_IV(dd, 0), dd->req->info, 2);
+
+	if (dd->flags & FLAGS_CBC)
+		val |= DES_REG_CTRL_CBC;
+	if (dd->flags & FLAGS_ENCRYPT)
+		val |= DES_REG_CTRL_DIRECTION;
+	if (key32 == 6)
+		val |= DES_REG_CTRL_TDES;
+
+	mask |= DES_REG_CTRL_CBC | DES_REG_CTRL_DIRECTION | DES_REG_CTRL_TDES;
+
+	omap_des_write_mask(dd, DES_REG_CTRL(dd), val, mask);
+
+	return 0;
+}
+
+static void omap_des_dma_trigger_omap4(struct omap_des_dev *dd, int length)
+{
+	u32 mask, val;
+
+	omap_des_write(dd, DES_REG_LENGTH_N(0), length);
+
+	val = dd->pdata->dma_start;
+
+	if (dd->dma_lch_out != NULL)
+		val |= dd->pdata->dma_enable_out;
+	if (dd->dma_lch_in != NULL)
+		val |= dd->pdata->dma_enable_in;
+
+	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
+	       dd->pdata->dma_start;
+
+	omap_des_write_mask(dd, DES_REG_MASK(dd), val, mask);
+}
+
+static void omap_des_dma_stop(struct omap_des_dev *dd)
+{
+	u32 mask;
+
+	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
+	       dd->pdata->dma_start;
+
+	omap_des_write_mask(dd, DES_REG_MASK(dd), 0, mask);
+}
+
+static struct omap_des_dev *omap_des_find_dev(struct omap_des_ctx *ctx)
+{
+	struct omap_des_dev *dd = NULL, *tmp;
+
+	spin_lock_bh(&list_lock);
+	if (!ctx->dd) {
+		list_for_each_entry(tmp, &dev_list, list) {
+			/* FIXME: take the first available DES core */
+			dd = tmp;
+			break;
+		}
+		ctx->dd = dd;
+	} else {
+		/* already found before */
+		dd = ctx->dd;
+	}
+	spin_unlock_bh(&list_lock);
+
+	return dd;
+}
+
+static void omap_des_dma_out_callback(void *data)
+{
+	struct omap_des_dev *dd = data;
+
+	/* dma_lch_out - completed */
+	tasklet_schedule(&dd->done_task);
+}
+
+static int omap_des_dma_init(struct omap_des_dev *dd)
+{
+	int err;
+
+	dd->dma_lch_out = NULL;
+	dd->dma_lch_in = NULL;
+
+	dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
+	if (IS_ERR(dd->dma_lch_in)) {
+		dev_err(dd->dev, "Unable to request in DMA channel\n");
+		return PTR_ERR(dd->dma_lch_in);
+	}
+
+	dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
+	if (IS_ERR(dd->dma_lch_out)) {
+		dev_err(dd->dev, "Unable to request out DMA channel\n");
+		err = PTR_ERR(dd->dma_lch_out);
+		goto err_dma_out;
+	}
+
+	return 0;
+
+err_dma_out:
+	dma_release_channel(dd->dma_lch_in);
+
+	return err;
+}
+
+static void omap_des_dma_cleanup(struct omap_des_dev *dd)
+{
+	if (dd->pio_only)
+		return;
+
+	dma_release_channel(dd->dma_lch_out);
+	dma_release_channel(dd->dma_lch_in);
+}
+
+static int omap_des_crypt_dma(struct crypto_tfm *tfm,
+		struct scatterlist *in_sg, struct scatterlist *out_sg,
+		int in_sg_len, int out_sg_len)
+{
+	struct omap_des_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct omap_des_dev *dd = ctx->dd;
+	struct dma_async_tx_descriptor *tx_in, *tx_out;
+	struct dma_slave_config cfg;
+	int ret;
+
+	if (dd->pio_only) {
+		scatterwalk_start(&dd->in_walk, dd->in_sg);
+		scatterwalk_start(&dd->out_walk, dd->out_sg);
+
+		/*
+		 * Enable the DATAIN interrupt and let it take
+		 * care of the rest.
+		 */
+		omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2);
+		return 0;
+	}
+
+	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);
+
+	memset(&cfg, 0, sizeof(cfg));
+
+	cfg.src_addr = dd->phys_base + DES_REG_DATA_N(dd, 0);
+	cfg.dst_addr = dd->phys_base + DES_REG_DATA_N(dd, 0);
+	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.src_maxburst = DST_MAXBURST;
+	cfg.dst_maxburst = DST_MAXBURST;
+
+	/* IN */
+	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
+	if (ret) {
+		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
+			ret);
+		return ret;
+	}
+
+	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
+					DMA_MEM_TO_DEV,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!tx_in) {
+		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
+		return -EINVAL;
+	}
+
+	/* No callback necessary */
+	tx_in->callback_param = dd;
+
+	/* OUT */
+	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
+	if (ret) {
+		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+			ret);
+		return ret;
+	}
+
+	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
+					DMA_DEV_TO_MEM,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!tx_out) {
+		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
+		return -EINVAL;
+	}
+
+	tx_out->callback = omap_des_dma_out_callback;
+	tx_out->callback_param = dd;
+
+	dmaengine_submit(tx_in);
+	dmaengine_submit(tx_out);
+
+	dma_async_issue_pending(dd->dma_lch_in);
+	dma_async_issue_pending(dd->dma_lch_out);
+
+	/* start DMA */
+	dd->pdata->trigger(dd, dd->total);
+
+	return 0;
+}
+
+static int omap_des_crypt_dma_start(struct omap_des_dev *dd)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
+					crypto_ablkcipher_reqtfm(dd->req));
+	int err;
+
+	pr_debug("total: %d\n", dd->total);
+
+	if (!dd->pio_only) {
+		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
+				 DMA_TO_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			return -EINVAL;
+		}
+
+		err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+				 DMA_FROM_DEVICE);
+		if (!err) {
+			dev_err(dd->dev, "dma_map_sg() error\n");
+			return -EINVAL;
+		}
+	}
+
+	err = omap_des_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
+				 dd->out_sg_len);
+	if (err && !dd->pio_only) {
+		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+			     DMA_FROM_DEVICE);
+	}
+
+	return err;
+}
+
+static void omap_des_finish_req(struct omap_des_dev *dd, int err)
+{
+	struct ablkcipher_request *req = dd->req;
+
+	pr_debug("err: %d\n", err);
+
+	crypto_finalize_ablkcipher_request(dd->engine, req, err);
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+}
+
+static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
+{
+	pr_debug("total: %d\n", dd->total);
+
+	omap_des_dma_stop(dd);
+
+	dmaengine_terminate_all(dd->dma_lch_in);
+	dmaengine_terminate_all(dd->dma_lch_out);
+
+	return 0;
+}
+
+static int omap_des_handle_queue(struct omap_des_dev *dd,
+				 struct ablkcipher_request *req)
+{
+	if (req)
+		return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
+
+	return 0;
+}
+
+static int omap_des_prepare_req(struct crypto_engine *engine,
+				void *areq)
+{
+	struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
+	struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
+			crypto_ablkcipher_reqtfm(req));
+	struct omap_des_dev *dd = omap_des_find_dev(ctx);
+	struct omap_des_reqctx *rctx;
+	int ret;
+	u16 flags;
+
+	if (!dd)
+		return -ENODEV;
+
+	/* assign new request to device */
+	dd->req = req;
+	dd->total = req->nbytes;
+	dd->total_save = req->nbytes;
+	dd->in_sg = req->src;
+	dd->out_sg = req->dst;
+	dd->orig_out = req->dst;
+
+	flags = OMAP_CRYPTO_COPY_DATA;
+	if (req->src == req->dst)
+		flags |= OMAP_CRYPTO_FORCE_COPY;
+
+	ret = omap_crypto_align_sg(&dd->in_sg, dd->total, DES_BLOCK_SIZE,
+				   &dd->in_sgl, flags,
+				   FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
+	if (ret)
+		return ret;
+
+	ret = omap_crypto_align_sg(&dd->out_sg, dd->total, DES_BLOCK_SIZE,
+				   &dd->out_sgl, 0,
+				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
+	if (ret)
+		return ret;
+
+	dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
+	if (dd->in_sg_len < 0)
+		return dd->in_sg_len;
+
+	dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
+	if (dd->out_sg_len < 0)
+		return dd->out_sg_len;
+
+	rctx = ablkcipher_request_ctx(req);
+	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+	rctx->mode &= FLAGS_MODE_MASK;
+	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+	dd->ctx = ctx;
+	ctx->dd = dd;
+
+	return omap_des_write_ctrl(dd);
+}
+
+static int omap_des_crypt_req(struct crypto_engine *engine,
+			      void *areq)
+{
+	struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
+	struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
+			crypto_ablkcipher_reqtfm(req));
+	struct omap_des_dev *dd = omap_des_find_dev(ctx);
+
+	if (!dd)
+		return -ENODEV;
+
+	return omap_des_crypt_dma_start(dd);
+}
+
+static void omap_des_done_task(unsigned long data)
+{
+	struct omap_des_dev *dd = (struct omap_des_dev *)data;
+
+	pr_debug("enter done_task\n");
+
+	if (!dd->pio_only) {
+		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
+				       DMA_FROM_DEVICE);
+		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+			     DMA_FROM_DEVICE);
+		omap_des_crypt_dma_stop(dd);
+	}
+
+	omap_crypto_cleanup(&dd->in_sgl, NULL, 0, dd->total_save,
+			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);
+
+	omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
+			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+
+	omap_des_finish_req(dd, 0);
+
+	pr_debug("exit\n");
+}
+
+static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+	struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
+			crypto_ablkcipher_reqtfm(req));
+	struct omap_des_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct omap_des_dev *dd;
+
+	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+		 !!(mode & FLAGS_ENCRYPT),
+		 !!(mode & FLAGS_CBC));
+
+	if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+		pr_err("request size is not exact amount of DES blocks\n");
+		return -EINVAL;
+	}
+
+	dd = omap_des_find_dev(ctx);
+	if (!dd)
+		return -ENODEV;
+
+	rctx->mode = mode;
+
+	return omap_des_handle_queue(dd, req);
+}
+
+/* ********************** ALG API ************************************ */
+
+static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			   unsigned int keylen)
+{
+	struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+
+	if (keylen != DES_KEY_SIZE && keylen != (3*DES_KEY_SIZE))
+		return -EINVAL;
+
+	pr_debug("enter, keylen: %d\n", keylen);
+
+	/* Do we need to test against weak key? */
+	if (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY) {
+		u32 tmp[DES_EXPKEY_WORDS];
+		int ret = des_ekey(tmp, key);
+
+		if (!ret) {
+			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+			return -EINVAL;
+		}
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int omap_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return omap_des_crypt(req, FLAGS_ENCRYPT);
+}
+
+static int omap_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return omap_des_crypt(req, 0);
+}
+
+static int omap_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return omap_des_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
+}
+
+static int omap_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return omap_des_crypt(req, FLAGS_CBC);
+}
+
+static int omap_des_prepare_req(struct crypto_engine *engine,
+				void *areq);
+static int omap_des_crypt_req(struct crypto_engine *engine,
+			      void *areq);
+
+static int omap_des_cra_init(struct crypto_tfm *tfm)
+{
+	struct omap_des_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	pr_debug("enter\n");
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_des_reqctx);
+
+	ctx->enginectx.op.prepare_request = omap_des_prepare_req;
+	ctx->enginectx.op.unprepare_request = NULL;
+	ctx->enginectx.op.do_one_request = omap_des_crypt_req;
+
+	return 0;
+}
+
+static void omap_des_cra_exit(struct crypto_tfm *tfm)
+{
+	pr_debug("enter\n");
+}
+
+/* ********************** ALGS ************************************ */
+
+static struct crypto_alg algs_ecb_cbc[] = {
+{
+	.cra_name		= "ecb(des)",
+	.cra_driver_name	= "ecb-des-omap",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY |
+				  CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct omap_des_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= omap_des_cra_init,
+	.cra_exit		= omap_des_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.setkey		= omap_des_setkey,
+		.encrypt	= omap_des_ecb_encrypt,
+		.decrypt	= omap_des_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "cbc(des)",
+	.cra_driver_name	= "cbc-des-omap",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY |
+				  CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct omap_des_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= omap_des_cra_init,
+	.cra_exit		= omap_des_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= omap_des_setkey,
+		.encrypt	= omap_des_cbc_encrypt,
+		.decrypt	= omap_des_cbc_decrypt,
+	}
+},
+{
+	.cra_name		= "ecb(des3_ede)",
+	.cra_driver_name	= "ecb-des3-omap",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY |
+				  CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct omap_des_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= omap_des_cra_init,
+	.cra_exit		= omap_des_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 3*DES_KEY_SIZE,
+		.max_keysize	= 3*DES_KEY_SIZE,
+		.setkey		= omap_des_setkey,
+		.encrypt	= omap_des_ecb_encrypt,
+		.decrypt	= omap_des_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "cbc(des3_ede)",
+	.cra_driver_name	= "cbc-des3-omap",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY |
+				  CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct omap_des_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= omap_des_cra_init,
+	.cra_exit		= omap_des_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 3*DES_KEY_SIZE,
+		.max_keysize	= 3*DES_KEY_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= omap_des_setkey,
+		.encrypt	= omap_des_cbc_encrypt,
+		.decrypt	= omap_des_cbc_decrypt,
+	}
+}
+};
+
+static struct omap_des_algs_info omap_des_algs_info_ecb_cbc[] = {
+	{
+		.algs_list	= algs_ecb_cbc,
+		.size		= ARRAY_SIZE(algs_ecb_cbc),
+	},
+};
+
+#ifdef CONFIG_OF
+static const struct omap_des_pdata omap_des_pdata_omap4 = {
+	.algs_info	= omap_des_algs_info_ecb_cbc,
+	.algs_info_size	= ARRAY_SIZE(omap_des_algs_info_ecb_cbc),
+	.trigger	= omap_des_dma_trigger_omap4,
+	.key_ofs	= 0x14,
+	.iv_ofs		= 0x18,
+	.ctrl_ofs	= 0x20,
+	.data_ofs	= 0x28,
+	.rev_ofs	= 0x30,
+	.mask_ofs	= 0x34,
+	.irq_status_ofs = 0x3c,
+	.irq_enable_ofs = 0x40,
+	.dma_enable_in	= BIT(5),
+	.dma_enable_out	= BIT(6),
+	.major_mask	= 0x0700,
+	.major_shift	= 8,
+	.minor_mask	= 0x003f,
+	.minor_shift	= 0,
+};
+
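+/*
+ * PIO mode interrupt handler: on DATA_IN, feed one block from the input
+ * scatterlist into the data registers and arm the DATA_OUT interrupt;
+ * on DATA_OUT, read the processed block back into the output
+ * scatterlist, then either re-arm DATA_IN for the next block or, once
+ * dd->total reaches zero, schedule the done tasklet.
+ */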
+static irqreturn_t omap_des_irq(int irq, void *dev_id)
+{
+	struct omap_des_dev *dd = dev_id;
+	u32 status, i;
+	u32 *src, *dst;
+
+	status = omap_des_read(dd, DES_REG_IRQ_STATUS(dd));
+	if (status & DES_REG_IRQ_DATA_IN) {
+		omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x0);
+
+		BUG_ON(!dd->in_sg);
+
+		BUG_ON(_calc_walked(in) > dd->in_sg->length);
+
+		src = sg_virt(dd->in_sg) + _calc_walked(in);
+
+		for (i = 0; i < DES_BLOCK_WORDS; i++) {
+			omap_des_write(dd, DES_REG_DATA_N(dd, i), *src);
+
+			scatterwalk_advance(&dd->in_walk, 4);
+			if (dd->in_sg->length == _calc_walked(in)) {
+				dd->in_sg = sg_next(dd->in_sg);
+				if (dd->in_sg) {
+					scatterwalk_start(&dd->in_walk,
+							  dd->in_sg);
+					src = sg_virt(dd->in_sg) +
+					      _calc_walked(in);
+				}
+			} else {
+				src++;
+			}
+		}
+
+		/* Clear IRQ status */
+		status &= ~DES_REG_IRQ_DATA_IN;
+		omap_des_write(dd, DES_REG_IRQ_STATUS(dd), status);
+
+		/* Enable DATA_OUT interrupt */
+		omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x4);
+
+	} else if (status & DES_REG_IRQ_DATA_OUT) {
+		omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x0);
+
+		BUG_ON(!dd->out_sg);
+
+		BUG_ON(_calc_walked(out) > dd->out_sg->length);
+
+		dst = sg_virt(dd->out_sg) + _calc_walked(out);
+
+		for (i = 0; i < DES_BLOCK_WORDS; i++) {
+			*dst = omap_des_read(dd, DES_REG_DATA_N(dd, i));
+			scatterwalk_advance(&dd->out_walk, 4);
+			if (dd->out_sg->length == _calc_walked(out)) {
+				dd->out_sg = sg_next(dd->out_sg);
+				if (dd->out_sg) {
+					scatterwalk_start(&dd->out_walk,
+							  dd->out_sg);
+					dst = sg_virt(dd->out_sg) +
+					      _calc_walked(out);
+				}
+			} else {
+				dst++;
+			}
+		}
+
+		BUG_ON(dd->total < DES_BLOCK_SIZE);
+
+		dd->total -= DES_BLOCK_SIZE;
+
+		/* Clear IRQ status */
+		status &= ~DES_REG_IRQ_DATA_OUT;
+		omap_des_write(dd, DES_REG_IRQ_STATUS(dd), status);
+
+		if (!dd->total)
+			/* All bytes read! */
+			tasklet_schedule(&dd->done_task);
+		else
+			/* Enable DATA_IN interrupt for next block */
+			omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static const struct of_device_id omap_des_of_match[] = {
+	{
+		.compatible	= "ti,omap4-des",
+		.data		= &omap_des_pdata_omap4,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, omap_des_of_match);
+
+static int omap_des_get_of(struct omap_des_dev *dd,
+		struct platform_device *pdev)
+{
+
+	dd->pdata = of_device_get_match_data(&pdev->dev);
+	if (!dd->pdata) {
+		dev_err(&pdev->dev, "no compatible OF match\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#else
+static int omap_des_get_of(struct omap_des_dev *dd,
+		struct device *dev)
+{
+	return -EINVAL;
+}
+#endif
+
+static int omap_des_get_pdev(struct omap_des_dev *dd,
+		struct platform_device *pdev)
+{
+	/* non-DT devices get pdata from pdev */
+	dd->pdata = pdev->dev.platform_data;
+
+	return 0;
+}
+
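+/*
+ * Probe: map the register space, enable runtime PM to read the revision,
+ * then try to set up DMA. If DMA channels are unavailable (and the IP
+ * provides IRQ status/enable registers), fall back to IRQ-driven PIO.
+ * Finally start a crypto engine and register the supported algorithms.
+ */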
+static int omap_des_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct omap_des_dev *dd;
+	struct crypto_alg *algp;
+	struct resource *res;
+	int err = -ENOMEM, i, j, irq = -1;
+	u32 reg;
+
+	dd = devm_kzalloc(dev, sizeof(struct omap_des_dev), GFP_KERNEL);
+	if (dd == NULL) {
+		dev_err(dev, "unable to alloc data struct.\n");
+		goto err_data;
+	}
+	dd->dev = dev;
+	platform_set_drvdata(pdev, dd);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "no MEM resource info\n");
+		goto err_res;
+	}
+
+	err = (dev->of_node) ? omap_des_get_of(dd, pdev) :
+			       omap_des_get_pdev(dd, pdev);
+	if (err)
+		goto err_res;
+
+	dd->io_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dd->io_base)) {
+		err = PTR_ERR(dd->io_base);
+		goto err_res;
+	}
+	dd->phys_base = res->start;
+
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
+
+	pm_runtime_enable(dev);
+	err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(dev);
+		dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
+		goto err_get;
+	}
+
+	omap_des_dma_stop(dd);
+
+	reg = omap_des_read(dd, DES_REG_REV(dd));
+
+	pm_runtime_put_sync(dev);
+
+	dev_info(dev, "OMAP DES hw accel rev: %u.%u\n",
+		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
+		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
+
+	tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd);
+
+	err = omap_des_dma_init(dd);
+	if (err == -EPROBE_DEFER) {
+		goto err_irq;
+	} else if (err && DES_REG_IRQ_STATUS(dd) && DES_REG_IRQ_ENABLE(dd)) {
+		dd->pio_only = 1;
+
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			dev_err(dev, "can't get IRQ resource: %d\n", irq);
+			err = irq;
+			goto err_irq;
+		}
+
+		err = devm_request_irq(dev, irq, omap_des_irq, 0,
+				dev_name(dev), dd);
+		if (err) {
+			dev_err(dev, "Unable to grab omap-des IRQ\n");
+			goto err_irq;
+		}
+	}
+
+
+	INIT_LIST_HEAD(&dd->list);
+	spin_lock(&list_lock);
+	list_add_tail(&dd->list, &dev_list);
+	spin_unlock(&list_lock);
+
+	/* Initialize des crypto engine */
+	dd->engine = crypto_engine_alloc_init(dev, 1);
+	if (!dd->engine) {
+		err = -ENOMEM;
+		goto err_engine;
+	}
+
+	err = crypto_engine_start(dd->engine);
+	if (err)
+		goto err_engine;
+
+	for (i = 0; i < dd->pdata->algs_info_size; i++) {
+		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
+			algp = &dd->pdata->algs_info[i].algs_list[j];
+
+			pr_debug("reg alg: %s\n", algp->cra_name);
+			INIT_LIST_HEAD(&algp->cra_list);
+
+			err = crypto_register_alg(algp);
+			if (err)
+				goto err_algs;
+
+			dd->pdata->algs_info[i].registered++;
+		}
+	}
+
+	return 0;
+
+err_algs:
+	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+			crypto_unregister_alg(
+					&dd->pdata->algs_info[i].algs_list[j]);
+
+err_engine:
+	if (dd->engine)
+		crypto_engine_exit(dd->engine);
+
+	omap_des_dma_cleanup(dd);
+err_irq:
+	tasklet_kill(&dd->done_task);
+err_get:
+	pm_runtime_disable(dev);
+err_res:
+	dd = NULL;
+err_data:
+	dev_err(dev, "initialization failed.\n");
+	return err;
+}
+
+static int omap_des_remove(struct platform_device *pdev)
+{
+	struct omap_des_dev *dd = platform_get_drvdata(pdev);
+	int i, j;
+
+	if (!dd)
+		return -ENODEV;
+
+	spin_lock(&list_lock);
+	list_del(&dd->list);
+	spin_unlock(&list_lock);
+
+	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+			crypto_unregister_alg(
+					&dd->pdata->algs_info[i].algs_list[j]);
+
+	tasklet_kill(&dd->done_task);
+	omap_des_dma_cleanup(dd);
+	pm_runtime_disable(dd->dev);
+	dd = NULL;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int omap_des_suspend(struct device *dev)
+{
+	pm_runtime_put_sync(dev);
+	return 0;
+}
+
+static int omap_des_resume(struct device *dev)
+{
+	int err;
+
+	err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(dev);
+		dev_err(dev, "%s: failed to get_sync(%d)\n", __func__, err);
+		return err;
+	}
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(omap_des_pm_ops, omap_des_suspend, omap_des_resume);
+
+static struct platform_driver omap_des_driver = {
+	.probe	= omap_des_probe,
+	.remove	= omap_des_remove,
+	.driver	= {
+		.name	= "omap-des",
+		.pm	= &omap_des_pm_ops,
+		.of_match_table	= of_match_ptr(omap_des_of_match),
+	},
+};
+
+module_platform_driver(omap_des_driver);
+
+MODULE_DESCRIPTION("OMAP DES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joel Fernandes <joelf@ti.com>");
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
new file mode 100644
index 0000000..0641185
--- /dev/null
+++ b/drivers/crypto/omap-sham.c
@@ -0,0 +1,2270 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP SHA1/MD5 HW acceleration.
+ *
+ * Copyright (c) 2010 Nokia Corporation
+ * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ * Copyright (c) 2011 Texas Instruments Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from old omap-sha1-md5.c driver.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+
+#define MD5_DIGEST_SIZE			16
+
+#define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x)*0x04))
+#define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
+#define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)
+
+#define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + (x * 0x04))
+
+#define SHA_REG_CTRL			0x18
+#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
+#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
+#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
+#define SHA_REG_CTRL_ALGO		(1 << 2)
+#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
+#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)
+
+#define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)
+
+#define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
+#define SHA_REG_MASK_DMA_EN		(1 << 3)
+#define SHA_REG_MASK_IT_EN		(1 << 2)
+#define SHA_REG_MASK_SOFTRESET		(1 << 1)
+#define SHA_REG_AUTOIDLE		(1 << 0)
+
+#define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
+#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)
+
+#define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
+#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
+#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
+#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
+#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)
+
+#define SHA_REG_MODE_ALGO_MASK		(7 << 0)
+#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
+#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
+#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
+#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
+#define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
+#define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)
+
+#define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)
+
+#define SHA_REG_IRQSTATUS		0x118
+#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
+#define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
+#define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
+#define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)
+
+#define SHA_REG_IRQENA			0x11C
+#define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
+#define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
+#define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
+#define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)
+
+#define DEFAULT_TIMEOUT_INTERVAL	HZ
+
+#define DEFAULT_AUTOSUSPEND_DELAY	1000
+
+/* mostly device flags */
+#define FLAGS_BUSY		0
+#define FLAGS_FINAL		1
+#define FLAGS_DMA_ACTIVE	2
+#define FLAGS_OUTPUT_READY	3
+#define FLAGS_INIT		4
+#define FLAGS_CPU		5
+#define FLAGS_DMA_READY		6
+#define FLAGS_AUTO_XOR		7
+#define FLAGS_BE32_SHA1		8
+#define FLAGS_SGS_COPIED	9
+#define FLAGS_SGS_ALLOCED	10
+/* context flags */
+#define FLAGS_FINUP		16
+
+#define FLAGS_MODE_SHIFT	18
+#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK	<< FLAGS_MODE_SHIFT)
+#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
+#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
+#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
+#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
+#define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
+#define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)
+
+#define FLAGS_HMAC		21
+#define FLAGS_ERROR		22
+
+#define OP_UPDATE		1
+#define OP_FINAL		2
+
+#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
+#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))
+
+#define BUFLEN			SHA512_BLOCK_SIZE
+#define OMAP_SHA_DMA_THRESHOLD	256
+
+struct omap_sham_dev;
+
+struct omap_sham_reqctx {
+	struct omap_sham_dev	*dd;
+	unsigned long		flags;
+	unsigned long		op;
+
+	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
+	size_t			digcnt;
+	size_t			bufcnt;
+	size_t			buflen;
+
+	/* walk state */
+	struct scatterlist	*sg;
+	struct scatterlist	sgl[2];
+	int			offset;	/* offset in current sg */
+	int			sg_len;
+	unsigned int		total;	/* total request */
+
+	u8			buffer[0] OMAP_ALIGNED;
+};
+
+struct omap_sham_hmac_ctx {
+	struct crypto_shash	*shash;
+	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
+	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
+};
+
+struct omap_sham_ctx {
+	struct omap_sham_dev	*dd;
+
+	unsigned long		flags;
+
+	/* fallback stuff */
+	struct crypto_shash	*fallback;
+
+	struct omap_sham_hmac_ctx base[0];
+};
+
+#define OMAP_SHAM_QUEUE_LENGTH	10
+
+struct omap_sham_algs_info {
+	struct ahash_alg	*algs_list;
+	unsigned int		size;
+	unsigned int		registered;
+};
+
+struct omap_sham_pdata {
+	struct omap_sham_algs_info	*algs_info;
+	unsigned int	algs_info_size;
+	unsigned long	flags;
+	int		digest_size;
+
+	void		(*copy_hash)(struct ahash_request *req, int out);
+	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
+				      int final, int dma);
+	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
+	int		(*poll_irq)(struct omap_sham_dev *dd);
+	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);
+
+	u32		odigest_ofs;
+	u32		idigest_ofs;
+	u32		din_ofs;
+	u32		digcnt_ofs;
+	u32		rev_ofs;
+	u32		mask_ofs;
+	u32		sysstatus_ofs;
+	u32		mode_ofs;
+	u32		length_ofs;
+
+	u32		major_mask;
+	u32		major_shift;
+	u32		minor_mask;
+	u32		minor_shift;
+};
+
+struct omap_sham_dev {
+	struct list_head	list;
+	unsigned long		phys_base;
+	struct device		*dev;
+	void __iomem		*io_base;
+	int			irq;
+	spinlock_t		lock;
+	int			err;
+	struct dma_chan		*dma_lch;
+	struct tasklet_struct	done_task;
+	u8			polling_mode;
+	u8			xmit_buf[BUFLEN] OMAP_ALIGNED;
+
+	unsigned long		flags;
+	int			fallback_sz;
+	struct crypto_queue	queue;
+	struct ahash_request	*req;
+
+	const struct omap_sham_pdata	*pdata;
+};
+
+struct omap_sham_drv {
+	struct list_head	dev_list;
+	spinlock_t		lock;
+	unsigned long		flags;
+};
+
+static struct omap_sham_drv sham = {
+	.dev_list = LIST_HEAD_INIT(sham.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
+};
+
+static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
+{
+	return __raw_readl(dd->io_base + offset);
+}
+
+static inline void omap_sham_write(struct omap_sham_dev *dd,
+					u32 offset, u32 value)
+{
+	__raw_writel(value, dd->io_base + offset);
+}
+
+static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
+					u32 value, u32 mask)
+{
+	u32 val;
+
+	val = omap_sham_read(dd, address);
+	val &= ~mask;
+	val |= value;
+	omap_sham_write(dd, address, val);
+}
+
+static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
+{
+	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
+
+	while (!(omap_sham_read(dd, offset) & bit)) {
+		if (time_is_before_jiffies(timeout))
+			return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = ctx->dd;
+	u32 *hash = (u32 *)ctx->digest;
+	int i;
+
+	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
+		if (out)
+			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
+		else
+			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
+	}
+}
+
+static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = ctx->dd;
+	int i;
+
+	if (ctx->flags & BIT(FLAGS_HMAC)) {
+		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
+		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+		struct omap_sham_hmac_ctx *bctx = tctx->base;
+		u32 *opad = (u32 *)bctx->opad;
+
+		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
+			if (out)
+				opad[i] = omap_sham_read(dd,
+						SHA_REG_ODIGEST(dd, i));
+			else
+				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
+						opad[i]);
+		}
+	}
+
+	omap_sham_copy_hash_omap2(req, out);
+}
+
+static void omap_sham_copy_ready_hash(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	u32 *in = (u32 *)ctx->digest;
+	u32 *hash = (u32 *)req->result;
+	int i, d, big_endian = 0;
+
+	if (!hash)
+		return;
+
+	switch (ctx->flags & FLAGS_MODE_MASK) {
+	case FLAGS_MODE_MD5:
+		d = MD5_DIGEST_SIZE / sizeof(u32);
+		break;
+	case FLAGS_MODE_SHA1:
+		/* OMAP2 SHA1 is big endian */
+		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
+			big_endian = 1;
+		d = SHA1_DIGEST_SIZE / sizeof(u32);
+		break;
+	case FLAGS_MODE_SHA224:
+		d = SHA224_DIGEST_SIZE / sizeof(u32);
+		break;
+	case FLAGS_MODE_SHA256:
+		d = SHA256_DIGEST_SIZE / sizeof(u32);
+		break;
+	case FLAGS_MODE_SHA384:
+		d = SHA384_DIGEST_SIZE / sizeof(u32);
+		break;
+	case FLAGS_MODE_SHA512:
+		d = SHA512_DIGEST_SIZE / sizeof(u32);
+		break;
+	default:
+		d = 0;
+	}
+
+	if (big_endian)
+		for (i = 0; i < d; i++)
+			hash[i] = be32_to_cpu(in[i]);
+	else
+		for (i = 0; i < d; i++)
+			hash[i] = le32_to_cpu(in[i]);
+}
+
+static int omap_sham_hw_init(struct omap_sham_dev *dd)
+{
+	int err;
+
+	err = pm_runtime_get_sync(dd->dev);
+	if (err < 0) {
+		dev_err(dd->dev, "failed to get sync: %d\n", err);
+		return err;
+	}
+
+	if (!test_bit(FLAGS_INIT, &dd->flags)) {
+		set_bit(FLAGS_INIT, &dd->flags);
+		dd->err = 0;
+	}
+
+	return 0;
+}
+
+static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
+				 int final, int dma)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+	u32 val = length << 5, mask;
+
+	if (likely(ctx->digcnt))
+		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
+
+	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
+		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
+		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
+	/*
+	 * Setting ALGO_CONST only for the first iteration
+	 * and CLOSE_HASH only for the last one.
+	 */
+	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
+		val |= SHA_REG_CTRL_ALGO;
+	if (!ctx->digcnt)
+		val |= SHA_REG_CTRL_ALGO_CONST;
+	if (final)
+		val |= SHA_REG_CTRL_CLOSE_HASH;
+
+	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
+			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
+
+	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
+}
+
+static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
+{
+}
+
+static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
+{
+	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
+}
+
+static int get_block_size(struct omap_sham_reqctx *ctx)
+{
+	int d;
+
+	switch (ctx->flags & FLAGS_MODE_MASK) {
+	case FLAGS_MODE_MD5:
+	case FLAGS_MODE_SHA1:
+		d = SHA1_BLOCK_SIZE;
+		break;
+	case FLAGS_MODE_SHA224:
+	case FLAGS_MODE_SHA256:
+		d = SHA256_BLOCK_SIZE;
+		break;
+	case FLAGS_MODE_SHA384:
+	case FLAGS_MODE_SHA512:
+		d = SHA512_BLOCK_SIZE;
+		break;
+	default:
+		d = 0;
+	}
+
+	return d;
+}
+
+static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
+				    u32 *value, int count)
+{
+	for (; count--; value++, offset += 4)
+		omap_sham_write(dd, offset, *value);
+}
+
+static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
+				 int final, int dma)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+	u32 val, mask;
+
+	/*
+	 * Setting ALGO_CONST only for the first iteration and
+	 * CLOSE_HASH only for the last one. Note that flags mode bits
+	 * correspond to algorithm encoding in mode register.
+	 */
+	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
+	if (!ctx->digcnt) {
+		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
+		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+		struct omap_sham_hmac_ctx *bctx = tctx->base;
+		int bs, nr_dr;
+
+		val |= SHA_REG_MODE_ALGO_CONSTANT;
+
+		if (ctx->flags & BIT(FLAGS_HMAC)) {
+			bs = get_block_size(ctx);
+			nr_dr = bs / (2 * sizeof(u32));
+			val |= SHA_REG_MODE_HMAC_KEY_PROC;
+			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
+					  (u32 *)bctx->ipad, nr_dr);
+			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
+					  (u32 *)bctx->ipad + nr_dr, nr_dr);
+			ctx->digcnt += bs;
+		}
+	}
+
+	if (final) {
+		val |= SHA_REG_MODE_CLOSE_HASH;
+
+		if (ctx->flags & BIT(FLAGS_HMAC))
+			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
+	}
+
+	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
+	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
+	       SHA_REG_MODE_HMAC_KEY_PROC;
+
+	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
+	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
+	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
+	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
+			     SHA_REG_MASK_IT_EN |
+				     (dma ? SHA_REG_MASK_DMA_EN : 0),
+			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
+}
+
+static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
+{
+	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
+}
+
+static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
+{
+	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
+			      SHA_REG_IRQSTATUS_INPUT_RDY);
+}
+
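+/*
+ * CPU (PIO) transmit path: program the control registers, then feed the
+ * data one block at a time through the DIN registers, using an atomic
+ * sg_miter walk and polling the input-ready status before each block.
+ */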
+static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
+			      int final)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+	int count, len32, bs32, offset = 0;
+	const u32 *buffer;
+	int mlen;
+	struct sg_mapping_iter mi;
+
+	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
+						ctx->digcnt, length, final);
+
+	dd->pdata->write_ctrl(dd, length, final, 0);
+	dd->pdata->trigger(dd, length);
+
+	/* should be non-zero before the next lines so clocks can be disabled later */
+	ctx->digcnt += length;
+	ctx->total -= length;
+
+	if (final)
+		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
+
+	set_bit(FLAGS_CPU, &dd->flags);
+
+	len32 = DIV_ROUND_UP(length, sizeof(u32));
+	bs32 = get_block_size(ctx) / sizeof(u32);
+
+	sg_miter_start(&mi, ctx->sg, ctx->sg_len,
+		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+
+	mlen = 0;
+
+	while (len32) {
+		if (dd->pdata->poll_irq(dd))
+			return -ETIMEDOUT;
+
+		for (count = 0; count < min(len32, bs32); count++, offset++) {
+			if (!mlen) {
+				sg_miter_next(&mi);
+				mlen = mi.length;
+				if (!mlen) {
+					pr_err("sg miter failure.\n");
+					return -EINVAL;
+				}
+				offset = 0;
+				buffer = mi.addr;
+			}
+			omap_sham_write(dd, SHA_REG_DIN(dd, count),
+					buffer[offset]);
+			mlen -= 4;
+		}
+		len32 -= min(len32, bs32);
+	}
+
+	sg_miter_stop(&mi);
+
+	return -EINPROGRESS;
+}
+
+static void omap_sham_dma_callback(void *param)
+{
+	struct omap_sham_dev *dd = param;
+
+	set_bit(FLAGS_DMA_READY, &dd->flags);
+	tasklet_schedule(&dd->done_task);
+}
+
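+/*
+ * DMA transmit path: map the scatterlist and queue a slave transfer
+ * targeting the DIN register; completion is signalled through
+ * omap_sham_dma_callback(), which schedules the done tasklet.
+ */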
+static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
+			      int final)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+	struct dma_async_tx_descriptor *tx;
+	struct dma_slave_config cfg;
+	int ret;
+
+	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
+						ctx->digcnt, length, final);
+
+	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
+		dev_err(dd->dev, "dma_map_sg error\n");
+		return -EINVAL;
+	}
+
+	memset(&cfg, 0, sizeof(cfg));
+
+	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
+	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
+	if (ret) {
+		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
+		return ret;
+	}
+
+	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
+				     DMA_MEM_TO_DEV,
+				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+	if (!tx) {
+		dev_err(dd->dev, "prep_slave_sg failed\n");
+		return -EINVAL;
+	}
+
+	tx->callback = omap_sham_dma_callback;
+	tx->callback_param = dd;
+
+	dd->pdata->write_ctrl(dd, length, final, 1);
+
+	ctx->digcnt += length;
+	ctx->total -= length;
+
+	if (final)
+		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
+
+	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
+
+	dmaengine_submit(tx);
+	dma_async_issue_pending(dd->dma_lch);
+
+	dd->pdata->trigger(dd, length);
+
+	return -EINPROGRESS;
+}
+
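+/*
+ * Build a new scatterlist that starts with the buffered data in
+ * xmit_buf (if any) and then covers new_len bytes of the original
+ * request's pages, so the hardware sees a block-aligned list without
+ * copying the payload.
+ */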
+static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
+				   struct scatterlist *sg, int bs, int new_len)
+{
+	int n = sg_nents(sg);
+	struct scatterlist *tmp;
+	int offset = ctx->offset;
+
+	if (ctx->bufcnt)
+		n++;
+
+	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
+	if (!ctx->sg)
+		return -ENOMEM;
+
+	sg_init_table(ctx->sg, n);
+
+	tmp = ctx->sg;
+
+	ctx->sg_len = 0;
+
+	if (ctx->bufcnt) {
+		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
+		tmp = sg_next(tmp);
+		ctx->sg_len++;
+	}
+
+	while (sg && new_len) {
+		int len = sg->length - offset;
+
+		if (offset) {
+			offset -= sg->length;
+			if (offset < 0)
+				offset = 0;
+		}
+
+		if (new_len < len)
+			len = new_len;
+
+		if (len > 0) {
+			new_len -= len;
+			sg_set_page(tmp, sg_page(sg), len, sg->offset);
+			if (new_len <= 0)
+				sg_mark_end(tmp);
+			tmp = sg_next(tmp);
+			ctx->sg_len++;
+		}
+
+		sg = sg_next(sg);
+	}
+
+	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
+
+	ctx->bufcnt = 0;
+
+	return 0;
+}
+
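+/*
+ * Fallback for unaligned data: copy the buffered bytes plus the
+ * remaining request data into freshly allocated pages and hash from a
+ * single contiguous scatterlist entry instead.
+ */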
+static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
+			      struct scatterlist *sg, int bs, int new_len)
+{
+	int pages;
+	void *buf;
+	int len;
+
+	len = new_len + ctx->bufcnt;
+
+	pages = get_order(ctx->total);
+
+	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
+	if (!buf) {
+		pr_err("Couldn't allocate pages for unaligned cases.\n");
+		return -ENOMEM;
+	}
+
+	if (ctx->bufcnt)
+		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
+
+	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
+				 ctx->total - ctx->bufcnt, 0);
+	sg_init_table(ctx->sgl, 1);
+	sg_set_buf(ctx->sgl, buf, len);
+	ctx->sg = ctx->sgl;
+	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
+	ctx->sg_len = 1;
+	ctx->bufcnt = 0;
+	ctx->offset = 0;
+
+	return 0;
+}
+
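+/*
+ * Decide how the request data can be handed to the hardware: if every
+ * entry is 4-byte aligned and block-sized, the scatterlist is used as
+ * is; if only the list layout is unsuitable, it is rebuilt via
+ * omap_sham_copy_sg_lists(); otherwise the data is flattened into a
+ * bounce buffer via omap_sham_copy_sgs().
+ */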
+static int omap_sham_align_sgs(struct scatterlist *sg,
+			       int nbytes, int bs, bool final,
+			       struct omap_sham_reqctx *rctx)
+{
+	int n = 0;
+	bool aligned = true;
+	bool list_ok = true;
+	struct scatterlist *sg_tmp = sg;
+	int new_len;
+	int offset = rctx->offset;
+
+	if (!sg || !sg->length || !nbytes)
+		return 0;
+
+	new_len = nbytes;
+
+	if (offset)
+		list_ok = false;
+
+	if (final)
+		new_len = DIV_ROUND_UP(new_len, bs) * bs;
+	else
+		new_len = (new_len - 1) / bs * bs;
+
+	if (nbytes != new_len)
+		list_ok = false;
+
+	while (nbytes > 0 && sg_tmp) {
+		n++;
+
+#ifdef CONFIG_ZONE_DMA
+		if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
+			aligned = false;
+			break;
+		}
+#endif
+
+		if (offset < sg_tmp->length) {
+			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
+				aligned = false;
+				break;
+			}
+
+			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
+				aligned = false;
+				break;
+			}
+		}
+
+		if (offset) {
+			offset -= sg_tmp->length;
+			if (offset < 0) {
+				nbytes += offset;
+				offset = 0;
+			}
+		} else {
+			nbytes -= sg_tmp->length;
+		}
+
+		sg_tmp = sg_next(sg_tmp);
+
+		if (nbytes < 0) {
+			list_ok = false;
+			break;
+		}
+	}
+
+	if (!aligned)
+		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
+	else if (!list_ok)
+		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
+
+	rctx->sg_len = n;
+	rctx->sg = sg;
+
+	return 0;
+}
+
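+/*
+ * Split the request into a block-aligned portion that is sent to the
+ * hardware now and a remainder ("hash_later") that is kept in the
+ * context buffer for the next update/final call. For non-final updates
+ * some data is always held back so the hash can be closed later.
+ */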
+static int omap_sham_prepare_request(struct ahash_request *req, bool update)
+{
+	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
+	int bs;
+	int ret;
+	int nbytes;
+	bool final = rctx->flags & BIT(FLAGS_FINUP);
+	int xmit_len, hash_later;
+
+	bs = get_block_size(rctx);
+
+	if (update)
+		nbytes = req->nbytes;
+	else
+		nbytes = 0;
+
+	rctx->total = nbytes + rctx->bufcnt;
+
+	if (!rctx->total)
+		return 0;
+
+	if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
+		int len = bs - rctx->bufcnt % bs;
+
+		if (len > nbytes)
+			len = nbytes;
+		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
+					 0, len, 0);
+		rctx->bufcnt += len;
+		nbytes -= len;
+		rctx->offset = len;
+	}
+
+	if (rctx->bufcnt)
+		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);
+
+	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
+	if (ret)
+		return ret;
+
+	xmit_len = rctx->total;
+
+	if (!IS_ALIGNED(xmit_len, bs)) {
+		if (final)
+			xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
+		else
+			xmit_len = xmit_len / bs * bs;
+	} else if (!final) {
+		xmit_len -= bs;
+	}
+
+	hash_later = rctx->total - xmit_len;
+	if (hash_later < 0)
+		hash_later = 0;
+
+	if (rctx->bufcnt && nbytes) {
+		/* have data from previous operation and current */
+		sg_init_table(rctx->sgl, 2);
+		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
+
+		sg_chain(rctx->sgl, 2, req->src);
+
+		rctx->sg = rctx->sgl;
+
+		rctx->sg_len++;
+	} else if (rctx->bufcnt) {
+		/* have buffered data only */
+		sg_init_table(rctx->sgl, 1);
+		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);
+
+		rctx->sg = rctx->sgl;
+
+		rctx->sg_len = 1;
+	}
+
+	if (hash_later) {
+		int offset = 0;
+
+		if (hash_later > req->nbytes) {
+			memcpy(rctx->buffer, rctx->buffer + xmit_len,
+			       hash_later - req->nbytes);
+			offset = hash_later - req->nbytes;
+		}
+
+		if (req->nbytes) {
+			scatterwalk_map_and_copy(rctx->buffer + offset,
+						 req->src,
+						 offset + req->nbytes -
+						 hash_later, hash_later, 0);
+		}
+
+		rctx->bufcnt = hash_later;
+	} else {
+		rctx->bufcnt = 0;
+	}
+
+	if (!final)
+		rctx->total = xmit_len;
+
+	return 0;
+}
+
+static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+
+	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
+
+	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);
+
+	return 0;
+}
+
+static int omap_sham_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = NULL, *tmp;
+	int bs = 0;
+
+	spin_lock_bh(&sham.lock);
+	if (!tctx->dd) {
+		list_for_each_entry(tmp, &sham.dev_list, list) {
+			dd = tmp;
+			break;
+		}
+		tctx->dd = dd;
+	} else {
+		dd = tctx->dd;
+	}
+	spin_unlock_bh(&sham.lock);
+
+	ctx->dd = dd;
+
+	ctx->flags = 0;
+
+	dev_dbg(dd->dev, "init: digest size: %d\n",
+		crypto_ahash_digestsize(tfm));
+
+	switch (crypto_ahash_digestsize(tfm)) {
+	case MD5_DIGEST_SIZE:
+		ctx->flags |= FLAGS_MODE_MD5;
+		bs = SHA1_BLOCK_SIZE;
+		break;
+	case SHA1_DIGEST_SIZE:
+		ctx->flags |= FLAGS_MODE_SHA1;
+		bs = SHA1_BLOCK_SIZE;
+		break;
+	case SHA224_DIGEST_SIZE:
+		ctx->flags |= FLAGS_MODE_SHA224;
+		bs = SHA224_BLOCK_SIZE;
+		break;
+	case SHA256_DIGEST_SIZE:
+		ctx->flags |= FLAGS_MODE_SHA256;
+		bs = SHA256_BLOCK_SIZE;
+		break;
+	case SHA384_DIGEST_SIZE:
+		ctx->flags |= FLAGS_MODE_SHA384;
+		bs = SHA384_BLOCK_SIZE;
+		break;
+	case SHA512_DIGEST_SIZE:
+		ctx->flags |= FLAGS_MODE_SHA512;
+		bs = SHA512_BLOCK_SIZE;
+		break;
+	}
+
+	ctx->bufcnt = 0;
+	ctx->digcnt = 0;
+	ctx->total = 0;
+	ctx->offset = 0;
+	ctx->buflen = BUFLEN;
+
+	if (tctx->flags & BIT(FLAGS_HMAC)) {
+		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
+			struct omap_sham_hmac_ctx *bctx = tctx->base;
+
+			memcpy(ctx->buffer, bctx->ipad, bs);
+			ctx->bufcnt = bs;
+		}
+
+		ctx->flags |= BIT(FLAGS_HMAC);
+	}
+
+	return 0;
+
+}
+
+static int omap_sham_update_req(struct omap_sham_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	int err;
+	bool final = ctx->flags & BIT(FLAGS_FINUP);
+
+	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
+		 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
+
+	if (ctx->total < get_block_size(ctx) ||
+	    ctx->total < dd->fallback_sz)
+		ctx->flags |= BIT(FLAGS_CPU);
+
+	if (ctx->flags & BIT(FLAGS_CPU))
+		err = omap_sham_xmit_cpu(dd, ctx->total, final);
+	else
+		err = omap_sham_xmit_dma(dd, ctx->total, final);
+
+	/* wait for DMA completion before we can take more data */
+	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
+
+	return err;
+}
+
+static int omap_sham_final_req(struct omap_sham_dev *dd)
+{
+	struct ahash_request *req = dd->req;
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	int err = 0, use_dma = 1;
+
+	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
+		/*
+		 * It is faster to handle the last block with the CPU, and
+		 * the CPU must be used anyway when DMA is not available
+		 * (polling mode).
+		 */
+		use_dma = 0;
+
+	if (use_dma)
+		err = omap_sham_xmit_dma(dd, ctx->total, 1);
+	else
+		err = omap_sham_xmit_cpu(dd, ctx->total, 1);
+
+	ctx->bufcnt = 0;
+
+	dev_dbg(dd->dev, "final_req: err: %d\n", err);
+
+	return err;
+}
+
+static int omap_sham_finish_hmac(struct ahash_request *req)
+{
+	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct omap_sham_hmac_ctx *bctx = tctx->base;
+	int bs = crypto_shash_blocksize(bctx->shash);
+	int ds = crypto_shash_digestsize(bctx->shash);
+	SHASH_DESC_ON_STACK(shash, bctx->shash);
+
+	shash->tfm = bctx->shash;
+	shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
+
+	return crypto_shash_init(shash) ?:
+	       crypto_shash_update(shash, bctx->opad, bs) ?:
+	       crypto_shash_finup(shash, req->result, ds, req->result);
+}
+
+static int omap_sham_finish(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = ctx->dd;
+	int err = 0;
+
+	if (ctx->digcnt) {
+		omap_sham_copy_ready_hash(req);
+		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
+				!test_bit(FLAGS_AUTO_XOR, &dd->flags))
+			err = omap_sham_finish_hmac(req);
+	}
+
+	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
+
+	return err;
+}
+
+static void omap_sham_finish_req(struct ahash_request *req, int err)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = ctx->dd;
+
+	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
+		free_pages((unsigned long)sg_virt(ctx->sg),
+			   get_order(ctx->sg->length + ctx->bufcnt));
+
+	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
+		kfree(ctx->sg);
+
+	ctx->sg = NULL;
+
+	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
+
+	if (!err) {
+		dd->pdata->copy_hash(req, 1);
+		if (test_bit(FLAGS_FINAL, &dd->flags))
+			err = omap_sham_finish(req);
+	} else {
+		ctx->flags |= BIT(FLAGS_ERROR);
+	}
+
+	/* atomic operation is not needed here */
+	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
+			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+
+	if (req->base.complete)
+		req->base.complete(&req->base, err);
+}
+
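+/*
+ * Software request queue: enqueue the new request and, if the device is
+ * idle, dequeue the next one, prepare its scatterlists, initialise the
+ * hardware and start the update or final phase. Requests that complete
+ * synchronously are finished here and the queue is retried immediately.
+ */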
+static int omap_sham_handle_queue(struct omap_sham_dev *dd,
+				  struct ahash_request *req)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct omap_sham_reqctx *ctx;
+	unsigned long flags;
+	int err = 0, ret = 0;
+
+retry:
+	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ahash_enqueue_request(&dd->queue, req);
+	if (test_bit(FLAGS_BUSY, &dd->flags)) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
+	backlog = crypto_get_backlog(&dd->queue);
+	async_req = crypto_dequeue_request(&dd->queue);
+	if (async_req)
+		set_bit(FLAGS_BUSY, &dd->flags);
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!async_req)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	req = ahash_request_cast(async_req);
+	dd->req = req;
+	ctx = ahash_request_ctx(req);
+
+	err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
+	if (err || !ctx->total)
+		goto err1;
+
+	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+						ctx->op, req->nbytes);
+
+	err = omap_sham_hw_init(dd);
+	if (err)
+		goto err1;
+
+	if (ctx->digcnt)
+		/* request has changed - restore hash */
+		dd->pdata->copy_hash(req, 0);
+
+	if (ctx->op == OP_UPDATE) {
+		err = omap_sham_update_req(dd);
+		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
+			/* no final() after finup() */
+			err = omap_sham_final_req(dd);
+	} else if (ctx->op == OP_FINAL) {
+		err = omap_sham_final_req(dd);
+	}
+err1:
+	dev_dbg(dd->dev, "exit, err: %d\n", err);
+
+	if (err != -EINPROGRESS) {
+		/* done_task will not finish it, so do it here */
+		omap_sham_finish_req(req, err);
+		req = NULL;
+
+		/*
+		 * Execute the next request immediately if there is
+		 * anything in the queue.
+		 */
+		goto retry;
+	}
+
+	return ret;
+}
+
+static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct omap_sham_dev *dd = tctx->dd;
+
+	ctx->op = op;
+
+	return omap_sham_handle_queue(dd, req);
+}
+
+static int omap_sham_update(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = ctx->dd;
+
+	if (!req->nbytes)
+		return 0;
+
+	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
+		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
+					 0, req->nbytes, 0);
+		ctx->bufcnt += req->nbytes;
+		return 0;
+	}
+
+	if (dd->polling_mode)
+		ctx->flags |= BIT(FLAGS_CPU);
+
+	return omap_sham_enqueue(req, OP_UPDATE);
+}
+
+static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
+				  const u8 *data, unsigned int len, u8 *out)
+{
+	SHASH_DESC_ON_STACK(shash, tfm);
+
+	shash->tfm = tfm;
+	shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_shash_digest(shash, data, len, out);
+}
+
+static int omap_sham_final_shash(struct ahash_request *req)
+{
+	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	int offset = 0;
+
+	/*
+	 * If we are running HMAC on hardware without automatic XOR
+	 * support, skip the ipad at the beginning of the buffer when
+	 * falling back to the software algorithm.
+	 */
+	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
+	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
+		offset = get_block_size(ctx);
+
+	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
+				      ctx->buffer + offset,
+				      ctx->bufcnt - offset, req->result);
+}
+
+static int omap_sham_final(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+	ctx->flags |= BIT(FLAGS_FINUP);
+
+	if (ctx->flags & BIT(FLAGS_ERROR))
+		return 0; /* uncompleted hash is not needed */
+
+	/*
+	 * OMAP HW accel works only with buffers >= 9 bytes.
+	 * HMAC is always >= 9 because the ipad equals the block size.
+	 * If the buffered size is less than fallback_sz, we use the
+	 * software fallback, as using DMA + HW in this case doesn't
+	 * provide any benefit.
+	 */
+	if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
+		return omap_sham_final_shash(req);
+	else if (ctx->bufcnt)
+		return omap_sham_enqueue(req, OP_FINAL);
+
+	/* copy ready hash (+ finalize hmac) */
+	return omap_sham_finish(req);
+}
+
+static int omap_sham_finup(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	int err1, err2;
+
+	ctx->flags |= BIT(FLAGS_FINUP);
+
+	err1 = omap_sham_update(req);
+	if (err1 == -EINPROGRESS || err1 == -EBUSY)
+		return err1;
+	/*
+	 * final() always has to be called to clean up resources,
+	 * even if update() failed, except on -EINPROGRESS.
+	 */
+	err2 = omap_sham_final(req);
+
+	return err1 ?: err2;
+}
+
+static int omap_sham_digest(struct ahash_request *req)
+{
+	return omap_sham_init(req) ?: omap_sham_finup(req);
+}
+
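+/*
+ * HMAC setkey: keys longer than the block size are first digested with
+ * the base shash; the result is stored zero-padded in ipad. On hardware
+ * without automatic XOR support the ipad/opad XOR with the HMAC
+ * constants is done here in software.
+ */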
+static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
+		      unsigned int keylen)
+{
+	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct omap_sham_hmac_ctx *bctx = tctx->base;
+	int bs = crypto_shash_blocksize(bctx->shash);
+	int ds = crypto_shash_digestsize(bctx->shash);
+	struct omap_sham_dev *dd = NULL, *tmp;
+	int err, i;
+
+	spin_lock_bh(&sham.lock);
+	if (!tctx->dd) {
+		list_for_each_entry(tmp, &sham.dev_list, list) {
+			dd = tmp;
+			break;
+		}
+		tctx->dd = dd;
+	} else {
+		dd = tctx->dd;
+	}
+	spin_unlock_bh(&sham.lock);
+
+	err = crypto_shash_setkey(tctx->fallback, key, keylen);
+	if (err)
+		return err;
+
+	if (keylen > bs) {
+		err = omap_sham_shash_digest(bctx->shash,
+				crypto_shash_get_flags(bctx->shash),
+				key, keylen, bctx->ipad);
+		if (err)
+			return err;
+		keylen = ds;
+	} else {
+		memcpy(bctx->ipad, key, keylen);
+	}
+
+	memset(bctx->ipad + keylen, 0, bs - keylen);
+
+	if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
+		memcpy(bctx->opad, bctx->ipad, bs);
+
+		for (i = 0; i < bs; i++) {
+			bctx->ipad[i] ^= HMAC_IPAD_VALUE;
+			bctx->opad[i] ^= HMAC_OPAD_VALUE;
+		}
+	}
+
+	return err;
+}
+
+static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+{
+	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+	const char *alg_name = crypto_tfm_alg_name(tfm);
+
+	/* Allocate a fallback and abort if it failed. */
+	tctx->fallback = crypto_alloc_shash(alg_name, 0,
+					    CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(tctx->fallback)) {
+		pr_err("omap-sham: fallback driver '%s' "
+				"could not be loaded.\n", alg_name);
+		return PTR_ERR(tctx->fallback);
+	}
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct omap_sham_reqctx) + BUFLEN);
+
+	if (alg_base) {
+		struct omap_sham_hmac_ctx *bctx = tctx->base;
+		tctx->flags |= BIT(FLAGS_HMAC);
+		bctx->shash = crypto_alloc_shash(alg_base, 0,
+						CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(bctx->shash)) {
+			pr_err("omap-sham: base driver '%s' "
+					"could not be loaded.\n", alg_base);
+			crypto_free_shash(tctx->fallback);
+			return PTR_ERR(bctx->shash);
+		}
+
+	}
+
+	return 0;
+}
+
+static int omap_sham_cra_init(struct crypto_tfm *tfm)
+{
+	return omap_sham_cra_init_alg(tfm, NULL);
+}
+
+static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
+{
+	return omap_sham_cra_init_alg(tfm, "sha1");
+}
+
+static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
+{
+	return omap_sham_cra_init_alg(tfm, "sha224");
+}
+
+static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
+{
+	return omap_sham_cra_init_alg(tfm, "sha256");
+}
+
+static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
+{
+	return omap_sham_cra_init_alg(tfm, "md5");
+}
+
+static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
+{
+	return omap_sham_cra_init_alg(tfm, "sha384");
+}
+
+static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
+{
+	return omap_sham_cra_init_alg(tfm, "sha512");
+}
+
+static void omap_sham_cra_exit(struct crypto_tfm *tfm)
+{
+	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_shash(tctx->fallback);
+	tctx->fallback = NULL;
+
+	if (tctx->flags & BIT(FLAGS_HMAC)) {
+		struct omap_sham_hmac_ctx *bctx = tctx->base;
+		crypto_free_shash(bctx->shash);
+	}
+}
+
+static int omap_sham_export(struct ahash_request *req, void *out)
+{
+	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
+
+	memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
+
+	return 0;
+}
+
+static int omap_sham_import(struct ahash_request *req, const void *in)
+{
+	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
+	const struct omap_sham_reqctx *ctx_in = in;
+
+	memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
+
+	return 0;
+}
+
+static struct ahash_alg algs_sha1_md5[] = {
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.halg.digestsize	= SHA1_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "sha1",
+		.cra_driver_name	= "omap-sha1",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
+		.cra_alignmask		= OMAP_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.halg.digestsize	= MD5_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "md5",
+		.cra_driver_name	= "omap-md5",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
+		.cra_alignmask		= OMAP_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.setkey		= omap_sham_setkey,
+	.halg.digestsize	= SHA1_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "hmac(sha1)",
+		.cra_driver_name	= "omap-hmac-sha1",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
+					sizeof(struct omap_sham_hmac_ctx),
+		.cra_alignmask		= OMAP_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_sha1_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.setkey		= omap_sham_setkey,
+	.halg.digestsize	= MD5_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "hmac(md5)",
+		.cra_driver_name	= "omap-hmac-md5",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
+					sizeof(struct omap_sham_hmac_ctx),
+		.cra_alignmask		= OMAP_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_md5_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+}
+};
+
+/* OMAP4 has some algs in addition to what OMAP2 has */
+static struct ahash_alg algs_sha224_sha256[] = {
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.halg.digestsize	= SHA224_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "sha224",
+		.cra_driver_name	= "omap-sha224",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA224_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
+		.cra_alignmask		= OMAP_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.halg.digestsize	= SHA256_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "sha256",
+		.cra_driver_name	= "omap-sha256",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA256_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
+		.cra_alignmask		= OMAP_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.setkey		= omap_sham_setkey,
+	.halg.digestsize	= SHA224_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "hmac(sha224)",
+		.cra_driver_name	= "omap-hmac-sha224",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA224_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
+					sizeof(struct omap_sham_hmac_ctx),
+		.cra_alignmask		= OMAP_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_sha224_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.setkey		= omap_sham_setkey,
+	.halg.digestsize	= SHA256_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "hmac(sha256)",
+		.cra_driver_name	= "omap-hmac-sha256",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA256_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
+					sizeof(struct omap_sham_hmac_ctx),
+		.cra_alignmask		= OMAP_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_sha256_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+};
+
+static struct ahash_alg algs_sha384_sha512[] = {
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.halg.digestsize	= SHA384_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "sha384",
+		.cra_driver_name	= "omap-sha384",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA384_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
+		.cra_alignmask		= OMAP_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.halg.digestsize	= SHA512_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "sha512",
+		.cra_driver_name	= "omap-sha512",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA512_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
+		.cra_alignmask		= OMAP_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.setkey		= omap_sham_setkey,
+	.halg.digestsize	= SHA384_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "hmac(sha384)",
+		.cra_driver_name	= "omap-hmac-sha384",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA384_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
+					sizeof(struct omap_sham_hmac_ctx),
+		.cra_alignmask		= OMAP_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_sha384_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+{
+	.init		= omap_sham_init,
+	.update		= omap_sham_update,
+	.final		= omap_sham_final,
+	.finup		= omap_sham_finup,
+	.digest		= omap_sham_digest,
+	.setkey		= omap_sham_setkey,
+	.halg.digestsize	= SHA512_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "hmac(sha512)",
+		.cra_driver_name	= "omap-hmac-sha512",
+		.cra_priority		= 400,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA512_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
+					sizeof(struct omap_sham_hmac_ctx),
+		.cra_alignmask		= OMAP_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= omap_sham_cra_sha512_init,
+		.cra_exit		= omap_sham_cra_exit,
+	}
+},
+};
+
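+/*
+ * Tasklet bottom half: finishes a CPU- or DMA-driven transfer once the IRQ
+ * or DMA callback has signalled completion, then (if the engine is idle)
+ * pulls the next request off the queue.
+ */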
+static void omap_sham_done_task(unsigned long data)
+{
+	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+	int err = 0;
+
+	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
+		omap_sham_handle_queue(dd, NULL);
+		return;
+	}
+
+	if (test_bit(FLAGS_CPU, &dd->flags)) {
+		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
+			goto finish;
+	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
+		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
+			omap_sham_update_dma_stop(dd);
+			if (dd->err) {
+				err = dd->err;
+				goto finish;
+			}
+		}
+		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
+			/* hash or semi-hash ready */
+			clear_bit(FLAGS_DMA_READY, &dd->flags);
+			goto finish;
+		}
+	}
+
+	return;
+
+finish:
+	dev_dbg(dd->dev, "update done: err: %d\n", err);
+	/* finish current request */
+	omap_sham_finish_req(dd->req, err);
+
+	/* If we are not busy, process next req */
+	if (!test_bit(FLAGS_BUSY, &dd->flags))
+		omap_sham_handle_queue(dd, NULL);
+}
+
+static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
+{
+	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
+		dev_warn(dd->dev, "Interrupt when no active requests.\n");
+	} else {
+		set_bit(FLAGS_OUTPUT_READY, &dd->flags);
+		tasklet_schedule(&dd->done_task);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
+{
+	struct omap_sham_dev *dd = dev_id;
+
+	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
+		/* final -> allow device to go to power-saving mode */
+		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
+
+	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
+				 SHA_REG_CTRL_OUTPUT_READY);
+	omap_sham_read(dd, SHA_REG_CTRL);
+
+	return omap_sham_irq_common(dd);
+}
+
+static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
+{
+	struct omap_sham_dev *dd = dev_id;
+
+	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
+
+	return omap_sham_irq_common(dd);
+}
+
+static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
+	{
+		.algs_list	= algs_sha1_md5,
+		.size		= ARRAY_SIZE(algs_sha1_md5),
+	},
+};
+
+static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
+	.algs_info	= omap_sham_algs_info_omap2,
+	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
+	.flags		= BIT(FLAGS_BE32_SHA1),
+	.digest_size	= SHA1_DIGEST_SIZE,
+	.copy_hash	= omap_sham_copy_hash_omap2,
+	.write_ctrl	= omap_sham_write_ctrl_omap2,
+	.trigger	= omap_sham_trigger_omap2,
+	.poll_irq	= omap_sham_poll_irq_omap2,
+	.intr_hdlr	= omap_sham_irq_omap2,
+	.idigest_ofs	= 0x00,
+	.din_ofs	= 0x1c,
+	.digcnt_ofs	= 0x14,
+	.rev_ofs	= 0x5c,
+	.mask_ofs	= 0x60,
+	.sysstatus_ofs	= 0x64,
+	.major_mask	= 0xf0,
+	.major_shift	= 4,
+	.minor_mask	= 0x0f,
+	.minor_shift	= 0,
+};
+
+#ifdef CONFIG_OF
+static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
+	{
+		.algs_list	= algs_sha1_md5,
+		.size		= ARRAY_SIZE(algs_sha1_md5),
+	},
+	{
+		.algs_list	= algs_sha224_sha256,
+		.size		= ARRAY_SIZE(algs_sha224_sha256),
+	},
+};
+
+static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
+	.algs_info	= omap_sham_algs_info_omap4,
+	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
+	.flags		= BIT(FLAGS_AUTO_XOR),
+	.digest_size	= SHA256_DIGEST_SIZE,
+	.copy_hash	= omap_sham_copy_hash_omap4,
+	.write_ctrl	= omap_sham_write_ctrl_omap4,
+	.trigger	= omap_sham_trigger_omap4,
+	.poll_irq	= omap_sham_poll_irq_omap4,
+	.intr_hdlr	= omap_sham_irq_omap4,
+	.idigest_ofs	= 0x020,
+	.odigest_ofs	= 0x0,
+	.din_ofs	= 0x080,
+	.digcnt_ofs	= 0x040,
+	.rev_ofs	= 0x100,
+	.mask_ofs	= 0x110,
+	.sysstatus_ofs	= 0x114,
+	.mode_ofs	= 0x44,
+	.length_ofs	= 0x48,
+	.major_mask	= 0x0700,
+	.major_shift	= 8,
+	.minor_mask	= 0x003f,
+	.minor_shift	= 0,
+};
+
+static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
+	{
+		.algs_list	= algs_sha1_md5,
+		.size		= ARRAY_SIZE(algs_sha1_md5),
+	},
+	{
+		.algs_list	= algs_sha224_sha256,
+		.size		= ARRAY_SIZE(algs_sha224_sha256),
+	},
+	{
+		.algs_list	= algs_sha384_sha512,
+		.size		= ARRAY_SIZE(algs_sha384_sha512),
+	},
+};
+
+static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
+	.algs_info	= omap_sham_algs_info_omap5,
+	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap5),
+	.flags		= BIT(FLAGS_AUTO_XOR),
+	.digest_size	= SHA512_DIGEST_SIZE,
+	.copy_hash	= omap_sham_copy_hash_omap4,
+	.write_ctrl	= omap_sham_write_ctrl_omap4,
+	.trigger	= omap_sham_trigger_omap4,
+	.poll_irq	= omap_sham_poll_irq_omap4,
+	.intr_hdlr	= omap_sham_irq_omap4,
+	.idigest_ofs	= 0x240,
+	.odigest_ofs	= 0x200,
+	.din_ofs	= 0x080,
+	.digcnt_ofs	= 0x280,
+	.rev_ofs	= 0x100,
+	.mask_ofs	= 0x110,
+	.sysstatus_ofs	= 0x114,
+	.mode_ofs	= 0x284,
+	.length_ofs	= 0x288,
+	.major_mask	= 0x0700,
+	.major_shift	= 8,
+	.minor_mask	= 0x003f,
+	.minor_shift	= 0,
+};
+
+static const struct of_device_id omap_sham_of_match[] = {
+	{
+		.compatible	= "ti,omap2-sham",
+		.data		= &omap_sham_pdata_omap2,
+	},
+	{
+		.compatible	= "ti,omap3-sham",
+		.data		= &omap_sham_pdata_omap2,
+	},
+	{
+		.compatible	= "ti,omap4-sham",
+		.data		= &omap_sham_pdata_omap4,
+	},
+	{
+		.compatible	= "ti,omap5-sham",
+		.data		= &omap_sham_pdata_omap5,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, omap_sham_of_match);
+
+static int omap_sham_get_res_of(struct omap_sham_dev *dd,
+		struct device *dev, struct resource *res)
+{
+	struct device_node *node = dev->of_node;
+	int err = 0;
+
+	dd->pdata = of_device_get_match_data(dev);
+	if (!dd->pdata) {
+		dev_err(dev, "no compatible OF match\n");
+		err = -EINVAL;
+		goto err;
+	}
+
+	err = of_address_to_resource(node, 0, res);
+	if (err < 0) {
+		dev_err(dev, "can't translate OF node address\n");
+		err = -EINVAL;
+		goto err;
+	}
+
+	dd->irq = irq_of_parse_and_map(node, 0);
+	if (!dd->irq) {
+		dev_err(dev, "can't translate OF irq value\n");
+		err = -EINVAL;
+		goto err;
+	}
+
+err:
+	return err;
+}
+#else
+static const struct of_device_id omap_sham_of_match[] = {
+	{},
+};
+
+static int omap_sham_get_res_of(struct omap_sham_dev *dd,
+		struct device *dev, struct resource *res)
+{
+	return -EINVAL;
+}
+#endif
+
+static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
+		struct platform_device *pdev, struct resource *res)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *r;
+	int err = 0;
+
+	/* Get the base address */
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		dev_err(dev, "no MEM resource info\n");
+		err = -ENODEV;
+		goto err;
+	}
+	memcpy(res, r, sizeof(*res));
+
+	/* Get the IRQ */
+	dd->irq = platform_get_irq(pdev, 0);
+	if (dd->irq < 0) {
+		dev_err(dev, "no IRQ resource info\n");
+		err = dd->irq;
+		goto err;
+	}
+
+	/* Only OMAP2/3 can be non-DT */
+	dd->pdata = &omap_sham_pdata_omap2;
+
+err:
+	return err;
+}
+
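+/*
+ * sysfs tunables: fallback_sz is the request-size threshold used to decide
+ * when small requests are punted to the software fallback, queue_len is the
+ * crypto queue depth. Both may be changed at runtime.
+ */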
+static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct omap_sham_dev *dd = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", dd->fallback_sz);
+}
+
+static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	struct omap_sham_dev *dd = dev_get_drvdata(dev);
+	ssize_t status;
+	long value;
+
+	status = kstrtol(buf, 0, &value);
+	if (status)
+		return status;
+
+	/* HW accelerator only works with buffers >= 9 */
+	if (value < 9) {
+		dev_err(dev, "minimum fallback size 9\n");
+		return -EINVAL;
+	}
+
+	dd->fallback_sz = value;
+
+	return size;
+}
+
+static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct omap_sham_dev *dd = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", dd->queue.max_qlen);
+}
+
+static ssize_t queue_len_store(struct device *dev,
+			       struct device_attribute *attr, const char *buf,
+			       size_t size)
+{
+	struct omap_sham_dev *dd = dev_get_drvdata(dev);
+	ssize_t status;
+	long value;
+	unsigned long flags;
+
+	status = kstrtol(buf, 0, &value);
+	if (status)
+		return status;
+
+	if (value < 1)
+		return -EINVAL;
+
+	/*
+	 * Changing the queue size on the fly is safe: if the new size is
+	 * smaller than the current size, the queue simply stops accepting
+	 * new entries until it has shrunk enough.
+	 */
+	spin_lock_irqsave(&dd->lock, flags);
+	dd->queue.max_qlen = value;
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	return size;
+}
+
+static DEVICE_ATTR_RW(queue_len);
+static DEVICE_ATTR_RW(fallback);
+
+static struct attribute *omap_sham_attrs[] = {
+	&dev_attr_queue_len.attr,
+	&dev_attr_fallback.attr,
+	NULL,
+};
+
+static struct attribute_group omap_sham_attr_group = {
+	.attrs = omap_sham_attrs,
+};
+
+static int omap_sham_probe(struct platform_device *pdev)
+{
+	struct omap_sham_dev *dd;
+	struct device *dev = &pdev->dev;
+	struct resource res;
+	dma_cap_mask_t mask;
+	int err, i, j;
+	u32 rev;
+
+	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
+	if (dd == NULL) {
+		dev_err(dev, "unable to alloc data struct.\n");
+		err = -ENOMEM;
+		goto data_err;
+	}
+	dd->dev = dev;
+	platform_set_drvdata(pdev, dd);
+
+	INIT_LIST_HEAD(&dd->list);
+	spin_lock_init(&dd->lock);
+	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
+	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
+
+	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
+			       omap_sham_get_res_pdev(dd, pdev, &res);
+	if (err)
+		goto data_err;
+
+	dd->io_base = devm_ioremap_resource(dev, &res);
+	if (IS_ERR(dd->io_base)) {
+		err = PTR_ERR(dd->io_base);
+		goto data_err;
+	}
+	dd->phys_base = res.start;
+
+	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
+			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
+	if (err) {
+		dev_err(dev, "unable to request irq %d, err = %d\n",
+			dd->irq, err);
+		goto data_err;
+	}
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	dd->dma_lch = dma_request_chan(dev, "rx");
+	if (IS_ERR(dd->dma_lch)) {
+		err = PTR_ERR(dd->dma_lch);
+		if (err == -EPROBE_DEFER)
+			goto data_err;
+
+		dd->polling_mode = 1;
+		dev_dbg(dev, "using polling mode instead of dma\n");
+	}
+
+	dd->flags |= dd->pdata->flags;
+
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
+
+	dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
+
+	pm_runtime_enable(dev);
+	pm_runtime_irq_safe(dev);
+
+	err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		dev_err(dev, "failed to get sync: %d\n", err);
+		goto err_pm;
+	}
+
+	rev = omap_sham_read(dd, SHA_REG_REV(dd));
+	pm_runtime_put_sync(&pdev->dev);
+
+	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
+		(rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
+		(rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
+
+	spin_lock(&sham.lock);
+	list_add_tail(&dd->list, &sham.dev_list);
+	spin_unlock(&sham.lock);
+
+	for (i = 0; i < dd->pdata->algs_info_size; i++) {
+		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
+			struct ahash_alg *alg;
+
+			alg = &dd->pdata->algs_info[i].algs_list[j];
+			alg->export = omap_sham_export;
+			alg->import = omap_sham_import;
+			alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
+					      BUFLEN;
+			err = crypto_register_ahash(alg);
+			if (err)
+				goto err_algs;
+
+			dd->pdata->algs_info[i].registered++;
+		}
+	}
+
+	err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
+	if (err) {
+		dev_err(dev, "could not create sysfs device attrs\n");
+		goto err_algs;
+	}
+
+	return 0;
+
+err_algs:
+	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+			crypto_unregister_ahash(
+					&dd->pdata->algs_info[i].algs_list[j]);
+err_pm:
+	pm_runtime_disable(dev);
+	if (!dd->polling_mode)
+		dma_release_channel(dd->dma_lch);
+data_err:
+	dev_err(dev, "initialization failed.\n");
+
+	return err;
+}
+
+static int omap_sham_remove(struct platform_device *pdev)
+{
+	struct omap_sham_dev *dd;
+	int i, j;
+
+	dd = platform_get_drvdata(pdev);
+	if (!dd)
+		return -ENODEV;
+	spin_lock(&sham.lock);
+	list_del(&dd->list);
+	spin_unlock(&sham.lock);
+	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
+		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+			crypto_unregister_ahash(
+					&dd->pdata->algs_info[i].algs_list[j]);
+	tasklet_kill(&dd->done_task);
+	pm_runtime_disable(&pdev->dev);
+
+	if (!dd->polling_mode)
+		dma_release_channel(dd->dma_lch);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int omap_sham_suspend(struct device *dev)
+{
+	pm_runtime_put_sync(dev);
+	return 0;
+}
+
+static int omap_sham_resume(struct device *dev)
+{
+	int err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		dev_err(dev, "failed to get sync: %d\n", err);
+		return err;
+	}
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
+
+static struct platform_driver omap_sham_driver = {
+	.probe	= omap_sham_probe,
+	.remove	= omap_sham_remove,
+	.driver	= {
+		.name	= "omap-sham",
+		.pm	= &omap_sham_pm_ops,
+		.of_match_table	= omap_sham_of_match,
+	},
+};
+
+module_platform_driver(omap_sham_driver);
+
+MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Dmitry Kasatkin");
+MODULE_ALIAS("platform:omap-sham");
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
new file mode 100644
index 0000000..09d823d
--- /dev/null
+++ b/drivers/crypto/padlock-aes.c
@@ -0,0 +1,551 @@
+/* 
+ * Cryptographic API.
+ *
+ * Support for VIA PadLock hardware crypto engine.
+ *
+ * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
+ *
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/padlock.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <asm/cpu_device_id.h>
+#include <asm/byteorder.h>
+#include <asm/processor.h>
+#include <asm/fpu/api.h>
+
+/*
+ * Number of data blocks actually fetched for each xcrypt insn.
+ * Processors with prefetch errata will fetch extra blocks.
+ */
+static unsigned int ecb_fetch_blocks = 2;
+#define MAX_ECB_FETCH_BLOCKS (8)
+#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+
+static unsigned int cbc_fetch_blocks = 1;
+#define MAX_CBC_FETCH_BLOCKS (4)
+#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
+
+/* Control word. */
+struct cword {
+	unsigned int __attribute__ ((__packed__))
+		rounds:4,
+		algo:3,
+		keygen:1,
+		interm:1,
+		encdec:1,
+		ksize:2;
+} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
+
+/* Whenever making any changes to the following
+ * structure, *make sure* you keep E, d_data
+ * and cword aligned on 16-byte boundaries, and that
+ * the hardware can access 16 * 16 bytes of E and d_data
+ * (only the first 15 * 16 bytes matter, but the HW reads
+ * more).
+ */
+struct aes_ctx {
+	u32 E[AES_MAX_KEYLENGTH_U32]
+		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
+	u32 d_data[AES_MAX_KEYLENGTH_U32]
+		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
+	struct {
+		struct cword encrypt;
+		struct cword decrypt;
+	} cword;
+	u32 *D;
+};
+
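+/* Control word most recently used by the xcrypt insns on each CPU. */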
+static DEFINE_PER_CPU(struct cword *, paes_last_cword);
+
+/* Tells whether the ACE is capable of generating
+   the extended key for a given key_len. */
+static inline int
+aes_hw_extkey_available(uint8_t key_len)
+{
+	/* TODO: We should check the actual CPU model/stepping
+	         as it's possible that the capability will be
+	         added in the next CPU revisions. */
+	if (key_len == 16)
+		return 1;
+	return 0;
+}
+
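+/*
+ * crypto_tfm_ctx() is only guaranteed to be aligned to
+ * crypto_tfm_ctx_alignment(); if that is weaker than PADLOCK_ALIGNMENT,
+ * align the context pointer up by hand.
+ */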
+static inline struct aes_ctx *aes_ctx_common(void *ctx)
+{
+	unsigned long addr = (unsigned long)ctx;
+	unsigned long align = PADLOCK_ALIGNMENT;
+
+	if (align <= crypto_tfm_ctx_alignment())
+		align = 1;
+	return (struct aes_ctx *)ALIGN(addr, align);
+}
+
+static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+{
+	return aes_ctx_common(crypto_tfm_ctx(tfm));
+}
+
+static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
+{
+	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
+}
+
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+		       unsigned int key_len)
+{
+	struct aes_ctx *ctx = aes_ctx(tfm);
+	const __le32 *key = (const __le32 *)in_key;
+	u32 *flags = &tfm->crt_flags;
+	struct crypto_aes_ctx gen_aes;
+	int cpu;
+
+	if (key_len % 8) {
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+
+	/*
+	 * If the hardware is capable of generating the extended key
+	 * itself we must supply the plain key for both encryption
+	 * and decryption.
+	 */
+	ctx->D = ctx->E;
+
+	ctx->E[0] = le32_to_cpu(key[0]);
+	ctx->E[1] = le32_to_cpu(key[1]);
+	ctx->E[2] = le32_to_cpu(key[2]);
+	ctx->E[3] = le32_to_cpu(key[3]);
+
+	/* Prepare control words. */
+	memset(&ctx->cword, 0, sizeof(ctx->cword));
+
+	ctx->cword.decrypt.encdec = 1;
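+	/* 10/12/14 rounds and ksize 0/1/2 for 128/192/256-bit keys. */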
+	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
+	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
+	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
+	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;
+
+	/* Don't generate extended keys if the hardware can do it. */
+	if (aes_hw_extkey_available(key_len))
+		goto ok;
+
+	ctx->D = ctx->d_data;
+	ctx->cword.encrypt.keygen = 1;
+	ctx->cword.decrypt.keygen = 1;
+
+	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+
+	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
+	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
+
+ok:
+	for_each_online_cpu(cpu)
+		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
+		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
+			per_cpu(paes_last_cword, cpu) = NULL;
+
+	return 0;
+}
+
+/* ====== Encryption/decryption routines ====== */
+
+/* These are the real calls to PadLock. */
+static inline void padlock_reset_key(struct cword *cword)
+{
+	int cpu = raw_smp_processor_id();
+
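+	/*
+	 * Writing EFLAGS (pushf/popf) forces the xcrypt insns to reload
+	 * their key material, so only do it when the last control word used
+	 * on this CPU differs from ours.
+	 */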
+	if (cword != per_cpu(paes_last_cword, cpu))
+#ifndef CONFIG_X86_64
+		asm volatile ("pushfl; popfl");
+#else
+		asm volatile ("pushfq; popfq");
+#endif
+}
+
+static inline void padlock_store_cword(struct cword *cword)
+{
+	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
+}
+
+/*
+ * While the padlock instructions don't use FP/SSE registers, they
+ * generate a spurious DNA fault when CR0.TS is '1'.  Fortunately,
+ * the kernel doesn't use CR0.TS.
+ */
+
+static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
+				  struct cword *control_word, int count)
+{
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+		      : "+S"(input), "+D"(output)
+		      : "d"(control_word), "b"(key), "c"(count));
+}
+
+static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+				 u8 *iv, struct cword *control_word, int count)
+{
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
+		      : "+S" (input), "+D" (output), "+a" (iv)
+		      : "d" (control_word), "b" (key), "c" (count));
+	return iv;
+}
+
+static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
+			   struct cword *cword, int count)
+{
+	/*
+	 * Padlock prefetches extra data so we must provide mapped input buffers.
+	 * Assume there are at least 16 bytes of stack already in use.
+	 */
+	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
+	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	memcpy(tmp, in, count * AES_BLOCK_SIZE);
+	rep_xcrypt_ecb(tmp, out, key, cword, count);
+}
+
+static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
+			   u8 *iv, struct cword *cword, int count)
+{
+	/*
+	 * Padlock prefetches extra data so we must provide mapped input buffers.
+	 * Assume there are at least 16 bytes of stack already in use.
+	 */
+	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
+	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	memcpy(tmp, in, count * AES_BLOCK_SIZE);
+	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
+}
+
+static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
+			     struct cword *cword, int count)
+{
+	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
+	 * We could avoid some copying here but it's probably not worth it.
+	 */
+	if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
+		ecb_crypt_copy(in, out, key, cword, count);
+		return;
+	}
+
+	rep_xcrypt_ecb(in, out, key, cword, count);
+}
+
+static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
+			    u8 *iv, struct cword *cword, int count)
+{
+	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
+	if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
+		return cbc_crypt_copy(in, out, key, iv, cword, count);
+
+	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
+}
+
+static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
+				      void *control_word, u32 count)
+{
+	u32 initial = count & (ecb_fetch_blocks - 1);
+
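+	/*
+	 * Process the odd leading blocks first so the trailing bulk run is
+	 * an exact multiple of the fetch size and its over-fetch stays
+	 * inside the source buffer (Nano prefetch errata workaround).
+	 */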
+	if (count < ecb_fetch_blocks) {
+		ecb_crypt(input, output, key, control_word, count);
+		return;
+	}
+
+	count -= initial;
+
+	if (initial)
+		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+			      : "+S"(input), "+D"(output)
+			      : "d"(control_word), "b"(key), "c"(initial));
+
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+		      : "+S"(input), "+D"(output)
+		      : "d"(control_word), "b"(key), "c"(count));
+}
+
+static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+				     u8 *iv, void *control_word, u32 count)
+{
+	u32 initial = count & (cbc_fetch_blocks - 1);
+
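+	/* Same leading-remainder split as padlock_xcrypt_ecb(), CBC flavour. */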
+	if (count < cbc_fetch_blocks)
+		return cbc_crypt(input, output, key, iv, control_word, count);
+
+	count -= initial;
+
+	if (initial)
+		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
+			      : "+S" (input), "+D" (output), "+a" (iv)
+			      : "d" (control_word), "b" (key), "c" (initial));
+
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
+		      : "+S" (input), "+D" (output), "+a" (iv)
+		      : "d" (control_word), "b" (key), "c" (count));
+	return iv;
+}
+
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+	struct aes_ctx *ctx = aes_ctx(tfm);
+
+	padlock_reset_key(&ctx->cword.encrypt);
+	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
+	padlock_store_cword(&ctx->cword.encrypt);
+}
+
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+	struct aes_ctx *ctx = aes_ctx(tfm);
+
+	padlock_reset_key(&ctx->cword.encrypt);
+	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
+	padlock_store_cword(&ctx->cword.encrypt);
+}
+
+static struct crypto_alg aes_alg = {
+	.cra_name		=	"aes",
+	.cra_driver_name	=	"aes-padlock",
+	.cra_priority		=	PADLOCK_CRA_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct aes_ctx),
+	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
+	.cra_module		=	THIS_MODULE,
+	.cra_u			=	{
+		.cipher = {
+			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
+			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
+			.cia_setkey	   	= 	aes_set_key,
+			.cia_encrypt	 	=	aes_encrypt,
+			.cia_decrypt	  	=	aes_decrypt,
+		}
+	}
+};
+
+static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	padlock_reset_key(&ctx->cword.encrypt);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->E, &ctx->cword.encrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	padlock_store_cword(&ctx->cword.encrypt);
+
+	return err;
+}
+
+static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	padlock_reset_key(&ctx->cword.decrypt);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->D, &ctx->cword.decrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	padlock_store_cword(&ctx->cword.encrypt);
+
+	return err;
+}
+
+static struct crypto_alg ecb_aes_alg = {
+	.cra_name		=	"ecb(aes)",
+	.cra_driver_name	=	"ecb-aes-padlock",
+	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct aes_ctx),
+	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
+	.cra_type		=	&crypto_blkcipher_type,
+	.cra_module		=	THIS_MODULE,
+	.cra_u			=	{
+		.blkcipher = {
+			.min_keysize		=	AES_MIN_KEY_SIZE,
+			.max_keysize		=	AES_MAX_KEY_SIZE,
+			.setkey	   		= 	aes_set_key,
+			.encrypt		=	ecb_aes_encrypt,
+			.decrypt		=	ecb_aes_decrypt,
+		}
+	}
+};
+
+static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	padlock_reset_key(&ctx->cword.encrypt);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
+					    walk.dst.virt.addr, ctx->E,
+					    walk.iv, &ctx->cword.encrypt,
+					    nbytes / AES_BLOCK_SIZE);
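+		/*
+		 * The helper returns a pointer to the chaining value produced
+		 * for this chunk (presumably the last ciphertext block);
+		 * carry it over as the IV for the next chunk.
+		 */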
+		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	padlock_store_cword(&ctx->cword.decrypt);
+
+	return err;
+}
+
+static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+			   struct scatterlist *dst, struct scatterlist *src,
+			   unsigned int nbytes)
+{
+	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	padlock_reset_key(&ctx->cword.encrypt);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
+				   ctx->D, walk.iv, &ctx->cword.decrypt,
+				   nbytes / AES_BLOCK_SIZE);
+		nbytes &= AES_BLOCK_SIZE - 1;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	padlock_store_cword(&ctx->cword.encrypt);
+
+	return err;
+}
+
+static struct crypto_alg cbc_aes_alg = {
+	.cra_name		=	"cbc(aes)",
+	.cra_driver_name	=	"cbc-aes-padlock",
+	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct aes_ctx),
+	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
+	.cra_type		=	&crypto_blkcipher_type,
+	.cra_module		=	THIS_MODULE,
+	.cra_u			=	{
+		.blkcipher = {
+			.min_keysize		=	AES_MIN_KEY_SIZE,
+			.max_keysize		=	AES_MAX_KEY_SIZE,
+			.ivsize			=	AES_BLOCK_SIZE,
+			.setkey	   		= 	aes_set_key,
+			.encrypt		=	cbc_aes_encrypt,
+			.decrypt		=	cbc_aes_decrypt,
+		}
+	}
+};
+
+static const struct x86_cpu_id padlock_cpu_id[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);
+
+static int __init padlock_init(void)
+{
+	int ret;
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (!x86_match_cpu(padlock_cpu_id))
+		return -ENODEV;
+
+	if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
+		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+		return -ENODEV;
+	}
+
+	if ((ret = crypto_register_alg(&aes_alg)))
+		goto aes_err;
+
+	if ((ret = crypto_register_alg(&ecb_aes_alg)))
+		goto ecb_aes_err;
+
+	if ((ret = crypto_register_alg(&cbc_aes_alg)))
+		goto cbc_aes_err;
+
+	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+
+	if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
+		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
+		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
+		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
+	}
+
+out:
+	return ret;
+
+cbc_aes_err:
+	crypto_unregister_alg(&ecb_aes_alg);
+ecb_aes_err:
+	crypto_unregister_alg(&aes_alg);
+aes_err:
+	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
+	goto out;
+}
+
+static void __exit padlock_fini(void)
+{
+	crypto_unregister_alg(&cbc_aes_alg);
+	crypto_unregister_alg(&ecb_aes_alg);
+	crypto_unregister_alg(&aes_alg);
+}
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+
+MODULE_ALIAS_CRYPTO("aes");
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
new file mode 100644
index 0000000..21e5cae
--- /dev/null
+++ b/drivers/crypto/padlock-sha.c
@@ -0,0 +1,577 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for VIA PadLock hardware crypto engine.
+ *
+ * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/padlock.h>
+#include <crypto/sha.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include <asm/cpu_device_id.h>
+#include <asm/fpu/api.h>
+
+struct padlock_sha_desc {
+	struct shash_desc fallback;
+};
+
+struct padlock_sha_ctx {
+	struct crypto_shash *fallback;
+};
+
+static int padlock_sha_init(struct shash_desc *desc)
+{
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+
+	dctx->fallback.tfm = ctx->fallback;
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	return crypto_shash_init(&dctx->fallback);
+}
+
+static int padlock_sha_update(struct shash_desc *desc,
+			      const u8 *data, unsigned int length)
+{
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	return crypto_shash_update(&dctx->fallback, data, length);
+}
+
+static int padlock_sha_export(struct shash_desc *desc, void *out)
+{
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+
+	return crypto_shash_export(&dctx->fallback, out);
+}
+
+static int padlock_sha_import(struct shash_desc *desc, const void *in)
+{
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+
+	dctx->fallback.tfm = ctx->fallback;
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	return crypto_shash_import(&dctx->fallback, in);
+}
+
+static inline void padlock_output_block(uint32_t *src,
+		 	uint32_t *dst, size_t count)
+{
+	while (count--)
+		*dst++ = swab32(*src++);
+}
+
+static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
+			      unsigned int count, u8 *out)
+{
+	/* We can't store directly to *out as it may be unaligned. */
+	/* BTW Don't reduce the buffer size below 128 Bytes!
+	 *     PadLock microcode needs it that big. */
+	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+	struct sha1_state state;
+	unsigned int space;
+	unsigned int leftover;
+	int err;
+
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = crypto_shash_export(&dctx->fallback, &state);
+	if (err)
+		goto out;
+
+	if (state.count + count > ULONG_MAX)
+		return crypto_shash_finup(&dctx->fallback, in, count, out);
+
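+	/*
+	 * Re-align on a block boundary: either push enough bytes through the
+	 * fallback, or fold the buffered partial block back into the data
+	 * handed to the hardware, so the engine resumes from a block-aligned
+	 * intermediate state.
+	 */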
+	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
+	space =  SHA1_BLOCK_SIZE - leftover;
+	if (space) {
+		if (count > space) {
+			err = crypto_shash_update(&dctx->fallback, in, space) ?:
+			      crypto_shash_export(&dctx->fallback, &state);
+			if (err)
+				goto out;
+			count -= space;
+			in += space;
+		} else {
+			memcpy(state.buffer + leftover, in, count);
+			in = state.buffer;
+			count += leftover;
+			state.count &= ~(SHA1_BLOCK_SIZE - 1);
+		}
+	}
+
+	memcpy(result, &state.state, SHA1_DIGEST_SIZE);
+
+	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
+		      : \
+		      : "c"((unsigned long)state.count + count), \
+			"a"((unsigned long)state.count), \
+			"S"(in), "D"(result));
+
+	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+
+out:
+	return err;
+}
+
+static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
+{
+	u8 buf[4];
+
+	return padlock_sha1_finup(desc, buf, 0, out);
+}
+
+static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
+				unsigned int count, u8 *out)
+{
+	/* We can't store directly to *out as it may be unaligned. */
+	/* BTW Don't reduce the buffer size below 128 Bytes!
+	 *     PadLock microcode needs it that big. */
+	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+	struct sha256_state state;
+	unsigned int space;
+	unsigned int leftover;
+	int err;
+
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = crypto_shash_export(&dctx->fallback, &state);
+	if (err)
+		goto out;
+
+	if (state.count + count > ULONG_MAX)
+		return crypto_shash_finup(&dctx->fallback, in, count, out);
+
+	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
+	space =  SHA256_BLOCK_SIZE - leftover;
+	if (space) {
+		if (count > space) {
+			err = crypto_shash_update(&dctx->fallback, in, space) ?:
+			      crypto_shash_export(&dctx->fallback, &state);
+			if (err)
+				goto out;
+			count -= space;
+			in += space;
+		} else {
+			memcpy(state.buf + leftover, in, count);
+			in = state.buf;
+			count += leftover;
+			state.count &= ~(SHA256_BLOCK_SIZE - 1);
+		}
+	}
+
+	memcpy(result, &state.state, SHA256_DIGEST_SIZE);
+
+	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
+		      : \
+		      : "c"((unsigned long)state.count + count), \
+			"a"((unsigned long)state.count), \
+			"S"(in), "D"(result));
+
+	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
+
+out:
+	return err;
+}
+
+static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
+{
+	u8 buf[4];
+
+	return padlock_sha256_finup(desc, buf, 0, out);
+}
+
+static int padlock_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_shash *hash = __crypto_shash_cast(tfm);
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
+	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_shash *fallback_tfm;
+	int err = -ENOMEM;
+
+	/* Allocate a fallback and abort if it failed. */
+	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm)) {
+		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
+		       fallback_driver_name);
+		err = PTR_ERR(fallback_tfm);
+		goto out;
+	}
+
+	ctx->fallback = fallback_tfm;
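+	/* Our descriptor embeds the fallback's, so account for its state. */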
+	hash->descsize += crypto_shash_descsize(fallback_tfm);
+	return 0;
+
+out:
+	return err;
+}
+
+static void padlock_cra_exit(struct crypto_tfm *tfm)
+{
+	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_shash(ctx->fallback);
+}
+
+static struct shash_alg sha1_alg = {
+	.digestsize	=	SHA1_DIGEST_SIZE,
+	.init   	= 	padlock_sha_init,
+	.update 	=	padlock_sha_update,
+	.finup  	=	padlock_sha1_finup,
+	.final  	=	padlock_sha1_final,
+	.export		=	padlock_sha_export,
+	.import		=	padlock_sha_import,
+	.descsize	=	sizeof(struct padlock_sha_desc),
+	.statesize	=	sizeof(struct sha1_state),
+	.base		=	{
+		.cra_name		=	"sha1",
+		.cra_driver_name	=	"sha1-padlock",
+		.cra_priority		=	PADLOCK_CRA_PRIORITY,
+		.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		=	SHA1_BLOCK_SIZE,
+		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
+		.cra_module		=	THIS_MODULE,
+		.cra_init		=	padlock_cra_init,
+		.cra_exit		=	padlock_cra_exit,
+	}
+};
+
+static struct shash_alg sha256_alg = {
+	.digestsize	=	SHA256_DIGEST_SIZE,
+	.init   	= 	padlock_sha_init,
+	.update 	=	padlock_sha_update,
+	.finup  	=	padlock_sha256_finup,
+	.final  	=	padlock_sha256_final,
+	.export		=	padlock_sha_export,
+	.import		=	padlock_sha_import,
+	.descsize	=	sizeof(struct padlock_sha_desc),
+	.statesize	=	sizeof(struct sha256_state),
+	.base		=	{
+		.cra_name		=	"sha256",
+		.cra_driver_name	=	"sha256-padlock",
+		.cra_priority		=	PADLOCK_CRA_PRIORITY,
+		.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		=	SHA256_BLOCK_SIZE,
+		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
+		.cra_module		=	THIS_MODULE,
+		.cra_init		=	padlock_cra_init,
+		.cra_exit		=	padlock_cra_exit,
+	}
+};
+
+/* Add two shash_alg instances for the hardware-implemented
+ * multi-part hash supported by the VIA Nano processor. */
+static int padlock_sha1_init_nano(struct shash_desc *desc)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha1_state){
+		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+	};
+
+	return 0;
+}
+
+static int padlock_sha1_update_nano(struct shash_desc *desc,
+			const u8 *data,	unsigned int len)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial, done;
+	const u8 *src;
+	/* The PHE requires the output buffer to be 128 bytes long and 16-byte aligned */
+	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	partial = sctx->count & 0x3f;
+	sctx->count += len;
+	done = 0;
+	src = data;
+	memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);
+
+	if ((partial + len) >= SHA1_BLOCK_SIZE) {
+
+		/* Complete the partial block buffered in the state and hash it */
+		if (partial) {
+			done = -partial;
+			memcpy(sctx->buffer + partial, data,
+				done + SHA1_BLOCK_SIZE);
+			src = sctx->buffer;
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+			: "+S"(src), "+D"(dst) \
+			: "a"((long)-1), "c"((unsigned long)1));
+			done += SHA1_BLOCK_SIZE;
+			src = data + done;
+		}
+
+		/* Process the remaining full blocks of input data */
+		if (len - done >= SHA1_BLOCK_SIZE) {
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+			: "+S"(src), "+D"(dst)
+			: "a"((long)-1),
+			"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
+			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
+			src = data + done;
+		}
+		partial = 0;
+	}
+	memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
+	memcpy(sctx->buffer + partial, src, len - done);
+
+	return 0;
+}
+
+static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
+{
+	struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
+	unsigned int partial, padlen;
+	__be64 bits;
+	static const u8 padding[64] = { 0x80, };
+
+	bits = cpu_to_be64(state->count << 3);
+
+	/* Pad out to 56 mod 64 */
+	partial = state->count & 0x3f;
+	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
+	padlock_sha1_update_nano(desc, padding, padlen);
+
+	/* Append length field bytes */
+	padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));
+
+	/* Swap to output */
+	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);
+
+	return 0;
+}
+
+static int padlock_sha256_init_nano(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha256_state){
+		.state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \
+				SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7},
+	};
+
+	return 0;
+}
+
+static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
+			  unsigned int len)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial, done;
+	const u8 *src;
+	/* The PHE requires the output buffer to be 128 bytes long and 16-byte aligned */
+	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	partial = sctx->count & 0x3f;
+	sctx->count += len;
+	done = 0;
+	src = data;
+	memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);
+
+	if ((partial + len) >= SHA256_BLOCK_SIZE) {
+
+		/* Complete the partial block buffered in the state and hash it */
+		if (partial) {
+			done = -partial;
+			memcpy(sctx->buf + partial, data,
+				done + SHA256_BLOCK_SIZE);
+			src = sctx->buf;
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+			: "+S"(src), "+D"(dst)
+			: "a"((long)-1), "c"((unsigned long)1));
+			done += SHA256_BLOCK_SIZE;
+			src = data + done;
+		}
+
+		/* Process the remaining full blocks of input data */
+		if (len - done >= SHA256_BLOCK_SIZE) {
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+			: "+S"(src), "+D"(dst)
+			: "a"((long)-1),
+			"c"((unsigned long)((len - done) / 64)));
+			done += ((len - done) - (len - done) % 64);
+			src = data + done;
+		}
+		partial = 0;
+	}
+	memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
+	memcpy(sctx->buf + partial, src, len - done);
+
+	return 0;
+}
+
+static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
+{
+	struct sha256_state *state =
+		(struct sha256_state *)shash_desc_ctx(desc);
+	unsigned int partial, padlen;
+	__be64 bits;
+	static const u8 padding[64] = { 0x80, };
+
+	bits = cpu_to_be64(state->count << 3);
+
+	/* Pad out to 56 mod 64 */
+	partial = state->count & 0x3f;
+	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
+	padlock_sha256_update_nano(desc, padding, padlen);
+
+	/* Append length field bytes */
+	padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));
+
+	/* Swap to output */
+	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);
+
+	return 0;
+}
+
+static int padlock_sha_export_nano(struct shash_desc *desc,
+				void *out)
+{
+	int statesize = crypto_shash_statesize(desc->tfm);
+	void *sctx = shash_desc_ctx(desc);
+
+	memcpy(out, sctx, statesize);
+	return 0;
+}
+
+static int padlock_sha_import_nano(struct shash_desc *desc,
+				const void *in)
+{
+	int statesize = crypto_shash_statesize(desc->tfm);
+	void *sctx = shash_desc_ctx(desc);
+
+	memcpy(sctx, in, statesize);
+	return 0;
+}
+
+static struct shash_alg sha1_alg_nano = {
+	.digestsize	=	SHA1_DIGEST_SIZE,
+	.init		=	padlock_sha1_init_nano,
+	.update		=	padlock_sha1_update_nano,
+	.final		=	padlock_sha1_final_nano,
+	.export		=	padlock_sha_export_nano,
+	.import		=	padlock_sha_import_nano,
+	.descsize	=	sizeof(struct sha1_state),
+	.statesize	=	sizeof(struct sha1_state),
+	.base		=	{
+		.cra_name		=	"sha1",
+		.cra_driver_name	=	"sha1-padlock-nano",
+		.cra_priority		=	PADLOCK_CRA_PRIORITY,
+		.cra_blocksize		=	SHA1_BLOCK_SIZE,
+		.cra_module		=	THIS_MODULE,
+	}
+};
+
+static struct shash_alg sha256_alg_nano = {
+	.digestsize	=	SHA256_DIGEST_SIZE,
+	.init		=	padlock_sha256_init_nano,
+	.update		=	padlock_sha256_update_nano,
+	.final		=	padlock_sha256_final_nano,
+	.export		=	padlock_sha_export_nano,
+	.import		=	padlock_sha_import_nano,
+	.descsize	=	sizeof(struct sha256_state),
+	.statesize	=	sizeof(struct sha256_state),
+	.base		=	{
+		.cra_name		=	"sha256",
+		.cra_driver_name	=	"sha256-padlock-nano",
+		.cra_priority		=	PADLOCK_CRA_PRIORITY,
+		.cra_blocksize		=	SHA256_BLOCK_SIZE,
+		.cra_module		=	THIS_MODULE,
+	}
+};
+
+static const struct x86_cpu_id padlock_sha_ids[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_PHE),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids);
+
+static int __init padlock_init(void)
+{
+	int rc = -ENODEV;
+	struct cpuinfo_x86 *c = &cpu_data(0);
+	struct shash_alg *sha1;
+	struct shash_alg *sha256;
+
+	if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
+		return -ENODEV;
+
+	/* Register the Nano-specific algorithms when running on a VIA Nano
+	 * processor, or else just do as before. */
+	if (c->x86_model < 0x0f) {
+		sha1 = &sha1_alg;
+		sha256 = &sha256_alg;
+	} else {
+		sha1 = &sha1_alg_nano;
+		sha256 = &sha256_alg_nano;
+	}
+
+	rc = crypto_register_shash(sha1);
+	if (rc)
+		goto out;
+
+	rc = crypto_register_shash(sha256);
+	if (rc)
+		goto out_unreg1;
+
+	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");
+
+	return 0;
+
+out_unreg1:
+	crypto_unregister_shash(sha1);
+
+out:
+	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
+	return rc;
+}
+
+static void __exit padlock_fini(void)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_model >= 0x0f) {
+		crypto_unregister_shash(&sha1_alg_nano);
+		crypto_unregister_shash(&sha256_alg_nano);
+	} else {
+		crypto_unregister_shash(&sha1_alg);
+		crypto_unregister_shash(&sha256_alg);
+	}
+}
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+
+MODULE_ALIAS_CRYPTO("sha1-all");
+MODULE_ALIAS_CRYPTO("sha256-all");
+MODULE_ALIAS_CRYPTO("sha1-padlock");
+MODULE_ALIAS_CRYPTO("sha256-padlock");
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
new file mode 100644
index 0000000..321d5e2
--- /dev/null
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -0,0 +1,1812 @@
+/*
+ * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <crypto/internal/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/rtnetlink.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "picoxcell_crypto_regs.h"
+
+/*
+ * The threshold for the number of entries in the CMD FIFO available before
+ * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
+ * number of interrupts raised to the CPU.
+ */
+#define CMD0_IRQ_THRESHOLD   1
+
+/*
+ * The timeout period (in jiffies) for a PDU. When the number of PDUs in
+ * flight is greater than the STAT_IRQ_THRESHOLD or 0 the timer is disabled.
+ * When there are packets in flight but lower than the threshold, we enable
+ * the timer and at expiry, attempt to remove any processed packets from the
+ * queue and if there are still packets left, schedule the timer again.
+ */
+#define PACKET_TIMEOUT	    1
+
+/* The priority to register each algorithm with. */
+#define SPACC_CRYPTO_ALG_PRIORITY	10000
+
+#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN	16
+#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
+#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ	64
+#define SPACC_CRYPTO_IPSEC_MAX_CTXS	32
+#define SPACC_CRYPTO_IPSEC_FIFO_SZ	32
+#define SPACC_CRYPTO_L2_CIPHER_PG_SZ	64
+#define SPACC_CRYPTO_L2_HASH_PG_SZ	64
+#define SPACC_CRYPTO_L2_MAX_CTXS	128
+#define SPACC_CRYPTO_L2_FIFO_SZ		128
+
+#define MAX_DDT_LEN			16
+
+/* DDT format. This must match the hardware DDT format exactly. */
+struct spacc_ddt {
+	dma_addr_t	p;
+	u32		len;
+};
+
+/*
+ * Asynchronous crypto request structure.
+ *
+ * This structure defines a request that is either queued for processing or
+ * being processed.
+ */
+struct spacc_req {
+	struct list_head		list;
+	struct spacc_engine		*engine;
+	struct crypto_async_request	*req;
+	int				result;
+	bool				is_encrypt;
+	unsigned			ctx_id;
+	dma_addr_t			src_addr, dst_addr;
+	struct spacc_ddt		*src_ddt, *dst_ddt;
+	void				(*complete)(struct spacc_req *req);
+};
+
+struct spacc_aead {
+	unsigned long			ctrl_default;
+	unsigned long			type;
+	struct aead_alg			alg;
+	struct spacc_engine		*engine;
+	struct list_head		entry;
+	int				key_offs;
+	int				iv_offs;
+};
+
+struct spacc_engine {
+	void __iomem			*regs;
+	struct list_head		pending;
+	int				next_ctx;
+	spinlock_t			hw_lock;
+	int				in_flight;
+	struct list_head		completed;
+	struct list_head		in_progress;
+	struct tasklet_struct		complete;
+	unsigned long			fifo_sz;
+	void __iomem			*cipher_ctx_base;
+	void __iomem			*hash_key_base;
+	struct spacc_alg		*algs;
+	unsigned			num_algs;
+	struct list_head		registered_algs;
+	struct spacc_aead		*aeads;
+	unsigned			num_aeads;
+	struct list_head		registered_aeads;
+	size_t				cipher_pg_sz;
+	size_t				hash_pg_sz;
+	const char			*name;
+	struct clk			*clk;
+	struct device			*dev;
+	unsigned			max_ctxs;
+	struct timer_list		packet_timeout;
+	unsigned			stat_irq_thresh;
+	struct dma_pool			*req_pool;
+};
+
+/* Algorithm type mask. */
+#define SPACC_CRYPTO_ALG_MASK		0x7
+
+/* SPACC definition of a crypto algorithm. */
+struct spacc_alg {
+	unsigned long			ctrl_default;
+	unsigned long			type;
+	struct crypto_alg		alg;
+	struct spacc_engine		*engine;
+	struct list_head		entry;
+	int				key_offs;
+	int				iv_offs;
+};
+
+/* Generic context structure for any algorithm type. */
+struct spacc_generic_ctx {
+	struct spacc_engine		*engine;
+	int				flags;
+	int				key_offs;
+	int				iv_offs;
+};
+
+/* Block cipher context. */
+struct spacc_ablk_ctx {
+	struct spacc_generic_ctx	generic;
+	u8				key[AES_MAX_KEY_SIZE];
+	u8				key_len;
+	/*
+	 * The fallback cipher. If the operation can't be done in hardware,
+	 * fallback to a software version.
+	 */
+	struct crypto_skcipher		*sw_cipher;
+};
+
+/* AEAD cipher context. */
+struct spacc_aead_ctx {
+	struct spacc_generic_ctx	generic;
+	u8				cipher_key[AES_MAX_KEY_SIZE];
+	u8				hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
+	u8				cipher_key_len;
+	u8				hash_key_len;
+	struct crypto_aead		*sw_cipher;
+};
+
+static int spacc_ablk_submit(struct spacc_req *req);
+
+static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
+{
+	return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
+}
+
+static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
+{
+	return container_of(alg, struct spacc_aead, alg);
+}
+
+static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
+{
+	u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
+
+	return fifo_stat & SPA_FIFO_CMD_FULL;
+}
+
+/*
+ * Given a cipher context, and a context number, get the base address of the
+ * context page.
+ *
+ * Returns the address of the context page where the key/context may
+ * be written.
+ */
+static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
+						unsigned indx,
+						bool is_cipher_ctx)
+{
+	return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
+			(indx * ctx->engine->cipher_pg_sz) :
+		ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
+}
+
+/* The context pages can only be written with 32-bit accesses. */
+static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
+				 unsigned count)
+{
+	const u32 *src32 = (const u32 *) src;
+
+	while (count--)
+		writel(*src32++, dst++);
+}
+
+static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
+				   void __iomem *page_addr, const u8 *key,
+				   size_t key_len, const u8 *iv, size_t iv_len)
+{
+	void __iomem *key_ptr = page_addr + ctx->key_offs;
+	void __iomem *iv_ptr = page_addr + ctx->iv_offs;
+
+	memcpy_toio32(key_ptr, key, key_len / 4);
+	memcpy_toio32(iv_ptr, iv, iv_len / 4);
+}
+
+/*
+ * Load a context into the engines context memory.
+ *
+ * Returns the index of the context page where the context was loaded.
+ */
+static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
+			       const u8 *ciph_key, size_t ciph_len,
+			       const u8 *iv, size_t ivlen, const u8 *hash_key,
+			       size_t hash_len)
+{
+	unsigned indx = ctx->engine->next_ctx++;
+	void __iomem *ciph_page_addr, *hash_page_addr;
+
+	ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
+	hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);
+
+	ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
+	spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
+			       ivlen);
+	writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
+	       (1 << SPA_KEY_SZ_CIPHER_OFFSET),
+	       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
+
+	if (hash_key) {
+		memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
+		writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
+		       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
+	}
+
+	return indx;
+}
+
+static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
+{
+	ddt->p = phys;
+	ddt->len = len;
+}
+
+/*
+ * Take a crypto request and scatterlists for the data and turn them into DDTs
+ * for passing to the crypto engines. This also DMA maps the data so that the
+ * crypto engines can DMA to/from them.
+ */
+static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
+					 struct scatterlist *payload,
+					 unsigned nbytes,
+					 enum dma_data_direction dir,
+					 dma_addr_t *ddt_phys)
+{
+	unsigned mapped_ents;
+	struct scatterlist *cur;
+	struct spacc_ddt *ddt;
+	int i;
+	int nents;
+
+	nents = sg_nents_for_len(payload, nbytes);
+	if (nents < 0) {
+		dev_err(engine->dev, "Invalid numbers of SG.\n");
+		return NULL;
+	}
+	mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);
+
+	if (mapped_ents + 1 > MAX_DDT_LEN)
+		goto out;
+
+	ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
+	if (!ddt)
+		goto out;
+
+	for_each_sg(payload, cur, mapped_ents, i)
+		ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
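+	/* A zero-length entry marks the end of the DDT list for the engine. */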
+	ddt_set(&ddt[mapped_ents], 0, 0);
+
+	return ddt;
+
+out:
+	dma_unmap_sg(engine->dev, payload, nents, dir);
+	return NULL;
+}
+
+static int spacc_aead_make_ddts(struct aead_request *areq)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct spacc_req *req = aead_request_ctx(areq);
+	struct spacc_engine *engine = req->engine;
+	struct spacc_ddt *src_ddt, *dst_ddt;
+	unsigned total;
+	int src_nents, dst_nents;
+	struct scatterlist *cur;
+	int i, dst_ents, src_ents;
+
+	total = areq->assoclen + areq->cryptlen;
+	if (req->is_encrypt)
+		total += crypto_aead_authsize(aead);
+
+	src_nents = sg_nents_for_len(areq->src, total);
+	if (src_nents < 0) {
+		dev_err(engine->dev, "Invalid numbers of src SG.\n");
+		return src_nents;
+	}
+	if (src_nents + 1 > MAX_DDT_LEN)
+		return -E2BIG;
+
+	dst_nents = 0;
+	if (areq->src != areq->dst) {
+		dst_nents = sg_nents_for_len(areq->dst, total);
+		if (dst_nents < 0) {
+			dev_err(engine->dev, "Invalid numbers of dst SG.\n");
+			return dst_nents;
+		}
+		if (dst_nents + 1 > MAX_DDT_LEN)
+			return -E2BIG;
+	}
+
+	src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
+	if (!src_ddt)
+		goto err;
+
+	dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
+	if (!dst_ddt)
+		goto err_free_src;
+
+	req->src_ddt = src_ddt;
+	req->dst_ddt = dst_ddt;
+
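+	/*
+	 * Map the scatterlists: separate TO_DEVICE/FROM_DEVICE mappings for
+	 * out-of-place requests, or a single BIDIRECTIONAL mapping when the
+	 * source and destination share the same scatterlist.
+	 */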
+	if (dst_nents) {
+		src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
+				      DMA_TO_DEVICE);
+		if (!src_ents)
+			goto err_free_dst;
+
+		dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
+				      DMA_FROM_DEVICE);
+
+		if (!dst_ents) {
+			dma_unmap_sg(engine->dev, areq->src, src_nents,
+				     DMA_TO_DEVICE);
+			goto err_free_dst;
+		}
+	} else {
+		src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
+				      DMA_BIDIRECTIONAL);
+		if (!src_ents)
+			goto err_free_dst;
+		dst_ents = src_ents;
+	}
+
+	/*
+	 * Now fill in the DDT entries for the source and destination and
+	 * terminate each table with a zero-length entry.
+	 */
+	for_each_sg(areq->src, cur, src_ents, i)
+		ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
+
+	/* For decryption we need to skip the associated data. */
+	total = req->is_encrypt ? 0 : areq->assoclen;
+	for_each_sg(areq->dst, cur, dst_ents, i) {
+		unsigned len = sg_dma_len(cur);
+
+		if (len <= total) {
+			total -= len;
+			continue;
+		}
+
+		ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
+	}
+
+	ddt_set(src_ddt, 0, 0);
+	ddt_set(dst_ddt, 0, 0);
+
+	return 0;
+
+err_free_dst:
+	dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
+err_free_src:
+	dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
+err:
+	return -ENOMEM;
+}
+
+static void spacc_aead_free_ddts(struct spacc_req *req)
+{
+	struct aead_request *areq = container_of(req->req, struct aead_request,
+						 base);
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	unsigned total = areq->assoclen + areq->cryptlen +
+			 (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
+	struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
+	struct spacc_engine *engine = aead_ctx->generic.engine;
+	int nents = sg_nents_for_len(areq->src, total);
+
+	/*
+	 * sg_nents_for_len should not fail here since it succeeded when the
+	 * scatterlist was originally mapped.
+	 */
+	if (unlikely(nents < 0)) {
+		dev_err(engine->dev, "Invalid numbers of src SG.\n");
+		return;
+	}
+
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
+		nents = sg_nents_for_len(areq->dst, total);
+		if (unlikely(nents < 0)) {
+			dev_err(engine->dev, "Invalid numbers of dst SG.\n");
+			return;
+		}
+		dma_unmap_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE);
+	} else
+		dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
+
+	dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
+	dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
+}
+
+static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
+			   dma_addr_t ddt_addr, struct scatterlist *payload,
+			   unsigned nbytes, enum dma_data_direction dir)
+{
+	int nents = sg_nents_for_len(payload, nbytes);
+
+	if (nents < 0) {
+		dev_err(req->engine->dev, "Invalid numbers of SG.\n");
+		return;
+	}
+
+	dma_unmap_sg(req->engine->dev, payload, nents, dir);
+	dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
+}
+
+static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+			     unsigned int keylen)
+{
+	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_authenc_keys keys;
+	int err;
+
+	crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
+					      CRYPTO_TFM_REQ_MASK);
+	err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
+	crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
+	crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
+				   CRYPTO_TFM_RES_MASK);
+	if (err)
+		return err;
+
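+	/*
+	 * Split the authenc() key blob into the cipher and authentication
+	 * keys and cache them; they are loaded into the context pages when
+	 * each request is submitted.
+	 */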
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+		goto badkey;
+
+	if (keys.enckeylen > AES_MAX_KEY_SIZE)
+		goto badkey;
+
+	if (keys.authkeylen > sizeof(ctx->hash_ctx))
+		goto badkey;
+
+	memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
+	ctx->cipher_key_len = keys.enckeylen;
+
+	memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
+	ctx->hash_key_len = keys.authkeylen;
+
+	memzero_explicit(&keys, sizeof(keys));
+	return 0;
+
+badkey:
+	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	memzero_explicit(&keys, sizeof(keys));
+	return -EINVAL;
+}
+
+static int spacc_aead_setauthsize(struct crypto_aead *tfm,
+				  unsigned int authsize)
+{
+	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
+
+	return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
+}
+
+/*
+ * Check if an AEAD request requires a fallback operation. Some requests can't
+ * be completed in hardware because the hardware may not support certain key
+ * sizes. In these cases we need to complete the request in software.
+ */
+static int spacc_aead_need_fallback(struct aead_request *aead_req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
+	struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
+
+	/*
+	 * If the key length is not supported by the hardware then we need to
+	 * fall back to the software implementation.
+	 */
+	if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
+	    SPA_CTRL_CIPH_ALG_AES &&
+	    ctx->cipher_key_len != AES_KEYSIZE_128 &&
+	    ctx->cipher_key_len != AES_KEYSIZE_256)
+		return 1;
+
+	return 0;
+}
+
+static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
+				  bool is_encrypt)
+{
+	struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
+	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
+	struct aead_request *subreq = aead_request_ctx(req);
+
+	aead_request_set_tfm(subreq, ctx->sw_cipher);
+	aead_request_set_callback(subreq, req->base.flags,
+				  req->base.complete, req->base.data);
+	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+			       req->iv);
+	aead_request_set_ad(subreq, req->assoclen);
+
+	return is_encrypt ? crypto_aead_encrypt(subreq) :
+			    crypto_aead_decrypt(subreq);
+}
+
+static void spacc_aead_complete(struct spacc_req *req)
+{
+	spacc_aead_free_ddts(req);
+	req->req->complete(req->req, req->result);
+}
+
+static int spacc_aead_submit(struct spacc_req *req)
+{
+	struct aead_request *aead_req =
+		container_of(req->req, struct aead_request, base);
+	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+	unsigned int authsize = crypto_aead_authsize(aead);
+	struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
+	struct spacc_engine *engine = ctx->generic.engine;
+	u32 ctrl, proc_len, assoc_len;
+
+	req->result = -EINPROGRESS;
+	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
+		ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
+		ctx->hash_ctx, ctx->hash_key_len);
+
+	/* Set the source and destination DDT pointers. */
+	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
+	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
+	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
+
+	assoc_len = aead_req->assoclen;
+	proc_len = aead_req->cryptlen + assoc_len;
+
+	/*
+	 * If we are decrypting, we need to take the length of the ICV out of
+	 * the processing length.
+	 */
+	if (!req->is_encrypt)
+		proc_len -= authsize;
+
+	writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
+	writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
+	writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
+	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
+	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
+
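+	/*
+	 * Build the control word from the algorithm/mode defaults, the
+	 * context page index and ICV append, plus AAD copy for encryption or
+	 * key expansion for decryption. Writing it to the control register
+	 * submits the packet to the engine.
+	 */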
+	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
+		(1 << SPA_CTRL_ICV_APPEND);
+	if (req->is_encrypt)
+		ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
+	else
+		ctrl |= (1 << SPA_CTRL_KEY_EXP);
+
+	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
+
+	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
+
+	return -EINPROGRESS;
+}
+
+static int spacc_req_submit(struct spacc_req *req);
+
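+/*
+ * Move requests from the pending list into the hardware while there is space
+ * in the command FIFO, transferring them to the in_progress list as they are
+ * submitted.
+ */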
+static void spacc_push(struct spacc_engine *engine)
+{
+	struct spacc_req *req;
+
+	while (!list_empty(&engine->pending) &&
+	       engine->in_flight + 1 <= engine->fifo_sz) {
+
+		++engine->in_flight;
+		req = list_first_entry(&engine->pending, struct spacc_req,
+				       list);
+		list_move_tail(&req->list, &engine->in_progress);
+
+		req->result = spacc_req_submit(req);
+	}
+}
+
+/*
+ * Set up an AEAD request for processing. This will configure the engine,
+ * load the context and then start the packet processing.
+ */
+static int spacc_aead_setup(struct aead_request *req,
+			    unsigned alg_type, bool is_encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct spacc_engine *engine = to_spacc_aead(alg)->engine;
+	struct spacc_req *dev_req = aead_request_ctx(req);
+	int err;
+	unsigned long flags;
+
+	dev_req->req		= &req->base;
+	dev_req->is_encrypt	= is_encrypt;
+	dev_req->result		= -EBUSY;
+	dev_req->engine		= engine;
+	dev_req->complete	= spacc_aead_complete;
+
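+	/*
+	 * Fall back to the software implementation if the hardware cannot
+	 * handle the key length or if the scatterlists need more than
+	 * MAX_DDT_LEN entries (-E2BIG from spacc_aead_make_ddts).
+	 */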
+	if (unlikely(spacc_aead_need_fallback(req) ||
+		     ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
+		return spacc_aead_do_fallback(req, alg_type, is_encrypt);
+
+	if (err)
+		goto out;
+
+	err = -EINPROGRESS;
+	spin_lock_irqsave(&engine->hw_lock, flags);
+	if (unlikely(spacc_fifo_cmd_full(engine)) ||
+	    engine->in_flight + 1 > engine->fifo_sz) {
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+			err = -EBUSY;
+			spin_unlock_irqrestore(&engine->hw_lock, flags);
+			goto out_free_ddts;
+		}
+		list_add_tail(&dev_req->list, &engine->pending);
+	} else {
+		list_add_tail(&dev_req->list, &engine->pending);
+		spacc_push(engine);
+	}
+	spin_unlock_irqrestore(&engine->hw_lock, flags);
+
+	goto out;
+
+out_free_ddts:
+	spacc_aead_free_ddts(dev_req);
+out:
+	return err;
+}
+
+static int spacc_aead_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
+
+	return spacc_aead_setup(req, alg->type, 1);
+}
+
+static int spacc_aead_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct spacc_aead  *alg = to_spacc_aead(crypto_aead_alg(aead));
+
+	return spacc_aead_setup(req, alg->type, 0);
+}
+
+/*
+ * Initialise a new AEAD context. This is responsible for allocating the
+ * fallback cipher and setting up the generic context state.
+ */
+static int spacc_aead_cra_init(struct crypto_aead *tfm)
+{
+	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
+	struct spacc_engine *engine = spacc_alg->engine;
+
+	ctx->generic.flags = spacc_alg->type;
+	ctx->generic.engine = engine;
+	ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
+					   CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->sw_cipher))
+		return PTR_ERR(ctx->sw_cipher);
+	ctx->generic.key_offs = spacc_alg->key_offs;
+	ctx->generic.iv_offs = spacc_alg->iv_offs;
+
+	crypto_aead_set_reqsize(
+		tfm,
+		max(sizeof(struct spacc_req),
+		    sizeof(struct aead_request) +
+		    crypto_aead_reqsize(ctx->sw_cipher)));
+
+	return 0;
+}
+
+/*
+ * Destructor for an AEAD context. This is called when the transform is freed
+ * and must free the fallback cipher.
+ */
+static void spacc_aead_cra_exit(struct crypto_aead *tfm)
+{
+	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_free_aead(ctx->sw_cipher);
+}
+
+/*
+ * Set the DES key for a block cipher transform. This also performs weak key
+ * checking if the transform has requested it.
+ */
+static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			    unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+
+	if (len > DES3_EDE_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if (unlikely(!des_ekey(tmp, key)) &&
+	    (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, len);
+	ctx->key_len = len;
+
+	return 0;
+}
+
+/*
+ * Set the key for an AES block cipher. Some key lengths are not supported in
+ * hardware so this must also check whether a fallback is needed.
+ */
+static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			    unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+	int err = 0;
+
+	if (len > AES_MAX_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	/*
+	 * The IPSec engine only supports 128 and 256 bit AES keys. If we get a
+	 * request for any other size (192 bits) then we need to do a software
+	 * fallback.
+	 */
+	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
+		if (!ctx->sw_cipher)
+			return -EINVAL;
+
+		/*
+		 * Set the fallback transform to use the same request flags as
+		 * the hardware transform.
+		 */
+		crypto_skcipher_clear_flags(ctx->sw_cipher,
+					    CRYPTO_TFM_REQ_MASK);
+		crypto_skcipher_set_flags(ctx->sw_cipher,
+					  cipher->base.crt_flags &
+					  CRYPTO_TFM_REQ_MASK);
+
+		err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
+
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |=
+			crypto_skcipher_get_flags(ctx->sw_cipher) &
+			CRYPTO_TFM_RES_MASK;
+
+		if (err)
+			goto sw_setkey_failed;
+	}
+
+	memcpy(ctx->key, key, len);
+	ctx->key_len = len;
+
+sw_setkey_failed:
+	return err;
+}
+
+static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
+				  const u8 *key, unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+	int err = 0;
+
+	if (len > AES_MAX_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		err = -EINVAL;
+		goto out;
+	}
+
+	memcpy(ctx->key, key, len);
+	ctx->key_len = len;
+
+out:
+	return err;
+}
+
+static int spacc_ablk_need_fallback(struct spacc_req *req)
+{
+	struct spacc_ablk_ctx *ctx;
+	struct crypto_tfm *tfm = req->req->tfm;
+	struct crypto_alg *alg = req->req->tfm->__crt_alg;
+	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+
+	ctx = crypto_tfm_ctx(tfm);
+
+	return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
+			SPA_CTRL_CIPH_ALG_AES &&
+			ctx->key_len != AES_KEYSIZE_128 &&
+			ctx->key_len != AES_KEYSIZE_256;
+}
+
+static void spacc_ablk_complete(struct spacc_req *req)
+{
+	struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
+
+	if (ablk_req->src != ablk_req->dst) {
+		spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
+			       ablk_req->nbytes, DMA_TO_DEVICE);
+		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
+			       ablk_req->nbytes, DMA_FROM_DEVICE);
+	} else
+		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
+			       ablk_req->nbytes, DMA_BIDIRECTIONAL);
+
+	req->req->complete(req->req, req->result);
+}
+
+static int spacc_ablk_submit(struct spacc_req *req)
+{
+	struct crypto_tfm *tfm = req->req->tfm;
+	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
+	struct crypto_alg *alg = req->req->tfm->__crt_alg;
+	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+	struct spacc_engine *engine = ctx->generic.engine;
+	u32 ctrl;
+
+	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
+		ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
+		NULL, 0);
+
+	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
+	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
+	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
+
+	writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
+	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
+	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
+	writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);
+
+	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
+		(req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
+		 (1 << SPA_CTRL_KEY_EXP));
+
+	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
+
+	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
+
+	return -EINPROGRESS;
+}
+
+static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
+				  unsigned alg_type, bool is_encrypt)
+{
+	struct crypto_tfm *old_tfm =
+	    crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
+	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
+	int err;
+
+	/*
+	 * Run the request through the software fallback transform using an
+	 * on-stack subrequest; the original request and its transform are
+	 * left untouched.
+	 */
+	skcipher_request_set_tfm(subreq, ctx->sw_cipher);
+	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst,
+				   req->nbytes, req->info);
+	err = is_encrypt ? crypto_skcipher_encrypt(subreq) :
+			   crypto_skcipher_decrypt(subreq);
+	skcipher_request_zero(subreq);
+
+	return err;
+}
+
+static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
+			    bool is_encrypt)
+{
+	struct crypto_alg *alg = req->base.tfm->__crt_alg;
+	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
+	struct spacc_req *dev_req = ablkcipher_request_ctx(req);
+	unsigned long flags;
+	int err = -ENOMEM;
+
+	dev_req->req		= &req->base;
+	dev_req->is_encrypt	= is_encrypt;
+	dev_req->engine		= engine;
+	dev_req->complete	= spacc_ablk_complete;
+	dev_req->result		= -EINPROGRESS;
+
+	if (unlikely(spacc_ablk_need_fallback(dev_req)))
+		return spacc_ablk_do_fallback(req, alg_type, is_encrypt);
+
+	/*
+	 * Create the DDTs for the engine. If the source and destination are
+	 * the same then we can optimize by reusing a single DDT.
+	 */
+	if (req->src != req->dst) {
+		dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
+			req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
+		if (!dev_req->src_ddt)
+			goto out;
+
+		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
+			req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
+		if (!dev_req->dst_ddt)
+			goto out_free_src;
+	} else {
+		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
+			req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
+		if (!dev_req->dst_ddt)
+			goto out;
+
+		dev_req->src_ddt = NULL;
+		dev_req->src_addr = dev_req->dst_addr;
+	}
+
+	err = -EINPROGRESS;
+	spin_lock_irqsave(&engine->hw_lock, flags);
+	/*
+	 * Check whether the engine will accept the operation now. If it won't
+	 * then either add it to the end of the pending list if we can
+	 * backlog, or bail out with an error.
+	 */
+	if (unlikely(spacc_fifo_cmd_full(engine)) ||
+	    engine->in_flight + 1 > engine->fifo_sz) {
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+			err = -EBUSY;
+			spin_unlock_irqrestore(&engine->hw_lock, flags);
+			goto out_free_ddts;
+		}
+		list_add_tail(&dev_req->list, &engine->pending);
+	} else {
+		list_add_tail(&dev_req->list, &engine->pending);
+		spacc_push(engine);
+	}
+	spin_unlock_irqrestore(&engine->hw_lock, flags);
+
+	goto out;
+
+out_free_ddts:
+	spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
+		       req->nbytes, req->src == req->dst ?
+		       DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
+out_free_src:
+	if (req->src != req->dst)
+		spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
+			       req->src, req->nbytes, DMA_TO_DEVICE);
+out:
+	return err;
+}
+
+static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
+{
+	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+	struct spacc_engine *engine = spacc_alg->engine;
+
+	ctx->generic.flags = spacc_alg->type;
+	ctx->generic.engine = engine;
+	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+		ctx->sw_cipher = crypto_alloc_skcipher(
+			alg->cra_name, 0, CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(ctx->sw_cipher)) {
+			dev_warn(engine->dev, "failed to allocate fallback for %s\n",
+				 alg->cra_name);
+			return PTR_ERR(ctx->sw_cipher);
+		}
+	}
+	ctx->generic.key_offs = spacc_alg->key_offs;
+	ctx->generic.iv_offs = spacc_alg->iv_offs;
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);
+
+	return 0;
+}
+
+static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
+{
+	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_skcipher(ctx->sw_cipher);
+}
+
+static int spacc_ablk_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+
+	return spacc_ablk_setup(req, alg->type, 1);
+}
+
+static int spacc_ablk_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+
+	return spacc_ablk_setup(req, alg->type, 0);
+}
+
+static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
+{
+	return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
+		SPA_FIFO_STAT_EMPTY;
+}
+
+static void spacc_process_done(struct spacc_engine *engine)
+{
+	struct spacc_req *req;
+	unsigned long flags;
+
+	spin_lock_irqsave(&engine->hw_lock, flags);
+
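+	/*
+	 * Drain the status FIFO: each popped entry corresponds to the oldest
+	 * request on the in_progress list, which is moved to the completed
+	 * list for the completion tasklet to finish off.
+	 */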
+	while (!spacc_fifo_stat_empty(engine)) {
+		req = list_first_entry(&engine->in_progress, struct spacc_req,
+				       list);
+		list_move_tail(&req->list, &engine->completed);
+		--engine->in_flight;
+
+		/* POP the status register. */
+		writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
+		req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
+		     SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;
+
+		/*
+		 * Convert the SPAcc error status into the standard POSIX error
+		 * codes.
+		 */
+		if (unlikely(req->result)) {
+			switch (req->result) {
+			case SPA_STATUS_ICV_FAIL:
+				req->result = -EBADMSG;
+				break;
+
+			case SPA_STATUS_MEMORY_ERROR:
+				dev_warn(engine->dev,
+					 "memory error triggered\n");
+				req->result = -EFAULT;
+				break;
+
+			case SPA_STATUS_BLOCK_ERROR:
+				dev_warn(engine->dev,
+					 "block error triggered\n");
+				req->result = -EIO;
+				break;
+			}
+		}
+	}
+
+	tasklet_schedule(&engine->complete);
+
+	spin_unlock_irqrestore(&engine->hw_lock, flags);
+}
+
+static irqreturn_t spacc_spacc_irq(int irq, void *dev)
+{
+	struct spacc_engine *engine = (struct spacc_engine *)dev;
+	u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);
+
+	writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
+	spacc_process_done(engine);
+
+	return IRQ_HANDLED;
+}
+
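+/*
+ * Packet timeout handler: process any completed requests that have not yet
+ * raised the status-count interrupt.
+ */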
+static void spacc_packet_timeout(struct timer_list *t)
+{
+	struct spacc_engine *engine = from_timer(engine, t, packet_timeout);
+
+	spacc_process_done(engine);
+}
+
+static int spacc_req_submit(struct spacc_req *req)
+{
+	struct crypto_alg *alg = req->req->tfm->__crt_alg;
+
+	if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
+		return spacc_aead_submit(req);
+	else
+		return spacc_ablk_submit(req);
+}
+
+static void spacc_spacc_complete(unsigned long data)
+{
+	struct spacc_engine *engine = (struct spacc_engine *)data;
+	struct spacc_req *req, *tmp;
+	unsigned long flags;
+	LIST_HEAD(completed);
+
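+	/*
+	 * Splice off the completed requests under the lock, refill the
+	 * engine from the pending list, then run the completion callbacks
+	 * with the lock dropped.
+	 */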
+	spin_lock_irqsave(&engine->hw_lock, flags);
+
+	list_splice_init(&engine->completed, &completed);
+	spacc_push(engine);
+	if (engine->in_flight)
+		mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
+
+	spin_unlock_irqrestore(&engine->hw_lock, flags);
+
+	list_for_each_entry_safe(req, tmp, &completed, list) {
+		list_del(&req->list);
+		req->complete(req);
+	}
+}
+
+#ifdef CONFIG_PM
+static int spacc_suspend(struct device *dev)
+{
+	struct spacc_engine *engine = dev_get_drvdata(dev);
+
+	/*
+	 * We only support standby mode. All we have to do is gate the clock to
+	 * the SPAcc. The hardware will preserve its state until we turn it
+	 * back on.
+	 */
+	clk_disable(engine->clk);
+
+	return 0;
+}
+
+static int spacc_resume(struct device *dev)
+{
+	struct spacc_engine *engine = dev_get_drvdata(dev);
+
+	return clk_enable(engine->clk);
+}
+
+static const struct dev_pm_ops spacc_pm_ops = {
+	.suspend	= spacc_suspend,
+	.resume		= spacc_resume,
+};
+#endif /* CONFIG_PM */
+
+static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
+{
+	return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
+}
+
+static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct spacc_engine *engine = spacc_dev_to_engine(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
+}
+
+static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t len)
+{
+	struct spacc_engine *engine = spacc_dev_to_engine(dev);
+	unsigned long thresh;
+
+	if (kstrtoul(buf, 0, &thresh))
+		return -EINVAL;
+
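+	/* Threshold must be at least 1 and below the FIFO depth. */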
+	thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);
+
+	engine->stat_irq_thresh = thresh;
+	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
+	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
+
+	return len;
+}
+static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
+		   spacc_stat_irq_thresh_store);
+
+static struct spacc_alg ipsec_engine_algs[] = {
+	{
+		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
+		.key_offs = 0,
+		.iv_offs = AES_MAX_KEY_SIZE,
+		.alg = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "cbc-aes-picoxcell",
+			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY |
+				     CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_NEED_FALLBACK,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_module = THIS_MODULE,
+			.cra_ablkcipher = {
+				.setkey = spacc_aes_setkey,
+				.encrypt = spacc_ablk_encrypt,
+				.decrypt = spacc_ablk_decrypt,
+				.min_keysize = AES_MIN_KEY_SIZE,
+				.max_keysize = AES_MAX_KEY_SIZE,
+				.ivsize = AES_BLOCK_SIZE,
+			},
+			.cra_init = spacc_ablk_cra_init,
+			.cra_exit = spacc_ablk_cra_exit,
+		},
+	},
+	{
+		.key_offs = 0,
+		.iv_offs = AES_MAX_KEY_SIZE,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
+		.alg = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "ecb-aes-picoxcell",
+			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				CRYPTO_ALG_KERN_DRIVER_ONLY |
+				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_module = THIS_MODULE,
+			.cra_ablkcipher = {
+				.setkey = spacc_aes_setkey,
+				.encrypt = spacc_ablk_encrypt,
+				.decrypt = spacc_ablk_decrypt,
+				.min_keysize = AES_MIN_KEY_SIZE,
+				.max_keysize = AES_MAX_KEY_SIZE,
+			},
+			.cra_init = spacc_ablk_cra_init,
+			.cra_exit = spacc_ablk_cra_exit,
+		},
+	},
+	{
+		.key_offs = DES_BLOCK_SIZE,
+		.iv_offs = 0,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
+		.alg = {
+			.cra_name = "cbc(des)",
+			.cra_driver_name = "cbc-des-picoxcell",
+			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_module = THIS_MODULE,
+			.cra_ablkcipher = {
+				.setkey = spacc_des_setkey,
+				.encrypt = spacc_ablk_encrypt,
+				.decrypt = spacc_ablk_decrypt,
+				.min_keysize = DES_KEY_SIZE,
+				.max_keysize = DES_KEY_SIZE,
+				.ivsize = DES_BLOCK_SIZE,
+			},
+			.cra_init = spacc_ablk_cra_init,
+			.cra_exit = spacc_ablk_cra_exit,
+		},
+	},
+	{
+		.key_offs = DES_BLOCK_SIZE,
+		.iv_offs = 0,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
+		.alg = {
+			.cra_name = "ecb(des)",
+			.cra_driver_name = "ecb-des-picoxcell",
+			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_module = THIS_MODULE,
+			.cra_ablkcipher = {
+				.setkey = spacc_des_setkey,
+				.encrypt = spacc_ablk_encrypt,
+				.decrypt = spacc_ablk_decrypt,
+				.min_keysize = DES_KEY_SIZE,
+				.max_keysize = DES_KEY_SIZE,
+			},
+			.cra_init = spacc_ablk_cra_init,
+			.cra_exit = spacc_ablk_cra_exit,
+		},
+	},
+	{
+		.key_offs = DES_BLOCK_SIZE,
+		.iv_offs = 0,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
+		.alg = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "cbc-des3-ede-picoxcell",
+			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_module = THIS_MODULE,
+			.cra_ablkcipher = {
+				.setkey = spacc_des_setkey,
+				.encrypt = spacc_ablk_encrypt,
+				.decrypt = spacc_ablk_decrypt,
+				.min_keysize = DES3_EDE_KEY_SIZE,
+				.max_keysize = DES3_EDE_KEY_SIZE,
+				.ivsize = DES3_EDE_BLOCK_SIZE,
+			},
+			.cra_init = spacc_ablk_cra_init,
+			.cra_exit = spacc_ablk_cra_exit,
+		},
+	},
+	{
+		.key_offs = DES_BLOCK_SIZE,
+		.iv_offs = 0,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
+		.alg = {
+			.cra_name = "ecb(des3_ede)",
+			.cra_driver_name = "ecb-des3-ede-picoxcell",
+			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_module = THIS_MODULE,
+			.cra_ablkcipher = {
+				.setkey = spacc_des_setkey,
+				.encrypt = spacc_ablk_encrypt,
+				.decrypt = spacc_ablk_decrypt,
+				.min_keysize = DES3_EDE_KEY_SIZE,
+				.max_keysize = DES3_EDE_KEY_SIZE,
+			},
+			.cra_init = spacc_ablk_cra_init,
+			.cra_exit = spacc_ablk_cra_exit,
+		},
+	},
+};
+
+static struct spacc_aead ipsec_engine_aeads[] = {
+	{
+		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_SHA |
+				SPA_CTRL_HASH_MODE_HMAC,
+		.key_offs = 0,
+		.iv_offs = AES_MAX_KEY_SIZE,
+		.alg = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-aes-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
+			},
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
+		},
+	},
+	{
+		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_SHA256 |
+				SPA_CTRL_HASH_MODE_HMAC,
+		.key_offs = 0,
+		.iv_offs = AES_MAX_KEY_SIZE,
+		.alg = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-aes-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
+			},
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
+		},
+	},
+	{
+		.key_offs = 0,
+		.iv_offs = AES_MAX_KEY_SIZE,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_MD5 |
+				SPA_CTRL_HASH_MODE_HMAC,
+		.alg = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-aes-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
+			},
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
+		},
+	},
+	{
+		.key_offs = DES_BLOCK_SIZE,
+		.iv_offs = 0,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_SHA |
+				SPA_CTRL_HASH_MODE_HMAC,
+		.alg = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-3des-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
+			},
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
+		},
+	},
+	{
+		.key_offs = DES_BLOCK_SIZE,
+		.iv_offs = 0,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_SHA256 |
+				SPA_CTRL_HASH_MODE_HMAC,
+		.alg = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-3des-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
+			},
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
+		},
+	},
+	{
+		.key_offs = DES_BLOCK_SIZE,
+		.iv_offs = 0,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_MD5 |
+				SPA_CTRL_HASH_MODE_HMAC,
+		.alg = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-3des-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
+			},
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
+		},
+	},
+};
+
+static struct spacc_alg l2_engine_algs[] = {
+	{
+		.key_offs = 0,
+		.iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
+				SPA_CTRL_CIPH_MODE_F8,
+		.alg = {
+			.cra_name = "f8(kasumi)",
+			.cra_driver_name = "f8-kasumi-picoxcell",
+			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+			.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER |
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = 8,
+			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_module = THIS_MODULE,
+			.cra_ablkcipher = {
+				.setkey = spacc_kasumi_f8_setkey,
+				.encrypt = spacc_ablk_encrypt,
+				.decrypt = spacc_ablk_decrypt,
+				.min_keysize = 16,
+				.max_keysize = 16,
+				.ivsize = 8,
+			},
+			.cra_init = spacc_ablk_cra_init,
+			.cra_exit = spacc_ablk_cra_exit,
+		},
+	},
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id spacc_of_id_table[] = {
+	{ .compatible = "picochip,spacc-ipsec" },
+	{ .compatible = "picochip,spacc-l2" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, spacc_of_id_table);
+#endif /* CONFIG_OF */
+
+static int spacc_probe(struct platform_device *pdev)
+{
+	int i, err, ret;
+	struct resource *mem, *irq;
+	struct device_node *np = pdev->dev.of_node;
+	struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
+						   GFP_KERNEL);
+	if (!engine)
+		return -ENOMEM;
+
+	if (of_device_is_compatible(np, "picochip,spacc-ipsec")) {
+		engine->max_ctxs	= SPACC_CRYPTO_IPSEC_MAX_CTXS;
+		engine->cipher_pg_sz	= SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
+		engine->hash_pg_sz	= SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
+		engine->fifo_sz		= SPACC_CRYPTO_IPSEC_FIFO_SZ;
+		engine->algs		= ipsec_engine_algs;
+		engine->num_algs	= ARRAY_SIZE(ipsec_engine_algs);
+		engine->aeads		= ipsec_engine_aeads;
+		engine->num_aeads	= ARRAY_SIZE(ipsec_engine_aeads);
+	} else if (of_device_is_compatible(np, "picochip,spacc-l2")) {
+		engine->max_ctxs	= SPACC_CRYPTO_L2_MAX_CTXS;
+		engine->cipher_pg_sz	= SPACC_CRYPTO_L2_CIPHER_PG_SZ;
+		engine->hash_pg_sz	= SPACC_CRYPTO_L2_HASH_PG_SZ;
+		engine->fifo_sz		= SPACC_CRYPTO_L2_FIFO_SZ;
+		engine->algs		= l2_engine_algs;
+		engine->num_algs	= ARRAY_SIZE(l2_engine_algs);
+	} else {
+		return -EINVAL;
+	}
+
+	engine->name = dev_name(&pdev->dev);
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	engine->regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(engine->regs))
+		return PTR_ERR(engine->regs);
+
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq) {
+		dev_err(&pdev->dev, "no memory/irq resource for engine\n");
+		return -ENXIO;
+	}
+
+	if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
+			     engine->name, engine)) {
+		dev_err(engine->dev, "failed to request IRQ\n");
+		return -EBUSY;
+	}
+
+	engine->dev		= &pdev->dev;
+	engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
+	engine->hash_key_base	= engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;
+
+	engine->req_pool = dmam_pool_create(engine->name, engine->dev,
+		MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
+	if (!engine->req_pool)
+		return -ENOMEM;
+
+	spin_lock_init(&engine->hw_lock);
+
+	engine->clk = clk_get(&pdev->dev, "ref");
+	if (IS_ERR(engine->clk)) {
+		dev_info(&pdev->dev, "clk unavailable\n");
+		return PTR_ERR(engine->clk);
+	}
+
+	if (clk_prepare_enable(engine->clk)) {
+		dev_info(&pdev->dev, "unable to prepare/enable clk\n");
+		ret = -EIO;
+		goto err_clk_put;
+	}
+
+	ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+	if (ret)
+		goto err_clk_disable;
+
+	/*
+	 * Use an IRQ threshold of 50% as a default. This seems to be a
+	 * reasonable trade-off of latency against throughput but can be
+	 * changed at runtime.
+	 */
+	engine->stat_irq_thresh = (engine->fifo_sz / 2);
+
+	/*
+	 * Configure the interrupts. We only use the STAT_CNT interrupt as we
+	 * only submit a new packet for processing when we complete another in
+	 * the queue. This minimizes time spent in the interrupt handler.
+	 */
+	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
+	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
+	writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
+	       engine->regs + SPA_IRQ_EN_REG_OFFSET);
+
+	timer_setup(&engine->packet_timeout, spacc_packet_timeout, 0);
+
+	INIT_LIST_HEAD(&engine->pending);
+	INIT_LIST_HEAD(&engine->completed);
+	INIT_LIST_HEAD(&engine->in_progress);
+	engine->in_flight = 0;
+	tasklet_init(&engine->complete, spacc_spacc_complete,
+		     (unsigned long)engine);
+
+	platform_set_drvdata(pdev, engine);
+
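+	/*
+	 * Registering at least one algorithm is enough for the probe to
+	 * succeed; ret is cleared on the first successful registration.
+	 */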
+	ret = -EINVAL;
+	INIT_LIST_HEAD(&engine->registered_algs);
+	for (i = 0; i < engine->num_algs; ++i) {
+		engine->algs[i].engine = engine;
+		err = crypto_register_alg(&engine->algs[i].alg);
+		if (!err) {
+			list_add_tail(&engine->algs[i].entry,
+				      &engine->registered_algs);
+			ret = 0;
+		}
+		if (err)
+			dev_err(engine->dev, "failed to register alg \"%s\"\n",
+				engine->algs[i].alg.cra_name);
+		else
+			dev_dbg(engine->dev, "registered alg \"%s\"\n",
+				engine->algs[i].alg.cra_name);
+	}
+
+	INIT_LIST_HEAD(&engine->registered_aeads);
+	for (i = 0; i < engine->num_aeads; ++i) {
+		engine->aeads[i].engine = engine;
+		err = crypto_register_aead(&engine->aeads[i].alg);
+		if (!err) {
+			list_add_tail(&engine->aeads[i].entry,
+				      &engine->registered_aeads);
+			ret = 0;
+		}
+		if (err)
+			dev_err(engine->dev, "failed to register alg \"%s\"\n",
+				engine->aeads[i].alg.base.cra_name);
+		else
+			dev_dbg(engine->dev, "registered alg \"%s\"\n",
+				engine->aeads[i].alg.base.cra_name);
+	}
+
+	if (!ret)
+		return 0;
+
+	del_timer_sync(&engine->packet_timeout);
+	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+err_clk_disable:
+	clk_disable_unprepare(engine->clk);
+err_clk_put:
+	clk_put(engine->clk);
+
+	return ret;
+}
+
+static int spacc_remove(struct platform_device *pdev)
+{
+	struct spacc_aead *aead, *an;
+	struct spacc_alg *alg, *next;
+	struct spacc_engine *engine = platform_get_drvdata(pdev);
+
+	del_timer_sync(&engine->packet_timeout);
+	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+
+	list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
+		list_del(&aead->entry);
+		crypto_unregister_aead(&aead->alg);
+	}
+
+	list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
+		list_del(&alg->entry);
+		crypto_unregister_alg(&alg->alg);
+	}
+
+	clk_disable_unprepare(engine->clk);
+	clk_put(engine->clk);
+
+	return 0;
+}
+
+static struct platform_driver spacc_driver = {
+	.probe		= spacc_probe,
+	.remove		= spacc_remove,
+	.driver		= {
+		.name	= "picochip,spacc",
+#ifdef CONFIG_PM
+		.pm	= &spacc_pm_ops,
+#endif /* CONFIG_PM */
+		.of_match_table	= of_match_ptr(spacc_of_id_table),
+	},
+};
+
+module_platform_driver(spacc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/crypto/picoxcell_crypto_regs.h b/drivers/crypto/picoxcell_crypto_regs.h
new file mode 100644
index 0000000..af93442
--- /dev/null
+++ b/drivers/crypto/picoxcell_crypto_regs.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2010 Picochip Ltd., Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __PICOXCELL_CRYPTO_REGS_H__
+#define __PICOXCELL_CRYPTO_REGS_H__
+
+#define SPA_STATUS_OK			0
+#define SPA_STATUS_ICV_FAIL		1
+#define SPA_STATUS_MEMORY_ERROR		2
+#define SPA_STATUS_BLOCK_ERROR		3
+
+#define SPA_IRQ_CTRL_STAT_CNT_OFFSET	16
+#define SPA_IRQ_STAT_STAT_MASK		(1 << 4)
+#define SPA_FIFO_STAT_STAT_OFFSET	16
+#define SPA_FIFO_STAT_STAT_CNT_MASK	(0x3F << SPA_FIFO_STAT_STAT_OFFSET)
+#define SPA_STATUS_RES_CODE_OFFSET	24
+#define SPA_STATUS_RES_CODE_MASK	(0x3 << SPA_STATUS_RES_CODE_OFFSET)
+#define SPA_KEY_SZ_CTX_INDEX_OFFSET	8
+#define SPA_KEY_SZ_CIPHER_OFFSET	31
+
+#define SPA_IRQ_EN_REG_OFFSET		0x00000000
+#define SPA_IRQ_STAT_REG_OFFSET		0x00000004
+#define SPA_IRQ_CTRL_REG_OFFSET		0x00000008
+#define SPA_FIFO_STAT_REG_OFFSET	0x0000000C
+#define SPA_SDMA_BRST_SZ_REG_OFFSET	0x00000010
+#define SPA_SRC_PTR_REG_OFFSET		0x00000020
+#define SPA_DST_PTR_REG_OFFSET		0x00000024
+#define SPA_OFFSET_REG_OFFSET		0x00000028
+#define SPA_AAD_LEN_REG_OFFSET		0x0000002C
+#define SPA_PROC_LEN_REG_OFFSET		0x00000030
+#define SPA_ICV_LEN_REG_OFFSET		0x00000034
+#define SPA_ICV_OFFSET_REG_OFFSET	0x00000038
+#define SPA_SW_CTRL_REG_OFFSET		0x0000003C
+#define SPA_CTRL_REG_OFFSET		0x00000040
+#define SPA_AUX_INFO_REG_OFFSET		0x0000004C
+#define SPA_STAT_POP_REG_OFFSET		0x00000050
+#define SPA_STATUS_REG_OFFSET		0x00000054
+#define SPA_KEY_SZ_REG_OFFSET		0x00000100
+#define SPA_CIPH_KEY_BASE_REG_OFFSET	0x00004000
+#define SPA_HASH_KEY_BASE_REG_OFFSET	0x00008000
+#define SPA_RC4_CTX_BASE_REG_OFFSET	0x00020000
+
+#define SPA_IRQ_EN_REG_RESET		0x00000000
+#define SPA_IRQ_CTRL_REG_RESET		0x00000000
+#define SPA_FIFO_STAT_REG_RESET		0x00000000
+#define SPA_SDMA_BRST_SZ_REG_RESET	0x00000000
+#define SPA_SRC_PTR_REG_RESET		0x00000000
+#define SPA_DST_PTR_REG_RESET		0x00000000
+#define SPA_OFFSET_REG_RESET		0x00000000
+#define SPA_AAD_LEN_REG_RESET		0x00000000
+#define SPA_PROC_LEN_REG_RESET		0x00000000
+#define SPA_ICV_LEN_REG_RESET		0x00000000
+#define SPA_ICV_OFFSET_REG_RESET	0x00000000
+#define SPA_SW_CTRL_REG_RESET		0x00000000
+#define SPA_CTRL_REG_RESET		0x00000000
+#define SPA_AUX_INFO_REG_RESET		0x00000000
+#define SPA_STAT_POP_REG_RESET		0x00000000
+#define SPA_STATUS_REG_RESET		0x00000000
+#define SPA_KEY_SZ_REG_RESET		0x00000000
+
+#define SPA_CTRL_HASH_ALG_IDX		4
+#define SPA_CTRL_CIPH_MODE_IDX		8
+#define SPA_CTRL_HASH_MODE_IDX		12
+#define SPA_CTRL_CTX_IDX		16
+#define SPA_CTRL_ENCRYPT_IDX		24
+#define SPA_CTRL_AAD_COPY		25
+#define SPA_CTRL_ICV_PT			26
+#define SPA_CTRL_ICV_ENC		27
+#define SPA_CTRL_ICV_APPEND		28
+#define SPA_CTRL_KEY_EXP		29
+
+#define SPA_KEY_SZ_CXT_IDX		8
+#define SPA_KEY_SZ_CIPHER_IDX		31
+
+#define SPA_IRQ_EN_CMD0_EN		(1 << 0)
+#define SPA_IRQ_EN_STAT_EN		(1 << 4)
+#define SPA_IRQ_EN_GLBL_EN		(1 << 31)
+
+#define SPA_CTRL_CIPH_ALG_NULL		0x00
+#define SPA_CTRL_CIPH_ALG_DES		0x01
+#define SPA_CTRL_CIPH_ALG_AES		0x02
+#define SPA_CTRL_CIPH_ALG_RC4		0x03
+#define SPA_CTRL_CIPH_ALG_MULTI2	0x04
+#define SPA_CTRL_CIPH_ALG_KASUMI	0x05
+
+#define SPA_CTRL_HASH_ALG_NULL		(0x00 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_MD5		(0x01 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA		(0x02 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA224	(0x03 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA256	(0x04 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA384	(0x05 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA512	(0x06 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_AESMAC	(0x07 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_AESCMAC	(0x08 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_KASF9		(0x09 << SPA_CTRL_HASH_ALG_IDX)
+
+#define SPA_CTRL_CIPH_MODE_NULL		(0x00 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_ECB		(0x00 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_CBC		(0x01 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_CTR		(0x02 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_CCM		(0x03 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_GCM		(0x05 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_OFB		(0x07 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_CFB		(0x08 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_F8		(0x09 << SPA_CTRL_CIPH_MODE_IDX)
+
+#define SPA_CTRL_HASH_MODE_RAW		(0x00 << SPA_CTRL_HASH_MODE_IDX)
+#define SPA_CTRL_HASH_MODE_SSLMAC	(0x01 << SPA_CTRL_HASH_MODE_IDX)
+#define SPA_CTRL_HASH_MODE_HMAC		(0x02 << SPA_CTRL_HASH_MODE_IDX)
+
+#define SPA_FIFO_STAT_EMPTY		(1 << 31)
+#define SPA_FIFO_CMD_FULL		(1 << 7)
+
+#endif /* __PICOXCELL_CRYPTO_REGS_H__ */
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
new file mode 100644
index 0000000..ce3cae4
--- /dev/null
+++ b/drivers/crypto/qat/Kconfig
@@ -0,0 +1,83 @@
+config CRYPTO_DEV_QAT
+	tristate
+	select CRYPTO_AEAD
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AKCIPHER
+	select CRYPTO_DH
+	select CRYPTO_HMAC
+	select CRYPTO_RSA
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	select FW_LOADER
+
+config CRYPTO_DEV_QAT_DH895xCC
+	tristate "Support for Intel(R) DH895xCC"
+	depends on X86 && PCI
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+	  for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_dh895xcc.
+
+config CRYPTO_DEV_QAT_C3XXX
+	tristate "Support for Intel(R) C3XXX"
+	depends on X86 && PCI
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
+	  for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_c3xxx.
+
+config CRYPTO_DEV_QAT_C62X
+	tristate "Support for Intel(R) C62X"
+	depends on X86 && PCI
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) C62x with Intel(R) QuickAssist Technology
+	  for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_c62x.
+
+config CRYPTO_DEV_QAT_DH895xCCVF
+	tristate "Support for Intel(R) DH895xCC Virtual Function"
+	depends on X86 && PCI
+	select PCI_IOV
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+	  Virtual Function for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_dh895xccvf.
+
+config CRYPTO_DEV_QAT_C3XXXVF
+	tristate "Support for Intel(R) C3XXX Virtual Function"
+	depends on X86 && PCI
+	select PCI_IOV
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
+	  Virtual Function for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_c3xxxvf.
+
+config CRYPTO_DEV_QAT_C62XVF
+	tristate "Support for Intel(R) C62X Virtual Function"
+	depends on X86 && PCI
+	select PCI_IOV
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) C62x with Intel(R) QuickAssist Technology
+	  Virtual Function for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_c62xvf.
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
new file mode 100644
index 0000000..7dd15e7
--- /dev/null
+++ b/drivers/crypto/qat/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/drivers/crypto/qat/qat_c3xxx/Makefile b/drivers/crypto/qat/qat_c3xxx/Makefile
new file mode 100644
index 0000000..8f5fd48
--- /dev/null
+++ b/drivers/crypto/qat/qat_c3xxx/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
+qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
new file mode 100644
index 0000000..6bc68bc
--- /dev/null
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -0,0 +1,239 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+	* Redistributions of source code must retain the above copyright
+	  notice, this list of conditions and the following disclaimer.
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in
+	  the documentation and/or other materials provided with the
+	  distribution.
+	* Neither the name of Intel Corporation nor the names of its
+	  contributors may be used to endorse or promote products derived
+	  from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_pf2vf_msg.h>
+#include "adf_c3xxx_hw_data.h"
+
+/* Worker thread to service arbiter mappings based on dev SKUs */
+static const u32 thrd_to_arb_map_6_me_sku[] = {
+	0x12222AAA, 0x11222AAA, 0x12222AAA,
+	0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static struct adf_hw_device_class c3xxx_class = {
+	.name = ADF_C3XXX_DEVICE_NAME,
+	.type = DEV_C3XXX,
+	.instances = 0
+};
+
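+/*
+ * The fuse value marks disabled units, so the enabled accelerator and
+ * accel-engine masks are derived by inverting it and masking.
+ */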
+static u32 get_accel_mask(u32 fuse)
+{
+	return (~fuse) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET &
+		ADF_C3XXX_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return (~fuse) & ADF_C3XXX_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	u32 i, ctr = 0;
+
+	if (!self || !self->accel_mask)
+		return 0;
+
+	for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) {
+		if (self->accel_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	u32 i, ctr = 0;
+
+	if (!self || !self->ae_mask)
+		return 0;
+
+	for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) {
+		if (self->ae_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXX_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXX_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+	return 0;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	int aes = get_num_aes(self);
+
+	if (aes == 6)
+		return DEV_SKU_4;
+
+	return DEV_SKU_UNKNOWN;
+}
+
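+/* The SKU reported above selects which thread-to-arbiter table the common
+ * arbiter setup is given; DEV_SKU_4 (all six MEs enabled) is the only c3xxx
+ * configuration with a table here. */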
+static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+				    u32 const **arb_map_config)
+{
+	switch (accel_dev->accel_pci_dev.sku) {
+	case DEV_SKU_4:
+		*arb_map_config = thrd_to_arb_map_6_me_sku;
+		break;
+	default:
+		dev_err(&GET_DEV(accel_dev),
+			"The configuration doesn't match any SKU\n");
+		*arb_map_config = NULL;
+	}
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_C3XXX_PF2VF_OFFSET(i);
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_C3XXX_VINTMSK_OFFSET(i);
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR];
+	void __iomem *csr = misc_bar->virt_addr;
+	unsigned int val, i;
+
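+	/* ADF_CSR_RD/WR are plain MMIO accessors over the mapped PMISC BAR
+	 * (provided by the shared adf headers); each block below is a simple
+	 * read-modify-write that sets the relevant ECC enable bits. */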
+	/* Enable Accel Engine error detection & correction */
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i));
+		val |= ADF_C3XXX_ENABLE_AE_ECC_ERR;
+		ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val);
+		val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i));
+		val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR;
+		ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val);
+	}
+
+	/* Enable shared memory error detection & correction */
+	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i));
+		val |= ADF_C3XXX_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val);
+		val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i));
+		val |= ADF_C3XXX_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val);
+	}
+}
+
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+	void __iomem *addr;
+
+	addr = (&GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR])->virt_addr;
+
+	/* Enable bundle and misc interrupts */
+	ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF0_MASK_OFFSET,
+		   ADF_C3XXX_SMIA0_MASK);
+	ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF1_MASK_OFFSET,
+		   ADF_C3XXX_SMIA1_MASK);
+}
+
+static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &c3xxx_class;
+	hw_data->instance_id = c3xxx_class.instances++;
+	hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_isr_resource_alloc;
+	hw_data->free_irq = adf_isr_resource_free;
+	hw_data->enable_error_correction = adf_enable_error_correction;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_sram_bar_id = get_sram_bar_id;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->fw_name = ADF_C3XXX_FW;
+	hw_data->fw_mmp_name = ADF_C3XXX_MMP;
+	hw_data->init_admin_comms = adf_init_admin_comms;
+	hw_data->exit_admin_comms = adf_exit_admin_comms;
+	hw_data->disable_iov = adf_disable_sriov;
+	hw_data->send_admin_init = adf_send_admin_init;
+	hw_data->init_arb = adf_init_arb;
+	hw_data->exit_arb = adf_exit_arb;
+	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+	hw_data->enable_ints = adf_enable_ints;
+	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+	hw_data->reset_device = adf_reset_flr;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+}
+
+void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
new file mode 100644
index 0000000..afc9a0a
--- /dev/null
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -0,0 +1,83 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_C3XXX_HW_DATA_H_
+#define ADF_C3XXX_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_C3XXX_PMISC_BAR 0
+#define ADF_C3XXX_ETR_BAR 1
+#define ADF_C3XXX_RX_RINGS_OFFSET 8
+#define ADF_C3XXX_TX_RINGS_MASK 0xFF
+#define ADF_C3XXX_MAX_ACCELERATORS 3
+#define ADF_C3XXX_MAX_ACCELENGINES 6
+#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
+#define ADF_C3XXX_ACCELERATORS_MASK 0x7
+#define ADF_C3XXX_ACCELENGINES_MASK 0x3F
+#define ADF_C3XXX_ETR_MAX_BANKS 16
+#define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_C3XXX_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_C3XXX_SMIA0_MASK 0xFFFF
+#define ADF_C3XXX_SMIA1_MASK 0x1
+/* Error detection and correction */
+#define ADF_C3XXX_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
+#define ADF_C3XXX_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
+#define ADF_C3XXX_ENABLE_AE_ECC_ERR BIT(28)
+#define ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
+#define ADF_C3XXX_UERRSSMSH(i) (i * 0x4000 + 0x18)
+#define ADF_C3XXX_CERRSSMSH(i) (i * 0x4000 + 0x10)
+#define ADF_C3XXX_ERRSSMSH_EN BIT(3)
+
+#define ADF_C3XXX_PF2VF_OFFSET(i)	(0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_C3XXX_VINTMSK_OFFSET(i)	(0x3A000 + 0x200 + ((i) * 0x04))
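+/* The (i * 0x1000) and (i * 0x4000) strides above reflect per-AE and
+ * per-accelerator CSR spacing; PF2VF and VINTMSK are arrays of one 32-bit
+ * register per VF inside the PMISC region (inferred from the macros). */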
+
+/* Firmware Binary */
+#define ADF_C3XXX_FW "qat_c3xxx.bin"
+#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin"
+
+void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
new file mode 100644
index 0000000..763c216
--- /dev/null
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -0,0 +1,335 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c3xxx_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_C3XXX_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_C3XXX_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+	.sriov_configure = adf_sriov_configure,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_C3XXX_PCI_DEVICE_ID:
+			adf_clean_hw_data_c3xxx(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	unsigned long bar_mask;
+	int ret;
+
+	switch (ent->device) {
+	case ADF_C3XXX_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+		/* If the accelerator is connected to a node with no memory,
+		 * there is no point in using the accelerator since remote
+		 * memory transactions will be very slow. */
+		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+		return -EINVAL;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table.
+	 * This should be called before adf_cleanup_accel is called */
+	if (adf_devmgr_add_dev(accel_dev, NULL)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_c3xxx(accel_dev->hw_device);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+	pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+			      &hw_data->fuses);
+
+	/* Get Accelerator and Accelerator Engine masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+	/* If the device has no acceleration engines then ignore it. */
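+	/* The (~ae_mask) & 0x01 test additionally requires engine 0 to be
+	 * enabled, presumably because the admin/firmware path relies on AE0
+	 * (assumption; the masks themselves come from the fuse word above). */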
+	if (!hw_data->accel_mask || !hw_data->ae_mask ||
+	    ((~hw_data->ae_mask) & 0x01)) {
+		dev_err(&pdev->dev, "No acceleration units found\n");
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Read accelerator capabilities mask */
+	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
+			      &hw_data->accel_capabilities_mask);
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
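+	/* Iterate over twice the logical BAR count because 64-bit memory BARs
+	 * occupy two consecutive PCI BAR slots. */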
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+
+	if (adf_enable_aer(accel_dev, &adf_driver)) {
+		dev_err(&pdev->dev, "Failed to enable aer\n");
+		ret = -EFAULT;
+		goto out_err_free_reg;
+	}
+
+	if (pci_save_state(pdev)) {
+		dev_err(&pdev->dev, "Failed to save pci state\n");
+		ret = -ENOMEM;
+		goto out_err_free_reg;
+	}
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	adf_disable_aer(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
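+	/* Best-effort request for the shared intel_qat (qat_common) module
+	 * that provides the adf_* services; the return value is intentionally
+	 * ignored. */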
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_C3XXX_FW);
+MODULE_FIRMWARE(ADF_C3XXX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/Makefile b/drivers/crypto/qat/qat_c3xxxvf/Makefile
new file mode 100644
index 0000000..16d178e
--- /dev/null
+++ b/drivers/crypto/qat/qat_c3xxxvf/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
+qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
new file mode 100644
index 0000000..d2d0ae4
--- /dev/null
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -0,0 +1,150 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_c3xxxvf_hw_data.h"
+
+static struct adf_hw_device_class c3xxxiov_class = {
+	.name = ADF_C3XXXVF_DEVICE_NAME,
+	.type = DEV_C3XXXVF,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return ADF_C3XXXIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return ADF_C3XXXIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXXIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXXIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXXIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXXIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	return DEV_SKU_VF;
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_C3XXXIOV_PF2VF_OFFSET;
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_C3XXXIOV_VINTMSK_OFFSET;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
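+/* PF-only duties (error correction, admin comms, arbiter setup) are handled
+ * by the host PF driver, so the VF wires those hw ops to the no-op stubs
+ * above. */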
+void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &c3xxxiov_class;
+	hw_data->num_banks = ADF_C3XXXIOV_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_C3XXXIOV_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+	hw_data->free_irq = adf_vf_isr_resource_free;
+	hw_data->enable_error_correction = adf_vf_void_noop;
+	hw_data->init_admin_comms = adf_vf_int_noop;
+	hw_data->exit_admin_comms = adf_vf_void_noop;
+	hw_data->send_admin_init = adf_vf2pf_init;
+	hw_data->init_arb = adf_vf_int_noop;
+	hw_data->exit_arb = adf_vf_void_noop;
+	hw_data->disable_iov = adf_vf2pf_shutdown;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->enable_ints = adf_vf_void_noop;
+	hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+	hw_data->dev_class->instances++;
+	adf_devmgr_update_class_index(hw_data);
+}
+
+void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+	adf_devmgr_update_class_index(hw_data);
+}
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
new file mode 100644
index 0000000..934f216
--- /dev/null
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
@@ -0,0 +1,64 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_C3XXXVF_HW_DATA_H_
+#define ADF_C3XXXVF_HW_DATA_H_
+
+#define ADF_C3XXXIOV_PMISC_BAR 1
+#define ADF_C3XXXIOV_ACCELERATORS_MASK 0x1
+#define ADF_C3XXXIOV_ACCELENGINES_MASK 0x1
+#define ADF_C3XXXIOV_MAX_ACCELERATORS 1
+#define ADF_C3XXXIOV_MAX_ACCELENGINES 1
+#define ADF_C3XXXIOV_RX_RINGS_OFFSET 8
+#define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF
+#define ADF_C3XXXIOV_ETR_BAR 0
+#define ADF_C3XXXIOV_ETR_MAX_BANKS 1
+#define ADF_C3XXXIOV_PF2VF_OFFSET	0x200
+#define ADF_C3XXXIOV_VINTMSK_OFFSET	0x208
+
+void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
new file mode 100644
index 0000000..613c7d5
--- /dev/null
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -0,0 +1,305 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c3xxxvf_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_C3XXXIOV_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_C3XXXVF_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	struct adf_accel_dev *pf;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_C3XXXIOV_PCI_DEVICE_ID:
+			adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+	adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_dev *pf;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	unsigned long bar_mask;
+	int ret;
+
+	switch (ent->device) {
+	case ADF_C3XXXIOV_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	accel_dev->is_vf = true;
+	pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table */
+	if (adf_devmgr_add_dev(accel_dev, pf)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_c3xxxiov(accel_dev->hw_device);
+
+	/* Get Accelerator and Accelerator Engine masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
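+	/* Note that the VF masks are compile-time constants, so the fuse word
+	 * is never read from config space here and stays zero. */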
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+	/* Completion for VF2PF request/response message exchange */
+	init_completion(&accel_dev->vf.iov_msg_completion);
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
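+	/* The host PF driver is expected to be up before its VFs are probed,
+	 * so mark the PF as running unconditionally (assumption based on the
+	 * SR-IOV bring-up order). */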
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+	adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_c62x/Makefile b/drivers/crypto/qat/qat_c62x/Makefile
new file mode 100644
index 0000000..bd75ace
--- /dev/null
+++ b/drivers/crypto/qat/qat_c62x/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
+qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
new file mode 100644
index 0000000..618cec3
--- /dev/null
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -0,0 +1,249 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_pf2vf_msg.h>
+#include "adf_c62x_hw_data.h"
+
+/* Worker thread to service arbiter mappings based on dev SKUs */
+static const u32 thrd_to_arb_map_8_me_sku[] = {
+	0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
+	0x11222AAA, 0x12222AAA, 0x11222AAA, 0, 0
+};
+
+static const u32 thrd_to_arb_map_10_me_sku[] = {
+	0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
+	0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
+};
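+
+/* The 8-ME table carries two trailing zero entries for the fused-off engines
+ * of that SKU; the 10-ME table covers the full complement. */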
+
+static struct adf_hw_device_class c62x_class = {
+	.name = ADF_C62X_DEVICE_NAME,
+	.type = DEV_C62X,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return (~fuse) >> ADF_C62X_ACCELERATORS_REG_OFFSET &
+			  ADF_C62X_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return (~fuse) & ADF_C62X_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	u32 i, ctr = 0;
+
+	if (!self || !self->accel_mask)
+		return 0;
+
+	for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) {
+		if (self->accel_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	u32 i, ctr = 0;
+
+	if (!self || !self->ae_mask)
+		return 0;
+
+	for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) {
+		if (self->ae_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62X_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62X_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62X_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	int aes = get_num_aes(self);
+
+	if (aes == 8)
+		return DEV_SKU_2;
+	else if (aes == 10)
+		return DEV_SKU_4;
+
+	return DEV_SKU_UNKNOWN;
+}
+
+static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+				    u32 const **arb_map_config)
+{
+	switch (accel_dev->accel_pci_dev.sku) {
+	case DEV_SKU_2:
+		*arb_map_config = thrd_to_arb_map_8_me_sku;
+		break;
+	case DEV_SKU_4:
+		*arb_map_config = thrd_to_arb_map_10_me_sku;
+		break;
+	default:
+		dev_err(&GET_DEV(accel_dev),
+			"The configuration doesn't match any SKU\n");
+		*arb_map_config = NULL;
+	}
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_C62X_PF2VF_OFFSET(i);
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_C62X_VINTMSK_OFFSET(i);
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR];
+	void __iomem *csr = misc_bar->virt_addr;
+	unsigned int val, i;
+
+	/* Enable Accel Engine error detection & correction */
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i));
+		val |= ADF_C62X_ENABLE_AE_ECC_ERR;
+		ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val);
+		val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i));
+		val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR;
+		ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val);
+	}
+
+	/* Enable shared memory error detection & correction */
+	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i));
+		val |= ADF_C62X_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val);
+		val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i));
+		val |= ADF_C62X_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_C62X_CERRSSMSH(i), val);
+	}
+}
+
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+	void __iomem *addr;
+
+	addr = (&GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR])->virt_addr;
+
+	/* Enable bundle and misc interrupts */
+	ADF_CSR_WR(addr, ADF_C62X_SMIAPF0_MASK_OFFSET,
+		   ADF_C62X_SMIA0_MASK);
+	ADF_CSR_WR(addr, ADF_C62X_SMIAPF1_MASK_OFFSET,
+		   ADF_C62X_SMIA1_MASK);
+}
+
+static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &c62x_class;
+	hw_data->instance_id = c62x_class.instances++;
+	hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_isr_resource_alloc;
+	hw_data->free_irq = adf_isr_resource_free;
+	hw_data->enable_error_correction = adf_enable_error_correction;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_sram_bar_id = get_sram_bar_id;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->fw_name = ADF_C62X_FW;
+	hw_data->fw_mmp_name = ADF_C62X_MMP;
+	hw_data->init_admin_comms = adf_init_admin_comms;
+	hw_data->exit_admin_comms = adf_exit_admin_comms;
+	hw_data->disable_iov = adf_disable_sriov;
+	hw_data->send_admin_init = adf_send_admin_init;
+	hw_data->init_arb = adf_init_arb;
+	hw_data->exit_arb = adf_exit_arb;
+	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+	hw_data->enable_ints = adf_enable_ints;
+	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+	hw_data->reset_device = adf_reset_flr;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+}
+
+void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
new file mode 100644
index 0000000..17a8a32
--- /dev/null
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
@@ -0,0 +1,84 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_C62X_HW_DATA_H_
+#define ADF_C62X_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_C62X_SRAM_BAR 0
+#define ADF_C62X_PMISC_BAR 1
+#define ADF_C62X_ETR_BAR 2
+#define ADF_C62X_RX_RINGS_OFFSET 8
+#define ADF_C62X_TX_RINGS_MASK 0xFF
+#define ADF_C62X_MAX_ACCELERATORS 5
+#define ADF_C62X_MAX_ACCELENGINES 10
+#define ADF_C62X_ACCELERATORS_REG_OFFSET 16
+#define ADF_C62X_ACCELERATORS_MASK 0x1F
+#define ADF_C62X_ACCELENGINES_MASK 0x3FF
+#define ADF_C62X_ETR_MAX_BANKS 16
+#define ADF_C62X_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_C62X_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_C62X_SMIA0_MASK 0xFFFF
+#define ADF_C62X_SMIA1_MASK 0x1
+/* Error detection and correction */
+#define ADF_C62X_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
+#define ADF_C62X_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
+#define ADF_C62X_ENABLE_AE_ECC_ERR BIT(28)
+#define ADF_C62X_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
+#define ADF_C62X_UERRSSMSH(i) (i * 0x4000 + 0x18)
+#define ADF_C62X_CERRSSMSH(i) (i * 0x4000 + 0x10)
+#define ADF_C62X_ERRSSMSH_EN BIT(3)
+
+#define ADF_C62X_PF2VF_OFFSET(i)	(0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_C62X_VINTMSK_OFFSET(i)	(0x3A000 + 0x200 + ((i) * 0x04))
+
+/* Firmware Binary */
+#define ADF_C62X_FW "qat_c62x.bin"
+#define ADF_C62X_MMP "qat_c62x_mmp.bin"
+
+void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
new file mode 100644
index 0000000..9cb8329
--- /dev/null
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -0,0 +1,335 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c62x_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_C62X_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_C62X_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+	.sriov_configure = adf_sriov_configure,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_C62X_PCI_DEVICE_ID:
+			adf_clean_hw_data_c62x(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	unsigned long bar_mask;
+	int ret;
+
+	switch (ent->device) {
+	case ADF_C62X_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+		/* If the accelerator is connected to a node with no memory,
+		 * there is no point in using the accelerator since remote
+		 * memory transactions will be very slow. */
+		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+		return -EINVAL;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table.
+	 * This should be called before adf_cleanup_accel is called */
+	if (adf_devmgr_add_dev(accel_dev, NULL)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_c62x(accel_dev->hw_device);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+	pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+			      &hw_data->fuses);
+
+	/* Get Accelerator and Accelerator Engine masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+	/* If the device has no acceleration engines then ignore it. */
+	if (!hw_data->accel_mask || !hw_data->ae_mask ||
+	    ((~hw_data->ae_mask) & 0x01)) {
+		dev_err(&pdev->dev, "No acceleration units found\n");
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Read accelerator capabilities mask */
+	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
+			      &hw_data->accel_capabilities_mask);
+
+	/* Find and map all the device's BARs */
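+	/* Note: when the FUSECTL bit is set the SRAM BAR is presumably not
+	 * exposed, so mapping starts at index 1 to keep the PMISC and ETR
+	 * BARs at their expected slots in pci_bars[]. */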
+	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+
+	if (adf_enable_aer(accel_dev, &adf_driver)) {
+		dev_err(&pdev->dev, "Failed to enable aer\n");
+		ret = -EFAULT;
+		goto out_err_free_reg;
+	}
+
+	if (pci_save_state(pdev)) {
+		dev_err(&pdev->dev, "Failed to save pci state\n");
+		ret = -ENOMEM;
+		goto out_err_free_reg;
+	}
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	adf_disable_aer(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_C62X_FW);
+MODULE_FIRMWARE(ADF_C62X_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_c62xvf/Makefile b/drivers/crypto/qat/qat_c62xvf/Makefile
new file mode 100644
index 0000000..ecd708c
--- /dev/null
+++ b/drivers/crypto/qat/qat_c62xvf/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
+qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
new file mode 100644
index 0000000..38e4bc0
--- /dev/null
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -0,0 +1,150 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_c62xvf_hw_data.h"
+
+static struct adf_hw_device_class c62xiov_class = {
+	.name = ADF_C62XVF_DEVICE_NAME,
+	.type = DEV_C62XVF,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return ADF_C62XIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return ADF_C62XIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	return ADF_C62XIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	return ADF_C62XIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62XIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62XIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	return DEV_SKU_VF;
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_C62XIOV_PF2VF_OFFSET;
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_C62XIOV_VINTMSK_OFFSET;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &c62xiov_class;
+	hw_data->num_banks = ADF_C62XIOV_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_C62XIOV_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK;
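+	/* Admin and arbiter services are owned by the PF, so the VF wires
+	 * them to no-op stubs and relies on the VF2PF message channel for
+	 * init and shutdown notifications. */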
+	hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+	hw_data->free_irq = adf_vf_isr_resource_free;
+	hw_data->enable_error_correction = adf_vf_void_noop;
+	hw_data->init_admin_comms = adf_vf_int_noop;
+	hw_data->exit_admin_comms = adf_vf_void_noop;
+	hw_data->send_admin_init = adf_vf2pf_init;
+	hw_data->init_arb = adf_vf_int_noop;
+	hw_data->exit_arb = adf_vf_void_noop;
+	hw_data->disable_iov = adf_vf2pf_shutdown;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->enable_ints = adf_vf_void_noop;
+	hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+	hw_data->dev_class->instances++;
+	adf_devmgr_update_class_index(hw_data);
+}
+
+void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+	adf_devmgr_update_class_index(hw_data);
+}
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
new file mode 100644
index 0000000..a28d83e
--- /dev/null
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
@@ -0,0 +1,64 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_C62XVF_HW_DATA_H_
+#define ADF_C62XVF_HW_DATA_H_
+
+#define ADF_C62XIOV_PMISC_BAR 1
+#define ADF_C62XIOV_ACCELERATORS_MASK 0x1
+#define ADF_C62XIOV_ACCELENGINES_MASK 0x1
+#define ADF_C62XIOV_MAX_ACCELERATORS 1
+#define ADF_C62XIOV_MAX_ACCELENGINES 1
+#define ADF_C62XIOV_RX_RINGS_OFFSET 8
+#define ADF_C62XIOV_TX_RINGS_MASK 0xFF
+#define ADF_C62XIOV_ETR_BAR 0
+#define ADF_C62XIOV_ETR_MAX_BANKS 1
+#define ADF_C62XIOV_PF2VF_OFFSET	0x200
+#define ADF_C62XIOV_VINTMSK_OFFSET	0x208
+
+void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
new file mode 100644
index 0000000..278452b
--- /dev/null
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -0,0 +1,305 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c62xvf_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_C62XIOV_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_C62XVF_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	struct adf_accel_dev *pf;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_C62XIOV_PCI_DEVICE_ID:
+			adf_clean_hw_data_c62xiov(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+	adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_dev *pf;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	unsigned long bar_mask;
+	int ret;
+
+	switch (ent->device) {
+	case ADF_C62XIOV_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	accel_dev->is_vf = true;
+	pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table */
+	if (adf_devmgr_add_dev(accel_dev, pf)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_c62xiov(accel_dev->hw_device);
+
+	/* Get Accelerator and Accelerator Engine masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* Set DMA mask: prefer 64-bit addressing, fall back to 32-bit */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Find and map all the device's BARs */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
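+	/* The BARs are 64-bit, so each one takes two BAR registers; scan the
+	 * mask over ADF_PCI_MAX_BARS * 2 positions. */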
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+	/* Completion for VF2PF request/response message exchange */
+	init_completion(&accel_dev->vf.iov_msg_completion);
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+	adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
new file mode 100644
index 0000000..47a8e3d
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
+intel_qat-objs := adf_cfg.o \
+	adf_isr.o \
+	adf_ctl_drv.o \
+	adf_dev_mgr.o \
+	adf_init.o \
+	adf_accel_engine.o \
+	adf_aer.o \
+	adf_transport.o \
+	adf_admin.o \
+	adf_hw_arbiter.o \
+	qat_crypto.o \
+	qat_algs.o \
+	qat_asym_algs.o \
+	qat_uclo.o \
+	qat_hal.o
+
+intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o \
+			       adf_vf2pf_msg.o adf_vf_isr.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
new file mode 100644
index 0000000..33f0a62
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -0,0 +1,258 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_ACCEL_DEVICES_H_
+#define ADF_ACCEL_DEVICES_H_
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+#include "adf_cfg_common.h"
+
+#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
+#define ADF_C62X_DEVICE_NAME "c6xx"
+#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
+#define ADF_C3XXX_DEVICE_NAME "c3xxx"
+#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
+#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
+#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
+#define ADF_C62X_PCI_DEVICE_ID 0x37c8
+#define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9
+#define ADF_C3XXX_PCI_DEVICE_ID 0x19e2
+#define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3
+#define ADF_ERRSOU3 (0x3A000 + 0x0C)
+#define ADF_ERRSOU5 (0x3A000 + 0xD8)
+#define ADF_DEVICE_FUSECTL_OFFSET 0x40
+#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
+#define ADF_PCI_MAX_BARS 3
+#define ADF_DEVICE_NAME_LENGTH 32
+#define ADF_ETR_MAX_RINGS_PER_BANK 16
+#define ADF_MAX_MSIX_VECTOR_NAME 16
+#define ADF_DEVICE_NAME_PREFIX "qat_"
+
+enum adf_accel_capabilities {
+	ADF_ACCEL_CAPABILITIES_NULL = 0,
+	ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
+	ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
+	ADF_ACCEL_CAPABILITIES_CIPHER = 4,
+	ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
+	ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
+	ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
+	ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
+};
+
+struct adf_bar {
+	resource_size_t base_addr;
+	void __iomem *virt_addr;
+	resource_size_t size;
+} __packed;
+
+struct adf_accel_msix {
+	struct msix_entry *entries;
+	char **names;
+	u32 num_entries;
+} __packed;
+
+struct adf_accel_pci {
+	struct pci_dev *pci_dev;
+	struct adf_accel_msix msix_entries;
+	struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
+	uint8_t revid;
+	uint8_t sku;
+} __packed;
+
+enum dev_state {
+	DEV_DOWN = 0,
+	DEV_UP
+};
+
+enum dev_sku_info {
+	DEV_SKU_1 = 0,
+	DEV_SKU_2,
+	DEV_SKU_3,
+	DEV_SKU_4,
+	DEV_SKU_VF,
+	DEV_SKU_UNKNOWN,
+};
+
+static inline const char *get_sku_info(enum dev_sku_info info)
+{
+	switch (info) {
+	case DEV_SKU_1:
+		return "SKU1";
+	case DEV_SKU_2:
+		return "SKU2";
+	case DEV_SKU_3:
+		return "SKU3";
+	case DEV_SKU_4:
+		return "SKU4";
+	case DEV_SKU_VF:
+		return "SKUVF";
+	case DEV_SKU_UNKNOWN:
+	default:
+		break;
+	}
+	return "Unknown SKU";
+}
+
+struct adf_hw_device_class {
+	const char *name;
+	const enum adf_device_type type;
+	uint32_t instances;
+} __packed;
+
+struct adf_cfg_device_data;
+struct adf_accel_dev;
+struct adf_etr_data;
+struct adf_etr_ring_data;
+
+struct adf_hw_device_data {
+	struct adf_hw_device_class *dev_class;
+	uint32_t (*get_accel_mask)(uint32_t fuse);
+	uint32_t (*get_ae_mask)(uint32_t fuse);
+	uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
+	uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
+	uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
+	uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
+	uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
+	uint32_t (*get_pf2vf_offset)(uint32_t i);
+	uint32_t (*get_vintmsk_offset)(uint32_t i);
+	enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
+	int (*alloc_irq)(struct adf_accel_dev *accel_dev);
+	void (*free_irq)(struct adf_accel_dev *accel_dev);
+	void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+	int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
+	void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+	int (*send_admin_init)(struct adf_accel_dev *accel_dev);
+	int (*init_arb)(struct adf_accel_dev *accel_dev);
+	void (*exit_arb)(struct adf_accel_dev *accel_dev);
+	void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
+				const uint32_t **cfg);
+	void (*disable_iov)(struct adf_accel_dev *accel_dev);
+	void (*enable_ints)(struct adf_accel_dev *accel_dev);
+	int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
+	void (*reset_device)(struct adf_accel_dev *accel_dev);
+	const char *fw_name;
+	const char *fw_mmp_name;
+	uint32_t fuses;
+	uint32_t accel_capabilities_mask;
+	uint32_t instance_id;
+	uint16_t accel_mask;
+	uint16_t ae_mask;
+	uint16_t tx_rings_mask;
+	uint8_t tx_rx_gap;
+	uint8_t num_banks;
+	uint8_t num_accel;
+	uint8_t num_logical_accel;
+	uint8_t num_engines;
+	uint8_t min_iov_compat_ver;
+} __packed;
+
+/* CSR write macro */
+#define ADF_CSR_WR(csr_base, csr_offset, val) \
+	__raw_writel(val, csr_base + csr_offset)
+
+/* CSR read macro */
+#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
+
+#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
+#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
+#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
+#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
+#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
+#define accel_to_pci_dev(accel_ptr) ((accel_ptr)->accel_pci_dev.pci_dev)
+
+struct adf_admin_comms;
+struct icp_qat_fw_loader_handle;
+struct adf_fw_loader_data {
+	struct icp_qat_fw_loader_handle *fw_loader;
+	const struct firmware *uof_fw;
+	const struct firmware *mmp_fw;
+};
+
+struct adf_accel_vf_info {
+	struct adf_accel_dev *accel_dev;
+	struct tasklet_struct vf2pf_bh_tasklet;
+	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+	struct ratelimit_state vf2pf_ratelimit;
+	u32 vf_nr;
+	bool init;
+};
+
+struct adf_accel_dev {
+	struct adf_etr_data *transport;
+	struct adf_hw_device_data *hw_device;
+	struct adf_cfg_device_data *cfg;
+	struct adf_fw_loader_data *fw_loader;
+	struct adf_admin_comms *admin;
+	struct list_head crypto_list;
+	unsigned long status;
+	atomic_t ref_count;
+	struct dentry *debugfs_dir;
+	struct list_head list;
+	struct module *owner;
+	struct adf_accel_pci accel_pci_dev;
+	union {
+		struct {
+			/* vf_info is non-NULL when SR-IOV is initialized */
+			struct adf_accel_vf_info *vf_info;
+		} pf;
+		struct {
+			char *irq_name;
+			struct tasklet_struct pf2vf_bh_tasklet;
+			struct mutex vf2pf_lock; /* protect CSR access */
+			struct completion iov_msg_completion;
+			uint8_t compatible;
+			uint8_t pf_version;
+		} vf;
+	};
+	bool is_vf;
+	u32 accel_id;
+} __packed;
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
new file mode 100644
index 0000000..a42fc42
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -0,0 +1,209 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include "adf_cfg.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	void *uof_addr, *mmp_addr;
+	u32 uof_size, mmp_size;
+
+	if (!hw_device->fw_name)
+		return 0;
+
+	if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
+			     &accel_dev->accel_pci_dev.pci_dev->dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
+			hw_device->fw_mmp_name);
+		return -EFAULT;
+	}
+	if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
+			     &accel_dev->accel_pci_dev.pci_dev->dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
+			hw_device->fw_name);
+		goto out_err;
+	}
+
+	uof_size = loader_data->uof_fw->size;
+	uof_addr = (void *)loader_data->uof_fw->data;
+	mmp_size = loader_data->mmp_fw->size;
+	mmp_addr = (void *)loader_data->mmp_fw->data;
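+	/* Download the MMP image first, then map the UOF object and write
+	 * all of its microcode images into the acceleration engines. */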
+	if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n");
+		goto out_err;
+	}
+	if (qat_uclo_map_obj(loader_data->fw_loader, uof_addr, uof_size)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to map FW\n");
+		goto out_err;
+	}
+	if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
+		goto out_err;
+	}
+	return 0;
+
+out_err:
+	adf_ae_fw_release(accel_dev);
+	return -EFAULT;
+}
+
+void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+	if (!hw_device->fw_name)
+		return;
+
+	qat_uclo_del_uof_obj(loader_data->fw_loader);
+	qat_hal_deinit(loader_data->fw_loader);
+	release_firmware(loader_data->uof_fw);
+	release_firmware(loader_data->mmp_fw);
+	loader_data->uof_fw = NULL;
+	loader_data->mmp_fw = NULL;
+	loader_data->fw_loader = NULL;
+}
+
+int adf_ae_start(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+	if (!hw_data->fw_name)
+		return 0;
+
+	for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+		if (hw_data->ae_mask & (1 << ae)) {
+			qat_hal_start(loader_data->fw_loader, ae, 0xFF);
+			ae_ctr++;
+		}
+	}
+	dev_info(&GET_DEV(accel_dev),
+		 "qat_dev%d started %d acceleration engines\n",
+		 accel_dev->accel_id, ae_ctr);
+	return 0;
+}
+
+int adf_ae_stop(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+	if (!hw_data->fw_name)
+		return 0;
+
+	for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+		if (hw_data->ae_mask & (1 << ae)) {
+			qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
+			ae_ctr++;
+		}
+	}
+	dev_info(&GET_DEV(accel_dev),
+		 "qat_dev%d stopped %d acceleration engines\n",
+		 accel_dev->accel_id, ae_ctr);
+	return 0;
+}
+
+static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+	qat_hal_reset(loader_data->fw_loader);
+	if (qat_hal_clr_reset(loader_data->fw_loader))
+		return -EFAULT;
+
+	return 0;
+}
+
+int adf_ae_init(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+	if (!hw_device->fw_name)
+		return 0;
+
+	loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
+	if (!loader_data)
+		return -ENOMEM;
+
+	accel_dev->fw_loader = loader_data;
+	if (qat_hal_init(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to init the AEs\n");
+		kfree(loader_data);
+		return -EFAULT;
+	}
+	if (adf_ae_reset(accel_dev, 0)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to reset the AEs\n");
+		qat_hal_deinit(loader_data->fw_loader);
+		kfree(loader_data);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+	if (!hw_device->fw_name)
+		return 0;
+
+	qat_hal_deinit(loader_data->fw_loader);
+	kfree(accel_dev->fw_loader);
+	accel_dev->fw_loader = NULL;
+	return 0;
+}
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
new file mode 100644
index 0000000..3744b22
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -0,0 +1,296 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_fw_init_admin.h"
+
+/* Admin Message Registers */
+#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
+#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
+#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
+#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_ADMINMSG_LEN 32
+
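+/* Constant table pushed to the firmware with the CONSTANTS_CFG admin
+ * command; judging by the values it holds setup constants such as the
+ * SHA-1/SHA-2 initial state words. */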
+static const u8 const_tab[1024] __aligned(1024) = {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
+0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
+0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
+0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
+0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
+0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
+0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
+0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
+0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
+0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
+0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
+0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
+0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
+0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+struct adf_admin_comms {
+	dma_addr_t phy_addr;
+	dma_addr_t const_tbl_addr;
+	void *virt_addr;
+	void *virt_tbl_addr;
+	void __iomem *mailbox_addr;
+	struct mutex lock;	/* protects adf_admin_comms struct */
+};
+
+static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
+				  void *in, void *out)
+{
+	struct adf_admin_comms *admin = accel_dev->admin;
+	int offset = ae * ADF_ADMINMSG_LEN * 2;
+	void __iomem *mailbox = admin->mailbox_addr;
+	int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
+	int times, received;
+
+	mutex_lock(&admin->lock);
+
+	if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+		mutex_unlock(&admin->lock);
+		return -EAGAIN;
+	}
+
+	memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+	ADF_CSR_WR(mailbox, mb_offset, 1);
+	received = 0;
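+	/* The AE clears the mailbox once it has written its response; poll
+	 * for up to one second (50 x 20 ms) before giving up. */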
+	for (times = 0; times < 50; times++) {
+		msleep(20);
+		if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
+			received = 1;
+			break;
+		}
+	}
+	if (received)
+		memcpy(out, admin->virt_addr + offset +
+		       ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+	else
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send admin msg to accelerator\n");
+
+	mutex_unlock(&admin->lock);
+	return received ? 0 : -EFAULT;
+}
+
+static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct icp_qat_fw_init_admin_req req;
+	struct icp_qat_fw_init_admin_resp resp;
+	int i;
+
+	memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
+	req.init_admin_cmd_id = cmd;
+
+	if (cmd == ICP_QAT_FW_CONSTANTS_CFG) {
+		req.init_cfg_sz = 1024;
+		req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
+	}
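+	/* Send the command to every acceleration engine and fail if any of
+	 * them reports a non-zero status in its response. */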
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
+		if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
+		    resp.init_resp_hdr.status)
+			return -EFAULT;
+	}
+	return 0;
+}
+
+/**
+ * adf_send_admin_init() - Function sends init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function sends an admin init message to the FW.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+{
+	int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+
+	if (ret)
+		return ret;
+	return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG);
+}
+EXPORT_SYMBOL_GPL(adf_send_admin_init);
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+	struct adf_admin_comms *admin;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *csr = pmisc->virt_addr;
+	void __iomem *mailbox = (void __iomem *)((uintptr_t)csr +
+				 ADF_DH895XCC_MAILBOX_BASE_OFFSET);
+	u64 reg_val;
+
+	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
+			     dev_to_node(&GET_DEV(accel_dev)));
+	if (!admin)
+		return -ENOMEM;
+	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+					       &admin->phy_addr, GFP_KERNEL);
+	if (!admin->virt_addr) {
+		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+		kfree(admin);
+		return -ENOMEM;
+	}
+
+	admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev),
+						   PAGE_SIZE,
+						   &admin->const_tbl_addr,
+						   GFP_KERNEL);
+	if (!admin->virt_tbl_addr) {
+		dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
+		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+				  admin->virt_addr, admin->phy_addr);
+		kfree(admin);
+		return -ENOMEM;
+	}
+
+	memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab));
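+	/* Program the upper and lower halves of the admin message buffer's
+	 * DMA address into the admin message address registers. */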
+	reg_val = (u64)admin->phy_addr;
+	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
+	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
+	mutex_init(&admin->lock);
+	admin->mailbox_addr = mailbox;
+	accel_dev->admin = admin;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_comms);
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+	struct adf_admin_comms *admin = accel_dev->admin;
+
+	if (!admin)
+		return;
+
+	if (admin->virt_addr)
+		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+				  admin->virt_addr, admin->phy_addr);
+	if (admin->virt_tbl_addr)
+		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+				  admin->virt_tbl_addr, admin->const_tbl_addr);
+
+	mutex_destroy(&admin->lock);
+	kfree(admin);
+	accel_dev->admin = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
new file mode 100644
index 0000000..9225d06
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -0,0 +1,271 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+
+static struct workqueue_struct *device_reset_wq;
+
+static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
+					   pci_channel_state_t state)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	dev_info(&pdev->dev, "Acceleration driver hardware error detected.\n");
+	if (!accel_dev) {
+		dev_err(&pdev->dev, "Can't find acceleration device\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	if (state == pci_channel_io_perm_failure) {
+		dev_err(&pdev->dev, "Can't recover from device error\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* reset dev data */
+struct adf_reset_dev_data {
+	int mode;
+	struct adf_accel_dev *accel_dev;
+	struct completion compl;
+	struct work_struct reset_work;
+};
+
+void adf_reset_sbr(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+	struct pci_dev *parent = pdev->bus->self;
+	uint16_t bridge_ctl = 0;
+
+	if (!parent)
+		parent = pdev;
+
+	if (!pci_wait_for_pending_transaction(pdev))
+		dev_info(&GET_DEV(accel_dev),
+			 "Transaction still in progress. Proceeding\n");
+
+	dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n");
+
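+	/* Assert the bridge's secondary bus reset bit, hold it for 100 ms,
+	 * then de-assert it and give the device another 100 ms to recover. */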
+	pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
+	bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
+	pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+	msleep(100);
+	bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+	pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+	msleep(100);
+}
+EXPORT_SYMBOL_GPL(adf_reset_sbr);
+
+void adf_reset_flr(struct adf_accel_dev *accel_dev)
+{
+	pcie_flr(accel_to_pci_dev(accel_dev));
+}
+EXPORT_SYMBOL_GPL(adf_reset_flr);
+
+void adf_dev_restore(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	if (hw_device->reset_device) {
+		dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
+			 accel_dev->accel_id);
+		hw_device->reset_device(accel_dev);
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+	}
+}
+
+static void adf_device_reset_worker(struct work_struct *work)
+{
+	struct adf_reset_dev_data *reset_data =
+		  container_of(work, struct adf_reset_dev_data, reset_work);
+	struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+
+	adf_dev_restarting_notify(accel_dev);
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
+		/* The device hung and we can't restart it, so stop here */
+		dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+		kfree(reset_data);
+		WARN(1, "QAT: device restart failed. Device is unusable\n");
+		return;
+	}
+	adf_dev_restarted_notify(accel_dev);
+	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+
+	/* The dev is back alive. Notify the caller if in sync mode */
+	if (reset_data->mode == ADF_DEV_RESET_SYNC)
+		complete(&reset_data->compl);
+	else
+		kfree(reset_data);
+}
+
+static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+				      enum adf_dev_reset_mode mode)
+{
+	struct adf_reset_dev_data *reset_data;
+
+	if (!adf_dev_started(accel_dev) ||
+	    test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+		return 0;
+
+	set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+	reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
+	if (!reset_data)
+		return -ENOMEM;
+	reset_data->accel_dev = accel_dev;
+	init_completion(&reset_data->compl);
+	reset_data->mode = mode;
+	INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
+	queue_work(device_reset_wq, &reset_data->reset_work);
+
+	/* If in sync mode wait for the result */
+	if (mode == ADF_DEV_RESET_SYNC) {
+		int ret = 0;
+		/* Maximum device reset time is 10 seconds */
+		unsigned long wait_jiffies = msecs_to_jiffies(10000);
+		unsigned long timeout = wait_for_completion_timeout(
+				   &reset_data->compl, wait_jiffies);
+		if (!timeout) {
+			dev_err(&GET_DEV(accel_dev),
+				"Reset device timeout expired\n");
+			ret = -EFAULT;
+		}
+		kfree(reset_data);
+		return ret;
+	}
+	return 0;
+}
+
+static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Can't find acceleration device\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+	if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void adf_resume(struct pci_dev *pdev)
+{
+	dev_info(&pdev->dev, "Acceleration driver reset completed\n");
+	dev_info(&pdev->dev, "Device is up and running\n");
+}
+
+static const struct pci_error_handlers adf_err_handler = {
+	.error_detected = adf_error_detected,
+	.slot_reset = adf_slot_reset,
+	.resume = adf_resume,
+};
+
+/**
+ * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ * @adf:        PCI device driver owning the given acceleration device.
+ *
+ * Function enables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	adf->err_handler = &adf_err_handler;
+	pci_enable_pcie_error_reporting(pdev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_aer);
+
+/**
+ * adf_disable_aer() - Disable Advanced Error Reporting for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_disable_aer(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	pci_disable_pcie_error_reporting(pdev);
+}
+EXPORT_SYMBOL_GPL(adf_disable_aer);
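+
+/*
+ * Illustrative usage sketch (not a call site in this file): a QAT
+ * device-specific PCI driver typically enables AER near the end of its
+ * probe routine and disables it again on remove, e.g.
+ *
+ *	ret = adf_enable_aer(accel_dev, &example_pci_driver);	(probe)
+ *	...
+ *	adf_disable_aer(accel_dev);				(remove)
+ *
+ * where example_pci_driver is a placeholder for that driver's own
+ * struct pci_driver.
+ */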
+
+int adf_init_aer(void)
+{
+	device_reset_wq = alloc_workqueue("qat_device_reset_wq",
+					  WQ_MEM_RECLAIM, 0);
+	return !device_reset_wq ? -EFAULT : 0;
+}
+
+void adf_exit_aer(void)
+{
+	if (device_reset_wq)
+		destroy_workqueue(device_reset_wq);
+	device_reset_wq = NULL;
+}
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
new file mode 100644
index 0000000..d087979
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -0,0 +1,367 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static DEFINE_MUTEX(qat_cfg_read_lock);
+
+static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
+{
+	struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+	mutex_lock(&qat_cfg_read_lock);
+	return seq_list_start(&dev_cfg->sec_list, *pos);
+}
+
+static int qat_dev_cfg_show(struct seq_file *sfile, void *v)
+{
+	struct list_head *list;
+	struct adf_cfg_section *sec =
+				list_entry(v, struct adf_cfg_section, list);
+
+	seq_printf(sfile, "[%s]\n", sec->name);
+	list_for_each(list, &sec->param_head) {
+		struct adf_cfg_key_val *ptr =
+			list_entry(list, struct adf_cfg_key_val, list);
+		seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val);
+	}
+	return 0;
+}
+
+static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+	return seq_list_next(v, &dev_cfg->sec_list, pos);
+}
+
+static void qat_dev_cfg_stop(struct seq_file *sfile, void *v)
+{
+	mutex_unlock(&qat_cfg_read_lock);
+}
+
+static const struct seq_operations qat_dev_cfg_sops = {
+	.start = qat_dev_cfg_start,
+	.next = qat_dev_cfg_next,
+	.stop = qat_dev_cfg_stop,
+	.show = qat_dev_cfg_show
+};
+
+static int qat_dev_cfg_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &qat_dev_cfg_sops);
+
+	if (!ret) {
+		struct seq_file *seq_f = file->private_data;
+
+		seq_f->private = inode->i_private;
+	}
+	return ret;
+}
+
+static const struct file_operations qat_dev_cfg_fops = {
+	.open = qat_dev_cfg_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+/**
+ * adf_cfg_dev_add() - Create an acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function creates a configuration table for the given acceleration device.
+ * The table stores device specific config values.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
+{
+	struct adf_cfg_device_data *dev_cfg_data;
+
+	dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL);
+	if (!dev_cfg_data)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&dev_cfg_data->sec_list);
+	init_rwsem(&dev_cfg_data->lock);
+	accel_dev->cfg = dev_cfg_data;
+
+	/* accel_dev->debugfs_dir should always be non-NULL here */
+	dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR,
+						  accel_dev->debugfs_dir,
+						  dev_cfg_data,
+						  &qat_dev_cfg_fops);
+	if (!dev_cfg_data->debug) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to create qat cfg debugfs entry.\n");
+		kfree(dev_cfg_data);
+		accel_dev->cfg = NULL;
+		return -EFAULT;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
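+
+/*
+ * Usage sketch (illustrative only; surrounding probe error handling is
+ * elided): the table is typically created once per device after the
+ * debugfs directory exists, and torn down with adf_cfg_dev_remove().
+ *
+ *	ret = adf_cfg_dev_add(accel_dev);
+ *	if (ret)
+ *		goto err_cfg;
+ */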
+
+static void adf_cfg_section_del_all(struct list_head *head);
+
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
+{
+	struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+	down_write(&dev_cfg_data->lock);
+	adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+	up_write(&dev_cfg_data->lock);
+	clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+}
+
+/**
+ * adf_cfg_dev_remove() - Clears acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function removes configuration table from the given acceleration device
+ * and frees all allocated memory.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
+{
+	struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+	if (!dev_cfg_data)
+		return;
+
+	down_write(&dev_cfg_data->lock);
+	adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+	up_write(&dev_cfg_data->lock);
+	debugfs_remove(dev_cfg_data->debug);
+	kfree(dev_cfg_data);
+	accel_dev->cfg = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
+
+static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
+			       struct adf_cfg_section *sec)
+{
+	list_add_tail(&new->list, &sec->param_head);
+}
+
+static void adf_cfg_keyval_del_all(struct list_head *head)
+{
+	struct list_head *list_ptr, *tmp;
+
+	list_for_each_prev_safe(list_ptr, tmp, head) {
+		struct adf_cfg_key_val *ptr =
+			list_entry(list_ptr, struct adf_cfg_key_val, list);
+		list_del(list_ptr);
+		kfree(ptr);
+	}
+}
+
+static void adf_cfg_section_del_all(struct list_head *head)
+{
+	struct adf_cfg_section *ptr;
+	struct list_head *list, *tmp;
+
+	list_for_each_prev_safe(list, tmp, head) {
+		ptr = list_entry(list, struct adf_cfg_section, list);
+		adf_cfg_keyval_del_all(&ptr->param_head);
+		list_del(list);
+		kfree(ptr);
+	}
+}
+
+static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
+						      const char *key)
+{
+	struct list_head *list;
+
+	list_for_each(list, &s->param_head) {
+		struct adf_cfg_key_val *ptr =
+			list_entry(list, struct adf_cfg_key_val, list);
+		if (!strcmp(ptr->key, key))
+			return ptr;
+	}
+	return NULL;
+}
+
+static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
+						const char *sec_name)
+{
+	struct adf_cfg_device_data *cfg = accel_dev->cfg;
+	struct list_head *list;
+
+	list_for_each(list, &cfg->sec_list) {
+		struct adf_cfg_section *ptr =
+			list_entry(list, struct adf_cfg_section, list);
+		if (!strcmp(ptr->name, sec_name))
+			return ptr;
+	}
+	return NULL;
+}
+
+static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
+			       const char *sec_name,
+			       const char *key_name,
+			       char *val)
+{
+	struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
+	struct adf_cfg_key_val *keyval = NULL;
+
+	if (sec)
+		keyval = adf_cfg_key_value_find(sec, key_name);
+	if (keyval) {
+		memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+		return 0;
+	}
+	return -1;
+}
+
+/**
+ * adf_cfg_add_key_value_param() - Add key-value config entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @section_name: Name of the section where the param will be added
+ * @key: The key string
+ * @val: Value for the given @key
+ * @type: Type - string, int or address
+ *
+ * Function adds a configuration key-value entry in the appropriate section
+ * of the given acceleration device's configuration table.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+				const char *section_name,
+				const char *key, const void *val,
+				enum adf_cfg_val_type type)
+{
+	struct adf_cfg_device_data *cfg = accel_dev->cfg;
+	struct adf_cfg_key_val *key_val;
+	struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
+							   section_name);
+	if (!section)
+		return -EFAULT;
+
+	key_val = kzalloc(sizeof(*key_val), GFP_KERNEL);
+	if (!key_val)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&key_val->list);
+	strlcpy(key_val->key, key, sizeof(key_val->key));
+
+	if (type == ADF_DEC) {
+		snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+			 "%ld", (*((long *)val)));
+	} else if (type == ADF_STR) {
+		strlcpy(key_val->val, (char *)val, sizeof(key_val->val));
+	} else if (type == ADF_HEX) {
+		snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+			 "0x%lx", (unsigned long)val);
+	} else {
+		dev_err(&GET_DEV(accel_dev), "Unknown type given.\n");
+		kfree(key_val);
+		return -1;
+	}
+	key_val->type = type;
+	down_write(&cfg->lock);
+	adf_cfg_keyval_add(key_val, section);
+	up_write(&cfg->lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
+
+/**
+ * adf_cfg_section_add() - Add config section entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @name: Name of the section
+ *
+ * Function adds configuration section where key - value entries
+ * will be stored.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
+{
+	struct adf_cfg_device_data *cfg = accel_dev->cfg;
+	struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name);
+
+	if (sec)
+		return 0;
+
+	sec = kzalloc(sizeof(*sec), GFP_KERNEL);
+	if (!sec)
+		return -ENOMEM;
+
+	strlcpy(sec->name, name, sizeof(sec->name));
+	INIT_LIST_HEAD(&sec->param_head);
+	down_write(&cfg->lock);
+	list_add_tail(&sec->list, &cfg->sec_list);
+	up_write(&cfg->lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_section_add);
+
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+			    const char *section, const char *name,
+			    char *value)
+{
+	struct adf_cfg_device_data *cfg = accel_dev->cfg;
+	int ret;
+
+	down_read(&cfg->lock);
+	ret = adf_cfg_key_val_get(accel_dev, section, name, value);
+	up_read(&cfg->lock);
+	return ret;
+}
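+
+/*
+ * End-to-end configuration sketch (illustrative only; the section and key
+ * choices are examples, a real device-specific driver picks its own from
+ * adf_cfg_strings.h):
+ *
+ *	unsigned long cy = 2;
+ *	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ *
+ *	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+ *		goto err;
+ *	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ *					ADF_NUM_CY, &cy, ADF_DEC))
+ *		goto err;
+ *	if (!adf_cfg_get_param_value(accel_dev, ADF_KERNEL_SEC,
+ *				     ADF_NUM_CY, val))
+ *		dev_dbg(&GET_DEV(accel_dev), "NumberCyInstances = %s\n", val);
+ */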
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h
new file mode 100644
index 0000000..6a9c6f6
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg.h
@@ -0,0 +1,87 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_H_
+#define ADF_CFG_H_
+
+#include <linux/list.h>
+#include <linux/rwsem.h>
+#include <linux/debugfs.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_cfg_key_val {
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+	enum adf_cfg_val_type type;
+	struct list_head list;
+};
+
+struct adf_cfg_section {
+	char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+	struct list_head list;
+	struct list_head param_head;
+};
+
+struct adf_cfg_device_data {
+	struct list_head sec_list;
+	struct dentry *debug;
+	struct rw_semaphore lock;
+};
+
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+				const char *section_name,
+				const char *key, const void *val,
+				enum adf_cfg_val_type type);
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+			    const char *section, const char *name, char *value);
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
new file mode 100644
index 0000000..1211261
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h
@@ -0,0 +1,106 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_COMMON_H_
+#define ADF_CFG_COMMON_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define ADF_CFG_MAX_STR_LEN 64
+#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_BASE_DEC 10
+#define ADF_CFG_BASE_HEX 16
+#define ADF_CFG_ALL_DEVICES 0xFE
+#define ADF_CFG_NO_DEVICE 0xFF
+#define ADF_CFG_AFFINITY_WHATEVER 0xFF
+#define MAX_DEVICE_NAME_SIZE 32
+#define ADF_MAX_DEVICES (32 * 32)
+#define ADF_DEVS_ARRAY_SIZE BITS_TO_LONGS(ADF_MAX_DEVICES)
+
+enum adf_cfg_val_type {
+	ADF_DEC,
+	ADF_HEX,
+	ADF_STR
+};
+
+enum adf_device_type {
+	DEV_UNKNOWN = 0,
+	DEV_DH895XCC,
+	DEV_DH895XCCVF,
+	DEV_C62X,
+	DEV_C62XVF,
+	DEV_C3XXX,
+	DEV_C3XXXVF
+};
+
+struct adf_dev_status_info {
+	enum adf_device_type type;
+	u32 accel_id;
+	u32 instance_id;
+	uint8_t num_ae;
+	uint8_t num_accel;
+	uint8_t num_logical_accel;
+	uint8_t banks_per_accel;
+	uint8_t state;
+	uint8_t bus;
+	uint8_t dev;
+	uint8_t fun;
+	char name[MAX_DEVICE_NAME_SIZE];
+};
+
+#define ADF_CTL_IOC_MAGIC 'a'
+#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \
+		struct adf_user_cfg_ctl_data)
+#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \
+		struct adf_user_cfg_ctl_data)
+#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
+		struct adf_user_cfg_ctl_data)
+#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t)
+#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t)
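+
+/*
+ * Userspace sketch for the ioctls above (illustrative only; assumes the
+ * control node created by the adf_ctl driver is visible as
+ * /dev/qat_adf_ctl):
+ *
+ *	int fd = open("/dev/qat_adf_ctl", O_RDWR);
+ *	uint32_t num_devices = 0;
+ *
+ *	if (fd >= 0 && !ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices))
+ *		printf("%u QAT devices\n", num_devices);
+ */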
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
new file mode 100644
index 0000000..7632ed0
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -0,0 +1,81 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_STRINGS_H_
+#define ADF_CFG_STRINGS_H_
+
+#define ADF_GENERAL_SEC "GENERAL"
+#define ADF_KERNEL_SEC "KERNEL"
+#define ADF_ACCEL_SEC "Accelerator"
+#define ADF_NUM_CY "NumberCyInstances"
+#define ADF_NUM_DC "NumberDcInstances"
+#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests"
+#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests"
+#define ADF_RING_DC_SIZE "NumConcurrentRequests"
+#define ADF_RING_ASYM_TX "RingAsymTx"
+#define ADF_RING_SYM_TX "RingSymTx"
+#define ADF_RING_ASYM_RX "RingAsymRx"
+#define ADF_RING_SYM_RX "RingSymRx"
+#define ADF_RING_DC_TX "RingTx"
+#define ADF_RING_DC_RX "RingRx"
+#define ADF_ETRMGR_BANK "Bank"
+#define ADF_RING_BANK_NUM "BankNumber"
+#define ADF_CY "Cy"
+#define ADF_DC "Dc"
+#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
+#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
+	ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED
+#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs"
+#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \
+	ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCE_TIMER
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses"
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \
+	ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_MSG_ENABLED
+#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity"
+#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
+	ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
+#define ADF_ACCEL_STR "Accelerator%d"
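+
+/*
+ * The *_FORMAT macros above paste a bank number into the base key name,
+ * e.g. (illustrative sketch; ADF_CFG_MAX_KEY_LEN_IN_BYTES comes from
+ * adf_cfg_common.h):
+ *
+ *	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ *
+ *	snprintf(key, sizeof(key), ADF_ETRMGR_CORE_AFFINITY_FORMAT, bank_nr);
+ *
+ * which yields "Bank0CoreAffinity" when bank_nr == 0.
+ */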
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h
new file mode 100644
index 0000000..b5484bf
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h
@@ -0,0 +1,82 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_USER_H_
+#define ADF_CFG_USER_H_
+
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_user_cfg_key_val {
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+	union {
+		struct adf_user_cfg_key_val *next;
+		uint64_t padding3;
+	};
+	enum adf_cfg_val_type type;
+} __packed;
+
+struct adf_user_cfg_section {
+	char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+	union {
+		struct adf_user_cfg_key_val *params;
+		uint64_t padding1;
+	};
+	union {
+		struct adf_user_cfg_section *next;
+		uint64_t padding3;
+	};
+} __packed;
+
+struct adf_user_cfg_ctl_data {
+	union {
+		struct adf_user_cfg_section *config_section;
+		uint64_t padding;
+	};
+	uint8_t device_id;
+} __packed;
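+
+/*
+ * Userspace sketch (illustrative only): the structures above form singly
+ * linked lists that the driver copies in section by section when handling
+ * IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS. The key, value and section names
+ * here are example choices.
+ *
+ *	struct adf_user_cfg_key_val kv = {
+ *		.key = "NumberCyInstances", .val = "2", .type = ADF_STR,
+ *	};
+ *	struct adf_user_cfg_section sec = {
+ *		.name = "KERNEL", .params = &kv,
+ *	};
+ *	struct adf_user_cfg_ctl_data cfg = {
+ *		.config_section = &sec, .device_id = 0,
+ *	};
+ *
+ *	ioctl(fd, IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS, &cfg);
+ */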
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
new file mode 100644
index 0000000..5c4c0a2
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -0,0 +1,294 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DRV_H
+#define ADF_DRV_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_loader_handle.h"
+#include "icp_qat_hal.h"
+
+#define ADF_MAJOR_VERSION	0
+#define ADF_MINOR_VERSION	6
+#define ADF_BUILD_VERSION	0
+#define ADF_DRV_VERSION		__stringify(ADF_MAJOR_VERSION) "." \
+				__stringify(ADF_MINOR_VERSION) "." \
+				__stringify(ADF_BUILD_VERSION)
+
+#define ADF_STATUS_RESTARTING 0
+#define ADF_STATUS_STARTING 1
+#define ADF_STATUS_CONFIGURED 2
+#define ADF_STATUS_STARTED 3
+#define ADF_STATUS_AE_INITIALISED 4
+#define ADF_STATUS_AE_UCODE_LOADED 5
+#define ADF_STATUS_AE_STARTED 6
+#define ADF_STATUS_PF_RUNNING 7
+#define ADF_STATUS_IRQ_ALLOCATED 8
+
+enum adf_dev_reset_mode {
+	ADF_DEV_RESET_ASYNC = 0,
+	ADF_DEV_RESET_SYNC
+};
+
+enum adf_event {
+	ADF_EVENT_INIT = 0,
+	ADF_EVENT_START,
+	ADF_EVENT_STOP,
+	ADF_EVENT_SHUTDOWN,
+	ADF_EVENT_RESTARTING,
+	ADF_EVENT_RESTARTED,
+};
+
+struct service_hndl {
+	int (*event_hld)(struct adf_accel_dev *accel_dev,
+			 enum adf_event event);
+	unsigned long init_status[ADF_DEVS_ARRAY_SIZE];
+	unsigned long start_status[ADF_DEVS_ARRAY_SIZE];
+	char *name;
+	struct list_head list;
+};
+
+static inline int get_current_node(void)
+{
+	return topology_physical_package_id(smp_processor_id());
+}
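+
+/*
+ * A typical use of get_current_node() is picking a NUMA-local crypto
+ * instance (illustrative sketch):
+ *
+ *	struct qat_crypto_instance *inst =
+ *			qat_crypto_get_instance_node(get_current_node());
+ */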
+
+int adf_service_register(struct service_hndl *service);
+int adf_service_unregister(struct service_hndl *service);
+
+int adf_dev_init(struct adf_accel_dev *accel_dev);
+int adf_dev_start(struct adf_accel_dev *accel_dev);
+void adf_dev_stop(struct adf_accel_dev *accel_dev);
+void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
+void adf_clean_vf_map(bool);
+
+int adf_ctl_dev_register(void);
+void adf_ctl_dev_unregister(void);
+int adf_processes_dev_register(void);
+void adf_processes_dev_unregister(void);
+
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf);
+struct list_head *adf_devmgr_get_head(void);
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
+struct adf_accel_dev *adf_devmgr_get_first(void);
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
+int adf_devmgr_verify_id(uint32_t id);
+void adf_devmgr_get_num_dev(uint32_t *num);
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
+int adf_dev_started(struct adf_accel_dev *accel_dev);
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev);
+int adf_ae_init(struct adf_accel_dev *accel_dev);
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev);
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev);
+void adf_ae_fw_release(struct adf_accel_dev *accel_dev);
+int adf_ae_start(struct adf_accel_dev *accel_dev);
+int adf_ae_stop(struct adf_accel_dev *accel_dev);
+
+int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
+void adf_disable_aer(struct adf_accel_dev *accel_dev);
+void adf_reset_sbr(struct adf_accel_dev *accel_dev);
+void adf_reset_flr(struct adf_accel_dev *accel_dev);
+void adf_dev_restore(struct adf_accel_dev *accel_dev);
+int adf_init_aer(void);
+void adf_exit_aer(void);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb(struct adf_etr_ring_data *ring);
+
+int adf_dev_get(struct adf_accel_dev *accel_dev);
+void adf_dev_put(struct adf_accel_dev *accel_dev);
+int adf_dev_in_use(struct adf_accel_dev *accel_dev);
+int adf_init_etr_data(struct adf_accel_dev *accel_dev);
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
+int qat_crypto_register(void);
+int qat_crypto_unregister(void);
+int qat_crypto_dev_config(struct adf_accel_dev *accel_dev);
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
+void qat_crypto_put_instance(struct qat_crypto_instance *inst);
+void qat_alg_callback(void *resp);
+void qat_alg_asym_callback(void *resp);
+int qat_algs_register(void);
+void qat_algs_unregister(void);
+int qat_asym_algs_register(void);
+void qat_asym_algs_unregister(void);
+
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
+
+int qat_hal_init(struct adf_accel_dev *accel_dev);
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		   unsigned int ctx_mask);
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		  unsigned int ctx_mask);
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+			  unsigned char ae, unsigned int ctx_mask);
+int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
+			    unsigned int ae);
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+			   unsigned char ae, enum icp_qat_uof_regtype lm_type,
+			   unsigned char mode);
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+			    unsigned char ae, unsigned char mode);
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+			   unsigned char ae, unsigned char mode);
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+		    unsigned char ae, unsigned int ctx_mask, unsigned int upc);
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+		       unsigned char ae, unsigned int uaddr,
+		       unsigned int words_num, uint64_t *uword);
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		     unsigned int uword_addr, unsigned int words_num,
+		     unsigned int *data);
+int qat_hal_get_ins_num(void);
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+			unsigned char ae,
+			struct icp_qat_uof_batch_init *lm_init_header);
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+		     unsigned char ae, unsigned char ctx_mask,
+		     enum icp_qat_uof_regtype reg_type,
+		     unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+			 unsigned char ae, unsigned char ctx_mask,
+			 enum icp_qat_uof_regtype reg_type,
+			 unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+			 unsigned char ae, unsigned char ctx_mask,
+			 enum icp_qat_uof_regtype reg_type,
+			 unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+		    unsigned char ae, unsigned char ctx_mask,
+		    unsigned short reg_num, unsigned int regdata);
+int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
+		  unsigned char ae, unsigned short lm_addr, unsigned int value);
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
+void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
+int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr,
+		       int mem_size);
+int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
+		     void *addr_ptr, int mem_size);
+#if defined(CONFIG_PCI_IOV)
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
+void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+				  uint32_t vf_mask);
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+				 uint32_t vf_mask);
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
+int adf_init_vf_wq(void);
+void adf_exit_vf_wq(void);
+#else
+static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+	return 0;
+}
+
+static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline int adf_init_pf_wq(void)
+{
+	return 0;
+}
+
+static inline void adf_exit_pf_wq(void)
+{
+}
+
+static inline int adf_init_vf_wq(void)
+{
+	return 0;
+}
+
+static inline void adf_exit_vf_wq(void)
+{
+}
+
+#endif
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
new file mode 100644
index 0000000..abc7a7f
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -0,0 +1,512 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/crypto.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_user.h"
+
+#define DEVICE_NAME "qat_adf_ctl"
+
+static DEFINE_MUTEX(adf_ctl_lock);
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+
+static const struct file_operations adf_ctl_ops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = adf_ctl_ioctl,
+	.compat_ioctl = adf_ctl_ioctl,
+};
+
+struct adf_ctl_drv_info {
+	unsigned int major;
+	struct cdev drv_cdev;
+	struct class *drv_class;
+};
+
+static struct adf_ctl_drv_info adf_ctl_drv;
+
+static void adf_chr_drv_destroy(void)
+{
+	device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
+	cdev_del(&adf_ctl_drv.drv_cdev);
+	class_destroy(adf_ctl_drv.drv_class);
+	unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
+}
+
+static int adf_chr_drv_create(void)
+{
+	dev_t dev_id;
+	struct device *drv_device;
+
+	if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
+		pr_err("QAT: unable to allocate chrdev region\n");
+		return -EFAULT;
+	}
+
+	adf_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(adf_ctl_drv.drv_class)) {
+		pr_err("QAT: class_create failed for adf_ctl\n");
+		goto err_chrdev_unreg;
+	}
+	adf_ctl_drv.major = MAJOR(dev_id);
+	cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
+	if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
+		pr_err("QAT: cdev add failed\n");
+		goto err_class_destr;
+	}
+
+	drv_device = device_create(adf_ctl_drv.drv_class, NULL,
+				   MKDEV(adf_ctl_drv.major, 0),
+				   NULL, DEVICE_NAME);
+	if (IS_ERR(drv_device)) {
+		pr_err("QAT: failed to create device\n");
+		goto err_cdev_del;
+	}
+	return 0;
+err_cdev_del:
+	cdev_del(&adf_ctl_drv.drv_cdev);
+err_class_destr:
+	class_destroy(adf_ctl_drv.drv_class);
+err_chrdev_unreg:
+	unregister_chrdev_region(dev_id, 1);
+	return -EFAULT;
+}
+
+static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
+				   unsigned long arg)
+{
+	struct adf_user_cfg_ctl_data *cfg_data;
+
+	cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
+	if (!cfg_data)
+		return -ENOMEM;
+
+	/* Initialize device id to NO DEVICE as 0 is a valid device id */
+	cfg_data->device_id = ADF_CFG_NO_DEVICE;
+
+	if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
+		pr_err("QAT: failed to copy from user cfg_data.\n");
+		kfree(cfg_data);
+		return -EIO;
+	}
+
+	*ctl_data = cfg_data;
+	return 0;
+}
+
+static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
+				  const char *section,
+				  const struct adf_user_cfg_key_val *key_val)
+{
+	if (key_val->type == ADF_HEX) {
+		long *ptr = (long *)key_val->val;
+		long val = *ptr;
+
+		if (adf_cfg_add_key_value_param(accel_dev, section,
+						key_val->key, (void *)val,
+						key_val->type)) {
+			dev_err(&GET_DEV(accel_dev),
+				"failed to add hex keyvalue.\n");
+			return -EFAULT;
+		}
+	} else {
+		if (adf_cfg_add_key_value_param(accel_dev, section,
+						key_val->key, key_val->val,
+						key_val->type)) {
+			dev_err(&GET_DEV(accel_dev),
+				"failed to add keyvalue.\n");
+			return -EFAULT;
+		}
+	}
+	return 0;
+}
+
+static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
+				   struct adf_user_cfg_ctl_data *ctl_data)
+{
+	struct adf_user_cfg_key_val key_val;
+	struct adf_user_cfg_key_val *params_head;
+	struct adf_user_cfg_section section, *section_head;
+
+	section_head = ctl_data->config_section;
+
+	while (section_head) {
+		if (copy_from_user(&section, (void __user *)section_head,
+				   sizeof(*section_head))) {
+			dev_err(&GET_DEV(accel_dev),
+				"failed to copy section info\n");
+			goto out_err;
+		}
+
+		if (adf_cfg_section_add(accel_dev, section.name)) {
+			dev_err(&GET_DEV(accel_dev),
+				"failed to add section.\n");
+			goto out_err;
+		}
+
+		params_head = section.params;
+
+		while (params_head) {
+			if (copy_from_user(&key_val, (void __user *)params_head,
+					   sizeof(key_val))) {
+				dev_err(&GET_DEV(accel_dev),
+					"Failed to copy keyvalue.\n");
+				goto out_err;
+			}
+			if (adf_add_key_value_data(accel_dev, section.name,
+						   &key_val)) {
+				goto out_err;
+			}
+			params_head = key_val.next;
+		}
+		section_head = section.next;
+	}
+	return 0;
+out_err:
+	adf_cfg_del_all(accel_dev);
+	return -EFAULT;
+}
+
+static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
+				    unsigned long arg)
+{
+	int ret;
+	struct adf_user_cfg_ctl_data *ctl_data;
+	struct adf_accel_dev *accel_dev;
+
+	ret = adf_ctl_alloc_resources(&ctl_data, arg);
+	if (ret)
+		return ret;
+
+	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+	if (!accel_dev) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (adf_dev_started(accel_dev)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (adf_copy_key_value_data(accel_dev, ctl_data)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+out:
+	kfree(ctl_data);
+	return ret;
+}
+
+static int adf_ctl_is_device_in_use(int id)
+{
+	struct adf_accel_dev *dev;
+
+	list_for_each_entry(dev, adf_devmgr_get_head(), list) {
+		if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+			if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
+				dev_info(&GET_DEV(dev),
+					 "device qat_dev%d is busy\n",
+					 dev->accel_id);
+				return -EBUSY;
+			}
+		}
+	}
+	return 0;
+}
+
+static void adf_ctl_stop_devices(uint32_t id)
+{
+	struct adf_accel_dev *accel_dev;
+
+	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+			if (!adf_dev_started(accel_dev))
+				continue;
+
+			/* First pass: stop VFs only; PFs are stopped in the second loop */
+			if (!accel_dev->is_vf)
+				continue;
+
+			adf_dev_stop(accel_dev);
+			adf_dev_shutdown(accel_dev);
+		}
+	}
+
+	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+			if (!adf_dev_started(accel_dev))
+				continue;
+
+			adf_dev_stop(accel_dev);
+			adf_dev_shutdown(accel_dev);
+		}
+	}
+}
+
+static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
+				  unsigned long arg)
+{
+	int ret;
+	struct adf_user_cfg_ctl_data *ctl_data;
+
+	ret = adf_ctl_alloc_resources(&ctl_data, arg);
+	if (ret)
+		return ret;
+
+	if (adf_devmgr_verify_id(ctl_data->device_id)) {
+		pr_err("QAT: Device %d not found\n", ctl_data->device_id);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = adf_ctl_is_device_in_use(ctl_data->device_id);
+	if (ret)
+		goto out;
+
+	if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
+		pr_info("QAT: Stopping all acceleration devices.\n");
+	else
+		pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
+			ctl_data->device_id);
+
+	adf_ctl_stop_devices(ctl_data->device_id);
+
+out:
+	kfree(ctl_data);
+	return ret;
+}
+
+static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
+				   unsigned long arg)
+{
+	int ret;
+	struct adf_user_cfg_ctl_data *ctl_data;
+	struct adf_accel_dev *accel_dev;
+
+	ret = adf_ctl_alloc_resources(&ctl_data, arg);
+	if (ret)
+		return ret;
+
+	ret = -ENODEV;
+	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+	if (!accel_dev)
+		goto out;
+
+	if (!adf_dev_started(accel_dev)) {
+		dev_info(&GET_DEV(accel_dev),
+			 "Starting acceleration device qat_dev%d.\n",
+			 ctl_data->device_id);
+		ret = adf_dev_init(accel_dev);
+		if (!ret)
+			ret = adf_dev_start(accel_dev);
+	} else {
+		dev_info(&GET_DEV(accel_dev),
+			 "Acceleration device qat_dev%d already started.\n",
+			 ctl_data->device_id);
+	}
+	if (ret) {
+		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+			ctl_data->device_id);
+		adf_dev_stop(accel_dev);
+		adf_dev_shutdown(accel_dev);
+	}
+out:
+	kfree(ctl_data);
+	return ret;
+}
+
+static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
+					 unsigned long arg)
+{
+	uint32_t num_devices = 0;
+
+	adf_devmgr_get_num_dev(&num_devices);
+	if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
+				    unsigned long arg)
+{
+	struct adf_hw_device_data *hw_data;
+	struct adf_dev_status_info dev_info;
+	struct adf_accel_dev *accel_dev;
+
+	if (copy_from_user(&dev_info, (void __user *)arg,
+			   sizeof(struct adf_dev_status_info))) {
+		pr_err("QAT: failed to copy from user.\n");
+		return -EFAULT;
+	}
+
+	accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
+	if (!accel_dev)
+		return -ENODEV;
+
+	hw_data = accel_dev->hw_device;
+	dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
+	dev_info.num_ae = hw_data->get_num_aes(hw_data);
+	dev_info.num_accel = hw_data->get_num_accels(hw_data);
+	dev_info.num_logical_accel = hw_data->num_logical_accel;
+	dev_info.banks_per_accel = hw_data->num_banks
+					/ hw_data->num_logical_accel;
+	strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+	dev_info.instance_id = hw_data->instance_id;
+	dev_info.type = hw_data->dev_class->type;
+	dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
+	dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
+	dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
+
+	if (copy_to_user((void __user *)arg, &dev_info,
+			 sizeof(struct adf_dev_status_info))) {
+		dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	if (mutex_lock_interruptible(&adf_ctl_lock))
+		return -EFAULT;
+
+	switch (cmd) {
+	case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
+		ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
+		break;
+
+	case IOCTL_STOP_ACCEL_DEV:
+		ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
+		break;
+
+	case IOCTL_START_ACCEL_DEV:
+		ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
+		break;
+
+	case IOCTL_GET_NUM_DEVICES:
+		ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
+		break;
+
+	case IOCTL_STATUS_ACCEL_DEV:
+		ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
+		break;
+	default:
+		pr_err("QAT: Invalid ioctl\n");
+		ret = -EFAULT;
+		break;
+	}
+	mutex_unlock(&adf_ctl_lock);
+	return ret;
+}
+
+static int __init adf_register_ctl_device_driver(void)
+{
+	mutex_init(&adf_ctl_lock);
+
+	if (adf_chr_drv_create())
+		goto err_chr_dev;
+
+	if (adf_init_aer())
+		goto err_aer;
+
+	if (adf_init_pf_wq())
+		goto err_pf_wq;
+
+	if (adf_init_vf_wq())
+		goto err_vf_wq;
+
+	if (qat_crypto_register())
+		goto err_crypto_register;
+
+	return 0;
+
+err_crypto_register:
+	adf_exit_vf_wq();
+err_vf_wq:
+	adf_exit_pf_wq();
+err_pf_wq:
+	adf_exit_aer();
+err_aer:
+	adf_chr_drv_destroy();
+err_chr_dev:
+	mutex_destroy(&adf_ctl_lock);
+	return -EFAULT;
+}
+
+static void __exit adf_unregister_ctl_device_driver(void)
+{
+	adf_chr_drv_destroy();
+	adf_exit_aer();
+	adf_exit_vf_wq();
+	adf_exit_pf_wq();
+	qat_crypto_unregister();
+	adf_clean_vf_map(false);
+	mutex_destroy(&adf_ctl_lock);
+}
+
+module_init(adf_register_ctl_device_driver);
+module_exit(adf_unregister_ctl_device_driver);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_ALIAS_CRYPTO("intel_qat");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
new file mode 100644
index 0000000..2d06409
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -0,0 +1,493 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(accel_table);
+static LIST_HEAD(vfs_table);
+static DEFINE_MUTEX(table_lock);
+static uint32_t num_devices;
+static u8 id_map[ADF_MAX_DEVICES];
+
+struct vf_id_map {
+	u32 bdf;
+	u32 id;
+	u32 fake_id;
+	bool attached;
+	struct list_head list;
+};
+
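+/*
+ * Note on the derivation below (a sketch of the intent, assuming a
+ * device's VFs start at PCI slot 1 and are numbered consecutively):
+ * adf_get_vf_id() computes 8 * (slot - 1) + func, i.e. a per-bus VF
+ * index, and adf_get_vf_num() combines it with the bus number as
+ * (bus << 8) | id to form a BDF-like key used to look VFs up in
+ * vfs_table.
+ */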
+static int adf_get_vf_id(struct adf_accel_dev *vf)
+{
+	return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
+		PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
+		(PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
+}
+
+static int adf_get_vf_num(struct adf_accel_dev *vf)
+{
+	return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
+}
+
+static struct vf_id_map *adf_find_vf(u32 bdf)
+{
+	struct list_head *itr;
+
+	list_for_each(itr, &vfs_table) {
+		struct vf_id_map *ptr =
+			list_entry(itr, struct vf_id_map, list);
+
+		if (ptr->bdf == bdf)
+			return ptr;
+	}
+	return NULL;
+}
+
+static int adf_get_vf_real_id(u32 fake)
+{
+	struct list_head *itr;
+
+	list_for_each(itr, &vfs_table) {
+		struct vf_id_map *ptr =
+			list_entry(itr, struct vf_id_map, list);
+		if (ptr->fake_id == fake)
+			return ptr->id;
+	}
+	return -1;
+}
+
+/**
+ * adf_clean_vf_map() - Cleans VF id mappings
+ * @vf: flag indicating whether mappings are cleaned
+ *	for vfs only or for both vfs and pfs
+ *
+ * Function cleans internal ids for virtual functions.
+ */
+void adf_clean_vf_map(bool vf)
+{
+	struct vf_id_map *map;
+	struct list_head *ptr, *tmp;
+
+	mutex_lock(&table_lock);
+	list_for_each_safe(ptr, tmp, &vfs_table) {
+		map = list_entry(ptr, struct vf_id_map, list);
+		if (map->bdf != -1) {
+			id_map[map->id] = 0;
+			num_devices--;
+		}
+
+		if (vf && map->bdf == -1)
+			continue;
+
+		list_del(ptr);
+		kfree(map);
+	}
+	mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_clean_vf_map);
+
+/**
+ * adf_devmgr_update_class_index() - Update internal index
+ * @hw_data:  Pointer to internal device data.
+ *
+ * Function updates internal dev index for VFs
+ */
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
+{
+	struct adf_hw_device_class *class = hw_data->dev_class;
+	struct list_head *itr;
+	int i = 0;
+
+	list_for_each(itr, &accel_table) {
+		struct adf_accel_dev *ptr =
+				list_entry(itr, struct adf_accel_dev, list);
+
+		if (ptr->hw_device->dev_class == class)
+			ptr->hw_device->instance_id = i++;
+
+		if (i == class->instances)
+			break;
+	}
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
+
+static unsigned int adf_find_free_id(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ADF_MAX_DEVICES; i++) {
+		if (!id_map[i]) {
+			id_map[i] = 1;
+			return i;
+		}
+	}
+	return ADF_MAX_DEVICES + 1;
+}
+
+/**
+ * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
+ * @accel_dev:  Pointer to acceleration device.
+ * @pf:		Corresponding PF if the accel_dev is a VF
+ *
+ * Function adds acceleration device to the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf)
+{
+	struct list_head *itr;
+	int ret = 0;
+
+	if (num_devices == ADF_MAX_DEVICES) {
+		dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
+			ADF_MAX_DEVICES);
+		return -EFAULT;
+	}
+
+	mutex_lock(&table_lock);
+	atomic_set(&accel_dev->ref_count, 0);
+
+	/* PF on host or VF on guest */
+	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+		struct vf_id_map *map;
+
+		list_for_each(itr, &accel_table) {
+			struct adf_accel_dev *ptr =
+				list_entry(itr, struct adf_accel_dev, list);
+
+			if (ptr == accel_dev) {
+				ret = -EEXIST;
+				goto unlock;
+			}
+		}
+
+		list_add_tail(&accel_dev->list, &accel_table);
+		accel_dev->accel_id = adf_find_free_id();
+		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
+			ret = -EFAULT;
+			goto unlock;
+		}
+		num_devices++;
+		map = kzalloc(sizeof(*map), GFP_KERNEL);
+		if (!map) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+		map->bdf = ~0;
+		map->id = accel_dev->accel_id;
+		map->fake_id = map->id;
+		map->attached = true;
+		list_add_tail(&map->list, &vfs_table);
+	} else if (accel_dev->is_vf && pf) {
+		/* VF on host */
+		struct vf_id_map *map;
+
+		map = adf_find_vf(adf_get_vf_num(accel_dev));
+		if (map) {
+			struct vf_id_map *next;
+
+			accel_dev->accel_id = map->id;
+			list_add_tail(&accel_dev->list, &accel_table);
+			map->fake_id++;
+			map->attached = true;
+			next = list_next_entry(map, list);
+			while (next && &next->list != &vfs_table) {
+				next->fake_id++;
+				next = list_next_entry(next, list);
+			}
+
+			ret = 0;
+			goto unlock;
+		}
+
+		map = kzalloc(sizeof(*map), GFP_KERNEL);
+		if (!map) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+		accel_dev->accel_id = adf_find_free_id();
+		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
+			kfree(map);
+			ret = -EFAULT;
+			goto unlock;
+		}
+		num_devices++;
+		list_add_tail(&accel_dev->list, &accel_table);
+		map->bdf = adf_get_vf_num(accel_dev);
+		map->id = accel_dev->accel_id;
+		map->fake_id = map->id;
+		map->attached = true;
+		list_add_tail(&map->list, &vfs_table);
+	}
+unlock:
+	mutex_unlock(&table_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
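+
+/*
+ * Usage sketch (hypothetical caller, shown for illustration only): a PF
+ * probed on the host, or a VF probed inside a guest, registers itself
+ * without a parent,
+ *
+ *	ret = adf_devmgr_add_dev(accel_dev, NULL);
+ *
+ * while a VF probed on the host passes its corresponding PF as the
+ * second argument so that the VF id mapping above can be reused.
+ */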
+
+struct list_head *adf_devmgr_get_head(void)
+{
+	return &accel_table;
+}
+
+/**
+ * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
+ * @accel_dev:  Pointer to acceleration device.
+ * @pf:		Corresponding PF if the accel_dev is a VF
+ *
+ * Function removes acceleration device from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf)
+{
+	mutex_lock(&table_lock);
+	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+		id_map[accel_dev->accel_id] = 0;
+		num_devices--;
+	} else if (accel_dev->is_vf && pf) {
+		struct vf_id_map *map, *next;
+
+		map = adf_find_vf(adf_get_vf_num(accel_dev));
+		if (!map) {
+			dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
+			goto unlock;
+		}
+		map->fake_id--;
+		map->attached = false;
+		next = list_next_entry(map, list);
+		while (next && &next->list != &vfs_table) {
+			next->fake_id--;
+			next = list_next_entry(next, list);
+		}
+	}
+unlock:
+	list_del(&accel_dev->list);
+	mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
+
+struct adf_accel_dev *adf_devmgr_get_first(void)
+{
+	struct adf_accel_dev *dev = NULL;
+
+	if (!list_empty(&accel_table))
+		dev = list_first_entry(&accel_table, struct adf_accel_dev,
+				       list);
+	return dev;
+}
+
+/**
+ * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
+ * @pci_dev:  Pointer to pci device.
+ *
+ * Function returns acceleration device associated with the given pci device.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: pointer to accel_dev or NULL if not found.
+ */
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
+{
+	struct list_head *itr;
+
+	mutex_lock(&table_lock);
+	list_for_each(itr, &accel_table) {
+		struct adf_accel_dev *ptr =
+				list_entry(itr, struct adf_accel_dev, list);
+
+		if (ptr->accel_pci_dev.pci_dev == pci_dev) {
+			mutex_unlock(&table_lock);
+			return ptr;
+		}
+	}
+	mutex_unlock(&table_lock);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
+
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
+{
+	struct list_head *itr;
+	int real_id;
+
+	mutex_lock(&table_lock);
+	real_id = adf_get_vf_real_id(id);
+	if (real_id < 0)
+		goto unlock;
+
+	id = real_id;
+
+	list_for_each(itr, &accel_table) {
+		struct adf_accel_dev *ptr =
+				list_entry(itr, struct adf_accel_dev, list);
+		if (ptr->accel_id == id) {
+			mutex_unlock(&table_lock);
+			return ptr;
+		}
+	}
+unlock:
+	mutex_unlock(&table_lock);
+	return NULL;
+}
+
+int adf_devmgr_verify_id(uint32_t id)
+{
+	if (id == ADF_CFG_ALL_DEVICES)
+		return 0;
+
+	if (adf_devmgr_get_dev_by_id(id))
+		return 0;
+
+	return -ENODEV;
+}
+
+static int adf_get_num_detached_vfs(void)
+{
+	struct list_head *itr;
+	int vfs = 0;
+
+	mutex_lock(&table_lock);
+	list_for_each(itr, &vfs_table) {
+		struct vf_id_map *ptr =
+			list_entry(itr, struct vf_id_map, list);
+		if (ptr->bdf != ~0 && !ptr->attached)
+			vfs++;
+	}
+	mutex_unlock(&table_lock);
+	return vfs;
+}
+
+void adf_devmgr_get_num_dev(uint32_t *num)
+{
+	*num = num_devices - adf_get_num_detached_vfs();
+}
+
+/**
+ * adf_dev_in_use() - Check whether accel_dev is currently in use
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
+int adf_dev_in_use(struct adf_accel_dev *accel_dev)
+{
+	return atomic_read(&accel_dev->ref_count) != 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_in_use);
+
+/**
+ * adf_dev_get() - Increment accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Increment the accel_dev refcount. If this is the first reference taken
+ * while the accel_dev is in use, also take a reference on the owning
+ * module.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, -EFAULT if the module refcount cannot be taken
+ */
+int adf_dev_get(struct adf_accel_dev *accel_dev)
+{
+	if (atomic_add_return(1, &accel_dev->ref_count) == 1)
+		if (!try_module_get(accel_dev->owner))
+			return -EFAULT;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_get);
+
+/**
+ * adf_dev_put() - Decrement accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Decrement the accel_dev refcount. If this drops the last reference
+ * taken while the accel_dev was in use, also drop the reference on the
+ * owning module.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_dev_put(struct adf_accel_dev *accel_dev)
+{
+	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
+		module_put(accel_dev->owner);
+}
+EXPORT_SYMBOL_GPL(adf_dev_put);
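+
+/*
+ * Usage sketch (hypothetical caller, for illustration): adf_dev_get() and
+ * adf_dev_put() are expected to be paired around any period in which the
+ * device is used, e.g.
+ *
+ *	if (adf_dev_get(accel_dev))
+ *		return -EFAULT;
+ *	... submit requests to accel_dev ...
+ *	adf_dev_put(accel_dev);
+ */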
+
+/**
+ * adf_devmgr_in_reset() - Check whether device is in reset
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device is being reset, 0 otherwise.
+ */
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
+{
+	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
+
+/**
+ * adf_dev_started() - Check whether device has started
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
+int adf_dev_started(struct adf_accel_dev *accel_dev)
+{
+	return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
+}
+EXPORT_SYMBOL_GPL(adf_dev_started);
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
new file mode 100644
index 0000000..d7dd18d
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -0,0 +1,143 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport_internal.h"
+
+#define ADF_ARB_NUM 4
+#define ADF_ARB_REG_SIZE 0x4
+#define ADF_ARB_WTR_SIZE 0x20
+#define ADF_ARB_OFFSET 0x30000
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_WTR_OFFSET 0x010
+#define ADF_ARB_RO_EN_OFFSET 0x090
+#define ADF_ARB_WQCFG_OFFSET 0x100
+#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+	(ADF_ARB_REG_SLOT * index), value)
+
+#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
+	(ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_WRK_2_SER_MAP_OFFSET) + \
+	(ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
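+/*
+ * Worked example of the macros above (illustration only): with
+ * ADF_ARB_RINGSRVARBEN_OFFSET = 0x19C and ADF_ARB_REG_SLOT = 0x1000,
+ * WRITE_CSR_ARB_RINGSRVARBEN(csr, 1, val) writes val at CSR offset
+ * 0x119C, i.e. the ring arbiter enable register of bank 1; the
+ * SARCONFIG, WRK_2_SER_MAP and WQCFG variants step by ADF_ARB_REG_SIZE
+ * (4 bytes) from their bases inside the 0x30000 arbiter region.
+ */
+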
+int adf_init_arb(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+	u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
+	u32 arb, i;
+	const u32 *thd_2_arb_cfg;
+
+	/* Service arb configured for 32-byte responses and
+	 * ring flow control check enabled. */
+	for (arb = 0; arb < ADF_ARB_NUM; arb++)
+		WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
+
+	/* Setup worker queue registers */
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WQCFG(csr, i, i);
+
+	/* Map worker threads to service arbiters */
+	hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
+
+	if (!thd_2_arb_cfg)
+		return -EFAULT;
+
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_arb);
+
+void adf_update_ring_arb(struct adf_etr_ring_data *ring)
+{
+	WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
+				   ring->bank->bank_number,
+				   ring->bank->ring_mask & 0xFF);
+}
+
+void adf_exit_arb(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *csr;
+	unsigned int i;
+
+	if (!accel_dev->transport)
+		return;
+
+	csr = accel_dev->transport->banks[0].csr_addr;
+
+	/* Reset arbiter configuration */
+	for (i = 0; i < ADF_ARB_NUM; i++)
+		WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
+
+	/* Shutdown work queue */
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WQCFG(csr, i, 0);
+
+	/* Unmap worker threads from service arbiters */
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
+
+	/* Disable arbitration on all rings */
+	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
+		WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
+}
+EXPORT_SYMBOL_GPL(adf_exit_arb);
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
new file mode 100644
index 0000000..26556c7
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -0,0 +1,384 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(service_table);
+static DEFINE_MUTEX(service_lock);
+
+static void adf_service_add(struct service_hndl *service)
+{
+	mutex_lock(&service_lock);
+	list_add(&service->list, &service_table);
+	mutex_unlock(&service_lock);
+}
+
+int adf_service_register(struct service_hndl *service)
+{
+	memset(service->init_status, 0, sizeof(service->init_status));
+	memset(service->start_status, 0, sizeof(service->start_status));
+	adf_service_add(service);
+	return 0;
+}
+
+static void adf_service_remove(struct service_hndl *service)
+{
+	mutex_lock(&service_lock);
+	list_del(&service->list);
+	mutex_unlock(&service_lock);
+}
+
+int adf_service_unregister(struct service_hndl *service)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
+		if (service->init_status[i] || service->start_status[i]) {
+			pr_err("QAT: Could not remove active service\n");
+			return -EFAULT;
+		}
+	}
+	adf_service_remove(service);
+	return 0;
+}
+
+/**
+ * adf_dev_init() - Init data structures and services for the given accel device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Initialize the ring data structures and the admin comms and arbitration
+ * services.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_init(struct adf_accel_dev *accel_dev)
+{
+	struct service_hndl *service;
+	struct list_head *list_itr;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+	if (!hw_data) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to init device - hw_data not set\n");
+		return -EFAULT;
+	}
+
+	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
+		dev_err(&GET_DEV(accel_dev), "Device not configured\n");
+		return -EFAULT;
+	}
+
+	if (adf_init_etr_data(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to initialize etr\n");
+		return -EFAULT;
+	}
+
+	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to initialize admin comms\n");
+		return -EFAULT;
+	}
+
+	if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to initialize hw arbiter\n");
+		return -EFAULT;
+	}
+
+	hw_data->enable_ints(accel_dev);
+
+	if (adf_ae_init(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to initialise Acceleration Engine\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
+
+	if (adf_ae_fw_load(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to load acceleration FW\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+
+	if (hw_data->alloc_irq(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+
+	/*
+	 * Subservice initialisation is divided into two stages: init and start.
+	 * This is to facilitate any ordering dependencies between services
+	 * prior to starting any of the accelerators.
+	 */
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to initialise service %s\n",
+				service->name);
+			return -EFAULT;
+		}
+		set_bit(accel_dev->accel_id, service->init_status);
+	}
+
+	hw_data->enable_error_correction(accel_dev);
+	hw_data->enable_vf2pf_comms(accel_dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_init);
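+
+/*
+ * Lifecycle sketch (hypothetical device specific probe, for illustration
+ * only): bring-up is split into the two stages described above,
+ *
+ *	ret = adf_dev_init(accel_dev);
+ *	if (!ret)
+ *		ret = adf_dev_start(accel_dev);
+ *	if (ret) {
+ *		adf_dev_stop(accel_dev);
+ *		adf_dev_shutdown(accel_dev);
+ *	}
+ *
+ * with adf_dev_stop() and adf_dev_shutdown() undoing the two stages in
+ * reverse order on tear-down.
+ */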
+
+/**
+ * adf_dev_start() - Start acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is ready to be used.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_start(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct service_hndl *service;
+	struct list_head *list_itr;
+
+	set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
+	if (adf_ae_start(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+
+	if (hw_data->send_admin_init(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
+		return -EFAULT;
+	}
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to start service %s\n",
+				service->name);
+			return -EFAULT;
+		}
+		set_bit(accel_dev->accel_id, service->start_status);
+	}
+
+	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+	set_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+	if (!list_empty(&accel_dev->crypto_list) &&
+	    (qat_algs_register() || qat_asym_algs_register())) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to register crypto algs\n");
+		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+		return -EFAULT;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_start);
+
+/**
+ * adf_dev_stop() - Stop acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is shutting down.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_dev_stop(struct adf_accel_dev *accel_dev)
+{
+	struct service_hndl *service;
+	struct list_head *list_itr;
+	bool wait = false;
+	int ret;
+
+	if (!adf_dev_started(accel_dev) &&
+	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
+		return;
+
+	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+	if (!list_empty(&accel_dev->crypto_list)) {
+		qat_algs_unregister();
+		qat_asym_algs_unregister();
+	}
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (!test_bit(accel_dev->accel_id, service->start_status))
+			continue;
+		ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
+		if (!ret) {
+			clear_bit(accel_dev->accel_id, service->start_status);
+		} else if (ret == -EAGAIN) {
+			wait = true;
+			clear_bit(accel_dev->accel_id, service->start_status);
+		}
+	}
+
+	if (wait)
+		msleep(100);
+
+	if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
+		if (adf_ae_stop(accel_dev))
+			dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
+		else
+			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+	}
+}
+EXPORT_SYMBOL_GPL(adf_dev_stop);
+
+/**
+ * adf_dev_shutdown() - shutdown acceleration services and data structures
+ * @accel_dev: Pointer to acceleration device
+ *
+ * Cleanup the ring data structures and the admin comms and arbitration
+ * services.
+ */
+void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct service_hndl *service;
+	struct list_head *list_itr;
+
+	if (!hw_data) {
+		dev_err(&GET_DEV(accel_dev),
+			"QAT: Failed to shutdown device - hw_data not set\n");
+		return;
+	}
+
+	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
+		adf_ae_fw_release(accel_dev);
+		clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+	}
+
+	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
+		if (adf_ae_shutdown(accel_dev))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to shutdown Accel Engine\n");
+		else
+			clear_bit(ADF_STATUS_AE_INITIALISED,
+				  &accel_dev->status);
+	}
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (!test_bit(accel_dev->accel_id, service->init_status))
+			continue;
+		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to shutdown service %s\n",
+				service->name);
+		else
+			clear_bit(accel_dev->accel_id, service->init_status);
+	}
+
+	hw_data->disable_iov(accel_dev);
+
+	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
+		hw_data->free_irq(accel_dev);
+		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+	}
+
+	/* Delete configuration only if not restarting */
+	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+		adf_cfg_del_all(accel_dev);
+
+	if (hw_data->exit_arb)
+		hw_data->exit_arb(accel_dev);
+
+	if (hw_data->exit_admin_comms)
+		hw_data->exit_admin_comms(accel_dev);
+
+	adf_cleanup_etr_data(accel_dev);
+	adf_dev_restore(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_dev_shutdown);
+
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
+{
+	struct service_hndl *service;
+	struct list_head *list_itr;
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to restart service %s.\n",
+				service->name);
+	}
+	return 0;
+}
+
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
+{
+	struct service_hndl *service;
+	struct list_head *list_itr;
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to restart service %s.\n",
+				service->name);
+	}
+	return 0;
+}
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
new file mode 100644
index 0000000..cd1cdf5
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -0,0 +1,348 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_cfg_common.h"
+#include "adf_transport_access_macros.h"
+#include "adf_transport_internal.h"
+
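+/*
+ * MSI-X vector layout used below (descriptive summary, assuming the entry
+ * table has been sized by adf_isr_alloc_msix_entry_table()): with SR-IOV
+ * disabled, entries 0 .. num_banks - 1 serve the ring bundles and one
+ * extra entry serves the AE cluster; with SR-IOV enabled only the single
+ * AE entry (entry number num_banks) is used.
+ */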
+static int adf_enable_msix(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	u32 msix_num_entries = 1;
+
+	/* If SR-IOV is disabled, add entries for each bank */
+	if (!accel_dev->pf.vf_info) {
+		int i;
+
+		msix_num_entries += hw_data->num_banks;
+		for (i = 0; i < msix_num_entries; i++)
+			pci_dev_info->msix_entries.entries[i].entry = i;
+	} else {
+		pci_dev_info->msix_entries.entries[0].entry =
+			hw_data->num_banks;
+	}
+
+	if (pci_enable_msix_exact(pci_dev_info->pci_dev,
+				  pci_dev_info->msix_entries.entries,
+				  msix_num_entries)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
+{
+	pci_disable_msix(pci_dev_info->pci_dev);
+}
+
+static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
+{
+	struct adf_etr_bank_data *bank = bank_ptr;
+
+	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
+	tasklet_hi_schedule(&bank->resp_handler);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
+{
+	struct adf_accel_dev *accel_dev = dev_ptr;
+
+#ifdef CONFIG_PCI_IOV
+	/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
+	if (accel_dev->pf.vf_info) {
+		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+		struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+		void __iomem *pmisc_bar_addr = pmisc->virt_addr;
+		u32 vf_mask;
+
+		/* Get the interrupt sources triggered by VFs */
+		vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
+			    0x0000FFFF) << 16) |
+			  ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) &
+			    0x01FFFE00) >> 9);
+
+		if (vf_mask) {
+			struct adf_accel_vf_info *vf_info;
+			bool irq_handled = false;
+			int i;
+
+			/* Disable VF2PF interrupts for VFs with pending ints */
+			adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+
+			/*
+			 * Schedule tasklets to handle VF2PF interrupt BHs
+			 * unless the VF is malicious and is attempting to
+			 * flood the host OS with VF2PF interrupts.
+			 */
+			for_each_set_bit(i, (const unsigned long *)&vf_mask,
+					 (sizeof(vf_mask) * BITS_PER_BYTE)) {
+				vf_info = accel_dev->pf.vf_info + i;
+
+				if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
+					dev_info(&GET_DEV(accel_dev),
+						 "Too many ints from VF%d\n",
+						  vf_info->vf_nr + 1);
+					continue;
+				}
+
+				/* Tasklet will re-enable ints from this VF */
+				tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
+				irq_handled = true;
+			}
+
+			if (irq_handled)
+				return IRQ_HANDLED;
+		}
+	}
+#endif /* CONFIG_PCI_IOV */
+
+	dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
+		accel_dev->accel_id);
+
+	return IRQ_NONE;
+}
+
+static int adf_request_irqs(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+	struct adf_etr_data *etr_data = accel_dev->transport;
+	int ret, i = 0;
+	char *name;
+
+	/* Request msix irq for all banks unless SR-IOV enabled */
+	if (!accel_dev->pf.vf_info) {
+		for (i = 0; i < hw_data->num_banks; i++) {
+			struct adf_etr_bank_data *bank = &etr_data->banks[i];
+			unsigned int cpu, cpus = num_online_cpus();
+
+			name = *(pci_dev_info->msix_entries.names + i);
+			snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+				 "qat%d-bundle%d", accel_dev->accel_id, i);
+			ret = request_irq(msixe[i].vector,
+					  adf_msix_isr_bundle, 0, name, bank);
+			if (ret) {
+				dev_err(&GET_DEV(accel_dev),
+					"failed to enable irq %d for %s\n",
+					msixe[i].vector, name);
+				return ret;
+			}
+
+			cpu = ((accel_dev->accel_id * hw_data->num_banks) +
+			       i) % cpus;
+			irq_set_affinity_hint(msixe[i].vector,
+					      get_cpu_mask(cpu));
+		}
+	}
+
+	/* Request msix irq for AE */
+	name = *(pci_dev_info->msix_entries.names + i);
+	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+		 "qat%d-ae-cluster", accel_dev->accel_id);
+	ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
+	if (ret) {
+		dev_err(&GET_DEV(accel_dev),
+			"failed to enable irq %d, for %s\n",
+			msixe[i].vector, name);
+		return ret;
+	}
+	return ret;
+}
+
+static void adf_free_irqs(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+	struct adf_etr_data *etr_data = accel_dev->transport;
+	int i = 0;
+
+	if (pci_dev_info->msix_entries.num_entries > 1) {
+		for (i = 0; i < hw_data->num_banks; i++) {
+			irq_set_affinity_hint(msixe[i].vector, NULL);
+			free_irq(msixe[i].vector, &etr_data->banks[i]);
+		}
+	}
+	irq_set_affinity_hint(msixe[i].vector, NULL);
+	free_irq(msixe[i].vector, accel_dev);
+}
+
+static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
+{
+	int i;
+	char **names;
+	struct msix_entry *entries;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	u32 msix_num_entries = 1;
+
+	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
+	if (!accel_dev->pf.vf_info)
+		msix_num_entries += hw_data->num_banks;
+
+	entries = kcalloc_node(msix_num_entries, sizeof(*entries),
+			       GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
+	if (!entries)
+		return -ENOMEM;
+
+	names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
+	if (!names) {
+		kfree(entries);
+		return -ENOMEM;
+	}
+	for (i = 0; i < msix_num_entries; i++) {
+		*(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+		if (!(*(names + i)))
+			goto err;
+	}
+	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
+	accel_dev->accel_pci_dev.msix_entries.entries = entries;
+	accel_dev->accel_pci_dev.msix_entries.names = names;
+	return 0;
+err:
+	for (i = 0; i < msix_num_entries; i++)
+		kfree(*(names + i));
+	kfree(entries);
+	kfree(names);
+	return -ENOMEM;
+}
+
+static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
+{
+	char **names = accel_dev->accel_pci_dev.msix_entries.names;
+	int i;
+
+	kfree(accel_dev->accel_pci_dev.msix_entries.entries);
+	for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
+		kfree(*(names + i));
+	kfree(names);
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *priv_data = accel_dev->transport;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	int i;
+
+	for (i = 0; i < hw_data->num_banks; i++)
+		tasklet_init(&priv_data->banks[i].resp_handler,
+			     adf_response_handler,
+			     (unsigned long)&priv_data->banks[i]);
+	return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *priv_data = accel_dev->transport;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	int i;
+
+	for (i = 0; i < hw_data->num_banks; i++) {
+		tasklet_disable(&priv_data->banks[i].resp_handler);
+		tasklet_kill(&priv_data->banks[i].resp_handler);
+	}
+}
+
+/**
+ * adf_isr_resource_free() - Free IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function frees interrupts for acceleration device.
+ */
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+	adf_free_irqs(accel_dev);
+	adf_cleanup_bh(accel_dev);
+	adf_disable_msix(&accel_dev->accel_pci_dev);
+	adf_isr_free_msix_entry_table(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_isr_resource_free);
+
+/**
+ * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function allocates interrupts for acceleration device.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+	int ret;
+
+	ret = adf_isr_alloc_msix_entry_table(accel_dev);
+	if (ret)
+		return ret;
+	if (adf_enable_msix(accel_dev))
+		goto err_out;
+
+	if (adf_setup_bh(accel_dev))
+		goto err_out;
+
+	if (adf_request_irqs(accel_dev))
+		goto err_out;
+
+	return 0;
+err_out:
+	adf_isr_resource_free(accel_dev);
+	return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
new file mode 100644
index 0000000..b3875fd
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -0,0 +1,415 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+#define ADF_DH895XCC_EP_OFFSET	0x3A000
+#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)
+#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
+#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
+#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
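+
+/*
+ * Worked example of the masks above (illustration only): the VF2PF source
+ * bits for VFs 1-16 live in ERRMSK3 bits 24:9, so
+ * ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(0x1) evaluates to BIT(9) and masks
+ * (or unmasks) the interrupt from VF1; the bits for VFs 17-32 live in
+ * the low 16 bits of ERRMSK5, hence the ">> 16" in the _U_ variant.
+ */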
+
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
+}
+
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
+}
+
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+				 u32 vf_mask)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 reg;
+
+	/* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
+	if (vf_mask & 0xFFFF) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
+		reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+	}
+
+	/* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
+	if (vf_mask >> 16) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
+		reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+	}
+}
+
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 reg;
+
+	/* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
+	if (vf_mask & 0xFFFF) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
+			ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+	}
+
+	/* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
+	if (vf_mask >> 16) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
+			ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+	}
+}
+
+static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+	u32 val, pf2vf_offset, count = 0;
+	u32 local_in_use_mask, local_in_use_pattern;
+	u32 remote_in_use_mask, remote_in_use_pattern;
+	struct mutex *lock;	/* lock preventing concurrent access to CSR */
+	u32 int_bit;
+	int ret = 0;
+
+	if (accel_dev->is_vf) {
+		pf2vf_offset = hw_data->get_pf2vf_offset(0);
+		lock = &accel_dev->vf.vf2pf_lock;
+		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+		int_bit = ADF_VF2PF_INT;
+	} else {
+		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
+		lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
+		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+		int_bit = ADF_PF2VF_INT;
+	}
+
+	mutex_lock(lock);
+
+	/* Check if PF2VF CSR is in use by remote function */
+	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
+		dev_dbg(&GET_DEV(accel_dev),
+			"PF2VF CSR in use by remote function\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* Attempt to get ownership of PF2VF CSR */
+	msg &= ~local_in_use_mask;
+	msg |= local_in_use_pattern;
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
+
+	/* Wait in case remote func also attempting to get ownership */
+	msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
+
+	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	if ((val & local_in_use_mask) != local_in_use_pattern) {
+		dev_dbg(&GET_DEV(accel_dev),
+			"PF2VF CSR in use by remote - collision detected\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/*
+	 * This function now owns the PF2VF CSR.  The IN_USE_BY pattern must
+	 * remain in the PF2VF CSR for all writes including ACK from remote
+	 * until this local function relinquishes the CSR.  Send the message
+	 * by interrupting the remote.
+	 */
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
+
+	/* Wait for confirmation from remote func it received the message */
+	do {
+		msleep(ADF_IOV_MSG_ACK_DELAY);
+		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
+
+	if (val & int_bit) {
+		dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+		val &= ~int_bit;
+		ret = -EIO;
+	}
+
+	/* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
+out:
+	mutex_unlock(lock);
+	return ret;
+}
+
+/**
+ * adf_iov_putmsg() - send PF2VF message
+ * @accel_dev:  Pointer to acceleration device.
+ * @msg:	Message to send
+ * @vf_nr:	VF number to which the message will be sent
+ *
+ * Function sends a message from the PF to a VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+	u32 count = 0;
+	int ret;
+
+	do {
+		ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
+		if (ret)
+			msleep(ADF_IOV_MSG_RETRY_DELAY);
+	} while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adf_iov_putmsg);
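+
+/*
+ * Message layout sketch (illustration only, mirroring how
+ * adf_vf2pf_request_version() below builds its request): a message is
+ * composed as
+ *
+ *	msg = ADF_VF2PF_MSGORIGIN_SYSTEM |
+ *	      (type << ADF_VF2PF_MSGTYPE_SHIFT) |
+ *	      (data << type-specific shift);
+ *
+ * before being handed to adf_iov_putmsg(), which serialises access to
+ * the shared PF2VF CSR and waits for the remote side to acknowledge.
+ */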
+
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
+{
+	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	int bar_id = hw_data->get_misc_bar_id(hw_data);
+	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
+
+	/* Read message from the VF */
+	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
+
+	/* To ACK, clear the VF2PFINT bit */
+	msg &= ~ADF_VF2PF_INT;
+	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
+
+	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
+		/* Ignore legacy non-system (non-kernel) VF2PF messages */
+		goto err;
+
+	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
+	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+		{
+		u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+
+		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+			  ADF_PF2VF_MSGTYPE_SHIFT) |
+			 (ADF_PFVF_COMPATIBILITY_VERSION <<
+			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+
+		dev_dbg(&GET_DEV(accel_dev),
+			"Compatibility Version Request from VF%d vers=%u\n",
+			vf_nr + 1, vf_compat_ver);
+
+		if (vf_compat_ver < hw_data->min_iov_compat_ver) {
+			dev_err(&GET_DEV(accel_dev),
+				"VF (vers %d) incompatible with PF (vers %d)\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		} else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
+			dev_err(&GET_DEV(accel_dev),
+				"VF (vers %d) compat with PF (vers %d) unkn.\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		} else {
+			dev_dbg(&GET_DEV(accel_dev),
+				"VF (vers %d) compatible with PF (vers %d)\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_COMPATIBLE <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		}
+		}
+		break;
+	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+		dev_dbg(&GET_DEV(accel_dev),
+			"Legacy VersionRequest received from VF%d 0x%x\n",
+			vf_nr + 1, msg);
+		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+			  ADF_PF2VF_MSGTYPE_SHIFT) |
+			 (ADF_PFVF_COMPATIBILITY_VERSION <<
+			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+		resp |= ADF_PF2VF_VF_COMPATIBLE <<
+			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		/* Set legacy major and minor version num */
+		resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
+			1 << ADF_PF2VF_MINORVERSION_SHIFT;
+		break;
+	case ADF_VF2PF_MSGTYPE_INIT:
+		{
+		dev_dbg(&GET_DEV(accel_dev),
+			"Init message received from VF%d 0x%x\n",
+			vf_nr + 1, msg);
+		vf_info->init = true;
+		}
+		break;
+	case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+		{
+		dev_dbg(&GET_DEV(accel_dev),
+			"Shutdown message received from VF%d 0x%x\n",
+			vf_nr + 1, msg);
+		vf_info->init = false;
+		}
+		break;
+	default:
+		goto err;
+	}
+
+	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
+		dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
+
+	/* re-enable interrupt on PF from this VF */
+	adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
+	return;
+err:
+	dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
+		vf_nr + 1, msg);
+}
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_vf_info *vf;
+	u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+		(ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
+	int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+
+	for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+		if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to send restarting msg to VF%d\n", i);
+	}
+}
+
+static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
+{
+	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	u32 msg = 0;
+	int ret;
+
+	msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
+	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
+	msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+	BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
+
+	/* Send request from VF to PF */
+	ret = adf_iov_putmsg(accel_dev, msg, 0);
+	if (ret) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Compatibility Version Request.\n");
+		return ret;
+	}
+
+	/* Wait for response */
+	if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
+					 timeout)) {
+		dev_err(&GET_DEV(accel_dev),
+			"IOV request/response message timeout expired\n");
+		return -EIO;
+	}
+
+	/* Response from PF received, check compatibility */
+	switch (accel_dev->vf.compatible) {
+	case ADF_PF2VF_VF_COMPATIBLE:
+		break;
+	case ADF_PF2VF_VF_COMPAT_UNKNOWN:
+		/* VF is newer than PF and decides whether it is compatible */
+		if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
+			break;
+		/* fall through */
+	case ADF_PF2VF_VF_INCOMPATIBLE:
+		dev_err(&GET_DEV(accel_dev),
+			"PF (vers %d) and VF (vers %d) are not compatible\n",
+			accel_dev->vf.pf_version,
+			ADF_PFVF_COMPATIBILITY_VERSION);
+		return -EINVAL;
+	default:
+		dev_err(&GET_DEV(accel_dev),
+			"Invalid response from PF; assume not compatible\n");
+		return -EINVAL;
+	}
+	return ret;
+}
+
+/**
+ * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
+ *
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	adf_enable_pf2vf_interrupts(accel_dev);
+	return adf_vf2pf_request_version(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
new file mode 100644
index 0000000..5acd531
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
@@ -0,0 +1,146 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_PF2VF_MSG_H
+#define ADF_PF2VF_MSG_H
+
+/*
+ * PF<->VF Messaging
+ * The PF has an array of 32-bit PF2VF registers, one for each VF.  The
+ * PF can access all these registers; each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register functionality is split into two parts:
+ * The bottom half is for PF->VF messages. In particular, when the first
+ * bit of this register (bit 0) gets set, an interrupt is triggered in
+ * the respective VF.
+ * The top half is for VF->PF messages. In particular, when the first bit
+ * of this half of the register (bit 16) gets set, an interrupt is
+ * triggered in the PF.
+ *
+ * The remaining bits within this register are available to encode messages
+ * and to implement a collision control mechanism that prevents concurrent
+ * use of the PF2VF register by both the PF and the VF.
+ *
+ *  31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   VF2PF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ *  15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   PF2VF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ * Message Origin (Should always be 1)
+ * A legacy out-of-tree QAT driver allowed for a set of messages not supported
+ * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
+ *
+ * When a PF or VF attempts to send a message in the lower or upper 16 bits,
+ * respectively, the other 16 bits are written to first with a defined
+ * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
+ */
+
+#define ADF_PFVF_COMPATIBILITY_VERSION		0x1	/* PF<->VF compat */
+
+/* PF->VF messages */
+#define ADF_PF2VF_INT				BIT(0)
+#define ADF_PF2VF_MSGORIGIN_SYSTEM		BIT(1)
+#define ADF_PF2VF_MSGTYPE_MASK			0x0000003C
+#define ADF_PF2VF_MSGTYPE_SHIFT			2
+#define ADF_PF2VF_MSGTYPE_RESTARTING		0x01
+#define ADF_PF2VF_MSGTYPE_VERSION_RESP		0x02
+#define ADF_PF2VF_IN_USE_BY_PF			0x6AC20000
+#define ADF_PF2VF_IN_USE_BY_PF_MASK		0xFFFE0000
+
+/* PF->VF Version Response */
+#define ADF_PF2VF_VERSION_RESP_VERS_MASK	0x00003FC0
+#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT	6
+#define ADF_PF2VF_VERSION_RESP_RESULT_MASK	0x0000C000
+#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT	14
+#define ADF_PF2VF_MINORVERSION_SHIFT		6
+#define ADF_PF2VF_MAJORVERSION_SHIFT		10
+#define ADF_PF2VF_VF_COMPATIBLE			1
+#define ADF_PF2VF_VF_INCOMPATIBLE		2
+#define ADF_PF2VF_VF_COMPAT_UNKNOWN		3
+
+/* VF->PF messages */
+#define ADF_VF2PF_IN_USE_BY_VF			0x00006AC2
+#define ADF_VF2PF_IN_USE_BY_VF_MASK		0x0000FFFE
+#define ADF_VF2PF_INT				BIT(16)
+#define ADF_VF2PF_MSGORIGIN_SYSTEM		BIT(17)
+#define ADF_VF2PF_MSGTYPE_MASK			0x003C0000
+#define ADF_VF2PF_MSGTYPE_SHIFT			18
+#define ADF_VF2PF_MSGTYPE_INIT			0x3
+#define ADF_VF2PF_MSGTYPE_SHUTDOWN		0x4
+#define ADF_VF2PF_MSGTYPE_VERSION_REQ		0x5
+#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ	0x6
+
+/* VF->PF Compatible Version Request */
+#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT		22
+
+/* Collision detection */
+#define ADF_IOV_MSG_COLLISION_DETECT_DELAY	10
+#define ADF_IOV_MSG_ACK_DELAY			2
+#define ADF_IOV_MSG_ACK_MAX_RETRY		100
+#define ADF_IOV_MSG_RETRY_DELAY			5
+#define ADF_IOV_MSG_MAX_RETRIES			3
+#define ADF_IOV_MSG_RESP_TIMEOUT	(ADF_IOV_MSG_ACK_DELAY * \
+					 ADF_IOV_MSG_ACK_MAX_RETRY + \
+					 ADF_IOV_MSG_COLLISION_DETECT_DELAY)
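+
+/*
+ * Illustrative sketch (editor's addition, not part of the driver): shows how
+ * the bit-field definitions above compose a VF->PF Compatible Version Request
+ * word and how the version carried in a PF->VF Version Response is decoded.
+ * The helper names are hypothetical; the driver itself builds these words
+ * inline (see adf_vf2pf_request_version()).
+ */
+static inline u32 adf_example_build_compat_req(void)
+{
+	u32 msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
+
+	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
+	msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+	return msg;
+}
+
+static inline u32 adf_example_resp_version(u32 resp)
+{
+	return (resp & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
+	       ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
+}
+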
+#endif /* ADF_PF2VF_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
new file mode 100644
index 0000000..b36d865
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -0,0 +1,306 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_pf2vf_msg.h"
+
+static struct workqueue_struct *pf2vf_resp_wq;
+
+#define ME2FUNCTION_MAP_A_OFFSET	(0x3A400 + 0x190)
+#define ME2FUNCTION_MAP_A_NUM_REGS	96
+
+#define ME2FUNCTION_MAP_B_OFFSET	(0x3A400 + 0x310)
+#define ME2FUNCTION_MAP_B_NUM_REGS	12
+
+#define ME2FUNCTION_MAP_REG_SIZE	4
+#define ME2FUNCTION_MAP_VALID		BIT(7)
+
+#define READ_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index)		\
+	ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index, value)	\
+	ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+#define READ_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index)		\
+	ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index, value)	\
+	ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+struct adf_pf2vf_resp {
+	struct work_struct pf2vf_resp_work;
+	struct adf_accel_vf_info *vf_info;
+};
+
+static void adf_iov_send_resp(struct work_struct *work)
+{
+	struct adf_pf2vf_resp *pf2vf_resp =
+		container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+
+	adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
+	kfree(pf2vf_resp);
+}
+
+static void adf_vf2pf_bh_handler(void *data)
+{
+	struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
+	struct adf_pf2vf_resp *pf2vf_resp;
+
+	pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
+	if (!pf2vf_resp)
+		return;
+
+	pf2vf_resp->vf_info = vf_info;
+	INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
+	queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
+}
+
+static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+	int totalvfs = pci_sriov_get_totalvfs(pdev);
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	struct adf_accel_vf_info *vf_info;
+	int i;
+	u32 reg;
+
+	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
+	     i++, vf_info++) {
+		/* This ptr will be populated when the VFs are created */
+		vf_info->accel_dev = accel_dev;
+		vf_info->vf_nr = i;
+
+		tasklet_init(&vf_info->vf2pf_bh_tasklet,
+			     (void *)adf_vf2pf_bh_handler,
+			     (unsigned long)vf_info);
+		mutex_init(&vf_info->pf2vf_lock);
+		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
+				     DEFAULT_RATELIMIT_INTERVAL,
+				     DEFAULT_RATELIMIT_BURST);
+	}
+
+	/* Set Valid bits in ME Thread to PCIe Function Mapping Group A */
+	for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+		reg |= ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+	}
+
+	/* Set Valid bits in ME Thread to PCIe Function Mapping Group B */
+	for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+		reg |= ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+	}
+
+	/* Enable VF to PF interrupts for all VFs */
+	adf_enable_vf2pf_interrupts(accel_dev, GENMASK_ULL(totalvfs - 1, 0));
+
+	/*
+	 * Due to the hardware design, when SR-IOV and the ring arbiter
+	 * are enabled all the VFs supported in hardware must be enabled in
+	 * order for all the hardware resources (i.e. bundles) to be usable.
+	 * When SR-IOV is enabled, each of the VFs will own one bundle.
+	 */
+	return pci_enable_sriov(pdev, totalvfs);
+}
+
+/**
+ * adf_disable_sriov() - Disable SRIOV for the device
+ * @accel_dev:  Pointer to accel device.
+ *
+ * Function disables SRIOV for the accel device.
+ *
+ * Return: void
+ */
+void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
+	struct adf_accel_vf_info *vf;
+	u32 reg;
+	int i;
+
+	if (!accel_dev->pf.vf_info)
+		return;
+
+	adf_pf2vf_notify_restarting(accel_dev);
+
+	pci_disable_sriov(accel_to_pci_dev(accel_dev));
+
+	/* Disable VF to PF interrupts */
+	adf_disable_vf2pf_interrupts(accel_dev, 0xFFFFFFFF);
+
+	/* Clear Valid bits in ME Thread to PCIe Function Mapping Group A */
+	for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+		reg &= ~ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+	}
+
+	/* Clear Valid bits in ME Thread to PCIe Function Mapping Group B */
+	for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+		reg &= ~ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+	}
+
+	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
+		tasklet_disable(&vf->vf2pf_bh_tasklet);
+		tasklet_kill(&vf->vf2pf_bh_tasklet);
+		mutex_destroy(&vf->pf2vf_lock);
+	}
+
+	kfree(accel_dev->pf.vf_info);
+	accel_dev->pf.vf_info = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_disable_sriov);
+
+/**
+ * adf_sriov_configure() - Enable SRIOV for the device
+ * @pdev:  Pointer to pci device.
+ * @numvfs:  Number of virtual functions requested.
+ *
+ * Function enables SRIOV for the pci device.
+ *
+ * Return: number of VFs enabled on success, error code otherwise.
+ */
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+	int totalvfs = pci_sriov_get_totalvfs(pdev);
+	unsigned long val;
+	int ret;
+
+	if (!accel_dev) {
+		dev_err(&pdev->dev, "Failed to find accel_dev\n");
+		return -EFAULT;
+	}
+
+	if (!iommu_present(&pci_bus_type))
+		dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
+
+	if (accel_dev->pf.vf_info) {
+		dev_info(&pdev->dev, "Already enabled for this device\n");
+		return -EINVAL;
+	}
+
+	if (adf_dev_started(accel_dev)) {
+		if (adf_devmgr_in_reset(accel_dev) ||
+		    adf_dev_in_use(accel_dev)) {
+			dev_err(&GET_DEV(accel_dev), "Device busy\n");
+			return -EBUSY;
+		}
+
+		adf_dev_stop(accel_dev);
+		adf_dev_shutdown(accel_dev);
+	}
+
+	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+		return -EFAULT;
+	val = 0;
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+					ADF_NUM_CY, (void *)&val, ADF_DEC))
+		return -EFAULT;
+
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+	/* Allocate memory for VF info structs */
+	accel_dev->pf.vf_info = kcalloc(totalvfs,
+					sizeof(struct adf_accel_vf_info),
+					GFP_KERNEL);
+	if (!accel_dev->pf.vf_info)
+		return -ENOMEM;
+
+	if (adf_dev_init(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
+			accel_dev->accel_id);
+		return -EFAULT;
+	}
+
+	if (adf_dev_start(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+			accel_dev->accel_id);
+		return -EFAULT;
+	}
+
+	ret = adf_enable_sriov(accel_dev);
+	if (ret)
+		return ret;
+
+	return numvfs;
+}
+EXPORT_SYMBOL_GPL(adf_sriov_configure);
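+
+/*
+ * Illustrative sketch (editor's addition, not part of the driver): a QAT PF
+ * device driver plugs adf_sriov_configure() into its struct pci_driver so
+ * that writes to the sysfs sriov_numvfs attribute land here.  The probe,
+ * remove and id-table symbols below are placeholders, not real functions:
+ *
+ *	static struct pci_driver adf_example_driver = {
+ *		.id_table = adf_example_pci_tbl,
+ *		.name = "example_qat_pf",
+ *		.probe = adf_example_probe,
+ *		.remove = adf_example_remove,
+ *		.sriov_configure = adf_sriov_configure,
+ *	};
+ */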
+
+int __init adf_init_pf_wq(void)
+{
+	/* Workqueue for PF2VF responses */
+	pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
+
+	return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+	if (pf2vf_resp_wq) {
+		destroy_workqueue(pf2vf_resp_wq);
+		pf2vf_resp_wq = NULL;
+	}
+}
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
new file mode 100644
index 0000000..57d2622
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -0,0 +1,566 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
+{
+	uint32_t div = data >> shift;
+	uint32_t mult = div << shift;
+
+	return data - mult;
+}
+
+static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
+{
+	if (((size - 1) & addr) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
+{
+	int i = ADF_MIN_RING_SIZE;
+
+	for (; i <= ADF_MAX_RING_SIZE; i++)
+		if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
+			return i;
+
+	return ADF_DEFAULT_RING_SIZE;
+}
+
+static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+	spin_lock(&bank->lock);
+	if (bank->ring_mask & (1 << ring)) {
+		spin_unlock(&bank->lock);
+		return -EFAULT;
+	}
+	bank->ring_mask |= (1 << ring);
+	spin_unlock(&bank->lock);
+	return 0;
+}
+
+static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+	spin_lock(&bank->lock);
+	bank->ring_mask &= ~(1 << ring);
+	spin_unlock(&bank->lock);
+}
+
+static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+	spin_lock_bh(&bank->lock);
+	bank->irq_mask |= (1 << ring);
+	spin_unlock_bh(&bank->lock);
+	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
+	WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
+			      bank->irq_coalesc_timer);
+}
+
+static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+	spin_lock_bh(&bank->lock);
+	bank->irq_mask &= ~(1 << ring);
+	spin_unlock_bh(&bank->lock);
+	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
+}
+
+int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
+{
+	if (atomic_add_return(1, ring->inflights) >
+	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
+		atomic_dec(ring->inflights);
+		return -EAGAIN;
+	}
+	spin_lock_bh(&ring->lock);
+	memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
+	       ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+
+	ring->tail = adf_modulo(ring->tail +
+				ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+				ADF_RING_SIZE_MODULO(ring->ring_size));
+	WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
+			    ring->ring_number, ring->tail);
+	spin_unlock_bh(&ring->lock);
+	return 0;
+}
+
+static int adf_handle_response(struct adf_etr_ring_data *ring)
+{
+	uint32_t msg_counter = 0;
+	uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
+
+	while (*msg != ADF_RING_EMPTY_SIG) {
+		ring->callback((uint32_t *)msg);
+		atomic_dec(ring->inflights);
+		*msg = ADF_RING_EMPTY_SIG;
+		ring->head = adf_modulo(ring->head +
+					ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+					ADF_RING_SIZE_MODULO(ring->ring_size));
+		msg_counter++;
+		msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
+	}
+	if (msg_counter > 0)
+		WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
+				    ring->bank->bank_number,
+				    ring->ring_number, ring->head);
+	return 0;
+}
+
+static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
+{
+	uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);
+
+	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
+			      ring->ring_number, ring_config);
+}
+
+static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
+{
+	uint32_t ring_config =
+			BUILD_RESP_RING_CONFIG(ring->ring_size,
+					       ADF_RING_NEAR_WATERMARK_512,
+					       ADF_RING_NEAR_WATERMARK_0);
+
+	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
+			      ring->ring_number, ring_config);
+}
+
+static int adf_init_ring(struct adf_etr_ring_data *ring)
+{
+	struct adf_etr_bank_data *bank = ring->bank;
+	struct adf_accel_dev *accel_dev = bank->accel_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	uint64_t ring_base;
+	uint32_t ring_size_bytes =
+			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+
+	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+	ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+					     ring_size_bytes, &ring->dma_addr,
+					     GFP_KERNEL);
+	if (!ring->base_addr)
+		return -ENOMEM;
+
+	memset(ring->base_addr, 0x7F, ring_size_bytes);
+	/* The base_addr has to be aligned to the size of the buffer */
+	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
+		dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
+		dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
+				  ring->base_addr, ring->dma_addr);
+		return -EFAULT;
+	}
+
+	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
+		adf_configure_tx_ring(ring);
+	else
+		adf_configure_rx_ring(ring);
+
+	ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
+	WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
+			    ring->ring_number, ring_base);
+	spin_lock_init(&ring->lock);
+	return 0;
+}
+
+static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
+{
+	uint32_t ring_size_bytes =
+			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+
+	if (ring->base_addr) {
+		memset(ring->base_addr, 0x7F, ring_size_bytes);
+		dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
+				  ring_size_bytes, ring->base_addr,
+				  ring->dma_addr);
+	}
+}
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+		    uint32_t bank_num, uint32_t num_msgs,
+		    uint32_t msg_size, const char *ring_name,
+		    adf_callback_fn callback, int poll_mode,
+		    struct adf_etr_ring_data **ring_ptr)
+{
+	struct adf_etr_data *transport_data = accel_dev->transport;
+	struct adf_etr_bank_data *bank;
+	struct adf_etr_ring_data *ring;
+	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+	uint32_t ring_num;
+	int ret;
+
+	if (bank_num >= GET_MAX_BANKS(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
+		return -EFAULT;
+	}
+	if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+		dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
+		return -EFAULT;
+	}
+	if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
+			      ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
+		dev_err(&GET_DEV(accel_dev),
+			"Invalid ring size for given msg size\n");
+		return -EFAULT;
+	}
+	if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
+		dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
+			section, ring_name);
+		return -EFAULT;
+	}
+	if (kstrtouint(val, 10, &ring_num)) {
+		dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
+		return -EFAULT;
+	}
+	if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
+		dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
+		return -EFAULT;
+	}
+
+	bank = &transport_data->banks[bank_num];
+	if (adf_reserve_ring(bank, ring_num)) {
+		dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
+			ring_num, ring_name);
+		return -EFAULT;
+	}
+	ring = &bank->rings[ring_num];
+	ring->ring_number = ring_num;
+	ring->bank = bank;
+	ring->callback = callback;
+	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
+	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
+	ring->head = 0;
+	ring->tail = 0;
+	atomic_set(ring->inflights, 0);
+	ret = adf_init_ring(ring);
+	if (ret)
+		goto err;
+
+	/* Enable HW arbitration for the given ring */
+	adf_update_ring_arb(ring);
+
+	if (adf_ring_debugfs_add(ring, ring_name)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Couldn't add ring debugfs entry\n");
+		ret = -EFAULT;
+		goto err;
+	}
+
+	/* Enable interrupts if needed */
+	if (callback && (!poll_mode))
+		adf_enable_ring_irq(bank, ring->ring_number);
+	*ring_ptr = ring;
+	return 0;
+err:
+	adf_cleanup_ring(ring);
+	adf_unreserve_ring(bank, ring_num);
+	adf_update_ring_arb(ring);
+	return ret;
+}
+
+void adf_remove_ring(struct adf_etr_ring_data *ring)
+{
+	struct adf_etr_bank_data *bank = ring->bank;
+
+	/* Disable interrupts for the given ring */
+	adf_disable_ring_irq(bank, ring->ring_number);
+
+	/* Clear the ring configuration and base registers */
+	WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
+			      ring->ring_number, 0);
+	WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
+			    ring->ring_number, 0);
+	adf_ring_debugfs_rm(ring);
+	adf_unreserve_ring(bank, ring->ring_number);
+	/* Disable HW arbitration for the given ring */
+	adf_update_ring_arb(ring);
+	adf_cleanup_ring(ring);
+}
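+
+/*
+ * Illustrative sketch (editor's addition, not part of the driver): typical
+ * lifecycle of a ring as seen by a higher layer.  The section and ring-name
+ * strings are placeholders for the configuration keys a real caller has
+ * registered beforehand:
+ *
+ *	struct adf_etr_ring_data *tx_ring;
+ *	int ret;
+ *
+ *	ret = adf_create_ring(accel_dev, "EXAMPLE_SEC", bank_nr, num_msgs,
+ *			      msg_size, "RingTx0", NULL, 0, &tx_ring);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = adf_send_message(tx_ring, (uint32_t *)&req);
+ *	(-EAGAIN means the ring is full and the caller should retry later.)
+ *
+ *	adf_remove_ring(tx_ring);
+ */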
+
+static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
+{
+	uint32_t empty_rings, i;
+
+	empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
+	empty_rings = ~empty_rings & bank->irq_mask;
+
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
+		if (empty_rings & (1 << i))
+			adf_handle_response(&bank->rings[i]);
+	}
+}
+
+void adf_response_handler(uintptr_t bank_addr)
+{
+	struct adf_etr_bank_data *bank = (void *)bank_addr;
+
+	/* Handle all the responses and reenable IRQs */
+	adf_ring_response_handler(bank);
+	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+				   bank->irq_mask);
+}
+
+static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
+				  const char *section, const char *format,
+				  uint32_t key, uint32_t *value)
+{
+	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);
+
+	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
+		return -EFAULT;
+
+	if (kstrtouint(val_buf, 10, value))
+		return -EFAULT;
+	return 0;
+}
+
+static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
+				  const char *section,
+				  uint32_t bank_num_in_accel)
+{
+	if (adf_get_cfg_int(bank->accel_dev, section,
+			    ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+			    bank_num_in_accel, &bank->irq_coalesc_timer))
+		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+
+	if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
+	    ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
+		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+}
+
+static int adf_init_bank(struct adf_accel_dev *accel_dev,
+			 struct adf_etr_bank_data *bank,
+			 uint32_t bank_num, void __iomem *csr_addr)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_etr_ring_data *ring;
+	struct adf_etr_ring_data *tx_ring;
+	uint32_t i, coalesc_enabled = 0;
+
+	memset(bank, 0, sizeof(*bank));
+	bank->bank_number = bank_num;
+	bank->csr_addr = csr_addr;
+	bank->accel_dev = accel_dev;
+	spin_lock_init(&bank->lock);
+
+	/* Always enable IRQ coalescing so that the optimised flag and
+	 * coalesce register can be used. If coalescing is disabled in the
+	 * config file, just use the minimum time value.
+	 */
+	if ((adf_get_cfg_int(accel_dev, "Accelerator0",
+			     ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
+			     &coalesc_enabled) == 0) && coalesc_enabled)
+		adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
+	else
+		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
+
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+		WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
+		WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
+		ring = &bank->rings[i];
+		if (hw_data->tx_rings_mask & (1 << i)) {
+			ring->inflights =
+				kzalloc_node(sizeof(atomic_t),
+					     GFP_KERNEL,
+					     dev_to_node(&GET_DEV(accel_dev)));
+			if (!ring->inflights)
+				goto err;
+		} else {
+			if (i < hw_data->tx_rx_gap) {
+				dev_err(&GET_DEV(accel_dev),
+					"Invalid tx rings mask config\n");
+				goto err;
+			}
+			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
+			ring->inflights = tx_ring->inflights;
+		}
+	}
+	if (adf_bank_debugfs_add(bank)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to add bank debugfs entry\n");
+		goto err;
+	}
+
+	WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK);
+	WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
+	return 0;
+err:
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+		ring = &bank->rings[i];
+		if (hw_data->tx_rings_mask & (1 << i))
+			kfree(ring->inflights);
+	}
+	return -ENOMEM;
+}
+
+/**
+ * adf_init_etr_data() - Initialize transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function initializes the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_etr_data(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *etr_data;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *csr_addr;
+	uint32_t size;
+	uint32_t num_banks = 0;
+	int i, ret;
+
+	etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
+				dev_to_node(&GET_DEV(accel_dev)));
+	if (!etr_data)
+		return -ENOMEM;
+
+	num_banks = GET_MAX_BANKS(accel_dev);
+	size = num_banks * sizeof(struct adf_etr_bank_data);
+	etr_data->banks = kzalloc_node(size, GFP_KERNEL,
+				       dev_to_node(&GET_DEV(accel_dev)));
+	if (!etr_data->banks) {
+		ret = -ENOMEM;
+		goto err_bank;
+	}
+
+	accel_dev->transport = etr_data;
+	i = hw_data->get_etr_bar_id(hw_data);
+	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
+
+	/* accel_dev->debugfs_dir should always be non-NULL here */
+	etr_data->debug = debugfs_create_dir("transport",
+					     accel_dev->debugfs_dir);
+	if (!etr_data->debug) {
+		dev_err(&GET_DEV(accel_dev),
+			"Unable to create transport debugfs entry\n");
+		ret = -ENOENT;
+		goto err_bank_debug;
+	}
+
+	for (i = 0; i < num_banks; i++) {
+		ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
+				    csr_addr);
+		if (ret)
+			goto err_bank_all;
+	}
+
+	return 0;
+
+err_bank_all:
+	debugfs_remove(etr_data->debug);
+err_bank_debug:
+	kfree(etr_data->banks);
+err_bank:
+	kfree(etr_data);
+	accel_dev->transport = NULL;
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adf_init_etr_data);
+
+static void cleanup_bank(struct adf_etr_bank_data *bank)
+{
+	uint32_t i;
+
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+		struct adf_accel_dev *accel_dev = bank->accel_dev;
+		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+		struct adf_etr_ring_data *ring = &bank->rings[i];
+
+		if (bank->ring_mask & (1 << i))
+			adf_cleanup_ring(ring);
+
+		if (hw_data->tx_rings_mask & (1 << i))
+			kfree(ring->inflights);
+	}
+	adf_bank_debugfs_rm(bank);
+	memset(bank, 0, sizeof(*bank));
+}
+
+static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *etr_data = accel_dev->transport;
+	uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);
+
+	for (i = 0; i < num_banks; i++)
+		cleanup_bank(&etr_data->banks[i]);
+}
+
+/**
+ * adf_cleanup_etr_data() - Clear transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function clears the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *etr_data = accel_dev->transport;
+
+	if (etr_data) {
+		adf_cleanup_etr_handles(accel_dev);
+		debugfs_remove(etr_data->debug);
+		kfree(etr_data->banks);
+		kfree(etr_data);
+		accel_dev->transport = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
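+
+/*
+ * Editor's note (not part of the driver): adf_init_etr_data() and
+ * adf_cleanup_etr_data() are meant to be used as a pair along the QAT
+ * device init/teardown path, e.g.:
+ *
+ *	if (adf_init_etr_data(accel_dev))
+ *		goto err_transport;
+ *	...
+ *	adf_cleanup_etr_data(accel_dev);
+ */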
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h
new file mode 100644
index 0000000..386485b
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport.h
@@ -0,0 +1,63 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_H
+#define ADF_TRANSPORT_H
+
+#include "adf_accel_devices.h"
+
+struct adf_etr_ring_data;
+
+typedef void (*adf_callback_fn)(void *resp_msg);
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+		    uint32_t bank_num, uint32_t num_msgs, uint32_t msg_size,
+		    const char *ring_name, adf_callback_fn callback,
+		    int poll_mode, struct adf_etr_ring_data **ring_ptr);
+
+int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg);
+void adf_remove_ring(struct adf_etr_ring_data *ring);
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
new file mode 100644
index 0000000..80e02a2
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -0,0 +1,169 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include "adf_accel_devices.h"
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_BANK_INT_FLAG_CLEAR_MASK 0xFFFF
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_FLAG	0x170
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE	0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
+	((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
+		ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+				SIZE) & ~0x4)
+/* Max outstanding requests */
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+	((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
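+/*
+ * Worked example (editor's note): for a 16KB ring of 64-byte messages,
+ * ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_16K) = (1 << 7) << 7 = 16384
+ * bytes and ADF_MSG_SIZE_TO_BYTES(ADF_MSG_SIZE_64) = 64 bytes, i.e. 256
+ * message slots.  ADF_SIZE_TO_POW(ADF_MSG_SIZE_64) = 2, so
+ * ADF_MAX_INFLIGHTS(ADF_RING_SIZE_16K, ADF_MSG_SIZE_64) =
+ * (((1 << 7) << 3) >> 2) - 1 = 255, one less than the number of slots.
+ */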
+#define BUILD_RING_CONFIG(size)	\
+	((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+	| (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+	| size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+	((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM)	\
+	| (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+	| size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+	((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_LBASE + (ring << 2), l_base);	\
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_UBASE + (ring << 2), u_base);	\
+} while (0)
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+			ADF_RING_CSR_INT_FLAG, value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+	ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0);	\
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+	ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_INT_COL_CTL, \
+			ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_INT_FLAG_AND_COL, value)
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
new file mode 100644
index 0000000..52340b9
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -0,0 +1,294 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+
+static DEFINE_MUTEX(ring_read_lock);
+static DEFINE_MUTEX(bank_read_lock);
+
+static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
+{
+	struct adf_etr_ring_data *ring = sfile->private;
+
+	mutex_lock(&ring_read_lock);
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+		     ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+		return NULL;
+
+	return ring->base_addr +
+		(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	struct adf_etr_ring_data *ring = sfile->private;
+
+	if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+		     ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+		return NULL;
+
+	return ring->base_addr +
+		(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static int adf_ring_show(struct seq_file *sfile, void *v)
+{
+	struct adf_etr_ring_data *ring = sfile->private;
+	struct adf_etr_bank_data *bank = ring->bank;
+	void __iomem *csr = ring->bank->csr_addr;
+
+	if (v == SEQ_START_TOKEN) {
+		int head, tail, empty;
+
+		head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+					  ring->ring_number);
+		tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+					  ring->ring_number);
+		empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+		seq_puts(sfile, "------- Ring configuration -------\n");
+		seq_printf(sfile, "ring name: %s\n",
+			   ring->ring_debug->ring_name);
+		seq_printf(sfile, "ring num %d, bank num %d\n",
+			   ring->ring_number, ring->bank->bank_number);
+		seq_printf(sfile, "head %x, tail %x, empty: %d\n",
+			   head, tail, (empty & 1 << ring->ring_number)
+			   >> ring->ring_number);
+		seq_printf(sfile, "ring size %d, msg size %d\n",
+			   ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
+			   ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+		seq_puts(sfile, "----------- Ring data ------------\n");
+		return 0;
+	}
+	seq_hex_dump(sfile, "", DUMP_PREFIX_ADDRESS, 32, 4,
+		     v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false);
+	return 0;
+}
+
+static void adf_ring_stop(struct seq_file *sfile, void *v)
+{
+	mutex_unlock(&ring_read_lock);
+}
+
+static const struct seq_operations adf_ring_sops = {
+	.start = adf_ring_start,
+	.next = adf_ring_next,
+	.stop = adf_ring_stop,
+	.show = adf_ring_show
+};
+
+static int adf_ring_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &adf_ring_sops);
+
+	if (!ret) {
+		struct seq_file *seq_f = file->private_data;
+
+		seq_f->private = inode->i_private;
+	}
+	return ret;
+}
+
+static const struct file_operations adf_ring_debug_fops = {
+	.open = adf_ring_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+{
+	struct adf_etr_ring_debug_entry *ring_debug;
+	char entry_name[8];
+
+	ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+	if (!ring_debug)
+		return -ENOMEM;
+
+	strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+	snprintf(entry_name, sizeof(entry_name), "ring_%02d",
+		 ring->ring_number);
+
+	ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
+						ring->bank->bank_debug_dir,
+						ring, &adf_ring_debug_fops);
+	if (!ring_debug->debug) {
+		pr_err("QAT: Failed to create ring debug entry.\n");
+		kfree(ring_debug);
+		return -EFAULT;
+	}
+	ring->ring_debug = ring_debug;
+	return 0;
+}
+
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
+{
+	if (ring->ring_debug) {
+		debugfs_remove(ring->ring_debug->debug);
+		kfree(ring->ring_debug);
+		ring->ring_debug = NULL;
+	}
+}
+
+static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
+{
+	mutex_lock(&bank_read_lock);
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	if (*pos >= ADF_ETR_MAX_RINGS_PER_BANK)
+		return NULL;
+
+	return pos;
+}
+
+static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	if (++(*pos) >= ADF_ETR_MAX_RINGS_PER_BANK)
+		return NULL;
+
+	return pos;
+}
+
+static int adf_bank_show(struct seq_file *sfile, void *v)
+{
+	struct adf_etr_bank_data *bank = sfile->private;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(sfile, "------- Bank %d configuration -------\n",
+			   bank->bank_number);
+	} else {
+		int ring_id = *((int *)v) - 1;
+		struct adf_etr_ring_data *ring = &bank->rings[ring_id];
+		void __iomem *csr = bank->csr_addr;
+		int head, tail, empty;
+
+		if (!(bank->ring_mask & 1 << ring_id))
+			return 0;
+
+		head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+					  ring->ring_number);
+		tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+					  ring->ring_number);
+		empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+		seq_printf(sfile,
+			   "ring num %02d, head %04x, tail %04x, empty: %d\n",
+			   ring->ring_number, head, tail,
+			   (empty & 1 << ring->ring_number) >>
+			   ring->ring_number);
+	}
+	return 0;
+}
+
+static void adf_bank_stop(struct seq_file *sfile, void *v)
+{
+	mutex_unlock(&bank_read_lock);
+}
+
+static const struct seq_operations adf_bank_sops = {
+	.start = adf_bank_start,
+	.next = adf_bank_next,
+	.stop = adf_bank_stop,
+	.show = adf_bank_show
+};
+
+static int adf_bank_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &adf_bank_sops);
+
+	if (!ret) {
+		struct seq_file *seq_f = file->private_data;
+
+		seq_f->private = inode->i_private;
+	}
+	return ret;
+}
+
+static const struct file_operations adf_bank_debug_fops = {
+	.open = adf_bank_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+	struct adf_accel_dev *accel_dev = bank->accel_dev;
+	struct dentry *parent = accel_dev->transport->debug;
+	char name[8];
+
+	snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+	bank->bank_debug_dir = debugfs_create_dir(name, parent);
+	if (!bank->bank_debug_dir) {
+		pr_err("QAT: Failed to create bank debug dir.\n");
+		return -EFAULT;
+	}
+
+	bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
+						   bank->bank_debug_dir, bank,
+						   &adf_bank_debug_fops);
+	if (!bank->bank_debug_cfg) {
+		pr_err("QAT: Failed to create bank debug entry.\n");
+		debugfs_remove(bank->bank_debug_dir);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
+{
+	debugfs_remove(bank->bank_debug_cfg);
+	debugfs_remove(bank->bank_debug_dir);
+}
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
new file mode 100644
index 0000000..bb88336
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -0,0 +1,117 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_INTRN_H
+#define ADF_TRANSPORT_INTRN_H
+
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include "adf_transport.h"
+
+struct adf_etr_ring_debug_entry {
+	char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	struct dentry *debug;
+};
+
+struct adf_etr_ring_data {
+	void *base_addr;
+	atomic_t *inflights;
+	spinlock_t lock;	/* protects ring data struct */
+	adf_callback_fn callback;
+	struct adf_etr_bank_data *bank;
+	dma_addr_t dma_addr;
+	uint16_t head;
+	uint16_t tail;
+	uint8_t ring_number;
+	uint8_t ring_size;
+	uint8_t msg_size;
+	uint8_t reserved;
+	struct adf_etr_ring_debug_entry *ring_debug;
+} __packed;
+
+struct adf_etr_bank_data {
+	struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
+	struct tasklet_struct resp_handler;
+	void __iomem *csr_addr;
+	struct adf_accel_dev *accel_dev;
+	uint32_t irq_coalesc_timer;
+	uint16_t ring_mask;
+	uint16_t irq_mask;
+	spinlock_t lock;	/* protects bank data struct */
+	struct dentry *bank_debug_dir;
+	struct dentry *bank_debug_cfg;
+	uint32_t bank_number;
+} __packed;
+
+struct adf_etr_data {
+	struct adf_etr_bank_data *banks;
+	struct dentry *debug;
+};
+
+void adf_response_handler(uintptr_t bank_addr);
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank);
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
+#else
+static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+	return 0;
+}
+
+#define adf_bank_debugfs_rm(bank) do {} while (0)
+
+static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
+				       const char *name)
+{
+	return 0;
+}
+
+#define adf_ring_debugfs_rm(ring) do {} while (0)
+#endif
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
new file mode 100644
index 0000000..cd5f37d
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -0,0 +1,92 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+/**
+ * adf_vf2pf_init() - send init msg to PF
+ * @accel_dev:  Pointer to acceleration VF device.
+ *
+ * Function sends an init message from the VF to the PF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+		(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (adf_iov_putmsg(accel_dev, msg, 0)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Init event to PF\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_init);
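+
+/*
+ * Usage sketch (illustrative only; the calling code shown here is assumed
+ * rather than taken from this driver): a VF init path calls this once the
+ * device is up and treats a non-zero return as fatal, e.g.
+ *
+ *	if (adf_vf2pf_init(accel_dev))
+ *		return -EFAULT;
+ */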
+
+/**
+ * adf_vf2pf_shutdown() - send shutdown msg to PF
+ * @accel_dev:  Pointer to acceleration VF device.
+ *
+ * Function sends a shutdown message from the VF to the PF.
+ *
+ * Return: void
+ */
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+	    (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
+		if (adf_iov_putmsg(accel_dev, msg, 0))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to send Shutdown event to PF\n");
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
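+
+/*
+ * Message layout sketch, derived from the two helpers above: each VF2PF
+ * message sent from this file is a single 32-bit word of the form
+ *
+ *	msg = ADF_VF2PF_MSGORIGIN_SYSTEM |
+ *	      (<message type> << ADF_VF2PF_MSGTYPE_SHIFT);
+ *
+ * which adf_iov_putmsg() (implemented elsewhere in qat_common) delivers
+ * to the PF.
+ */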
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
new file mode 100644
index 0000000..4a73fc7
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -0,0 +1,335 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_cfg_common.h"
+#include "adf_transport_access_macros.h"
+#include "adf_transport_internal.h"
+#include "adf_pf2vf_msg.h"
+
+#define ADF_VINTSOU_OFFSET	0x204
+#define ADF_VINTSOU_BUN		BIT(0)
+#define ADF_VINTSOU_PF2VF	BIT(1)
+
+static struct workqueue_struct *adf_vf_stop_wq;
+
+struct adf_vf_stop_data {
+	struct adf_accel_dev *accel_dev;
+	struct work_struct work;
+};
+
+static int adf_enable_msi(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+	int stat = pci_enable_msi(pci_dev_info->pci_dev);
+
+	if (stat) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to enable MSI interrupts\n");
+		return stat;
+	}
+
+	accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+	if (!accel_dev->vf.irq_name)
+		return -ENOMEM;
+
+	return stat;
+}
+
+static void adf_disable_msi(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	kfree(accel_dev->vf.irq_name);
+	pci_disable_msi(pdev);
+}
+
+static void adf_dev_stop_async(struct work_struct *work)
+{
+	struct adf_vf_stop_data *stop_data =
+		container_of(work, struct adf_vf_stop_data, work);
+	struct adf_accel_dev *accel_dev = stop_data->accel_dev;
+
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+
+	/* Re-enable PF2VF interrupts */
+	adf_enable_pf2vf_interrupts(accel_dev);
+	kfree(stop_data);
+}
+
+static void adf_pf2vf_bh_handler(void *data)
+{
+	struct adf_accel_dev *accel_dev = data;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
+	u32 msg;
+
+	/* Read the message from PF */
+	msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));
+
+	if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
+		/* Ignore legacy non-system (non-kernel) PF2VF messages */
+		goto err;
+
+	switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
+	case ADF_PF2VF_MSGTYPE_RESTARTING: {
+		struct adf_vf_stop_data *stop_data;
+
+		dev_dbg(&GET_DEV(accel_dev),
+			"Restarting msg received from PF 0x%x\n", msg);
+
+		clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
+		stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
+		if (!stop_data) {
+			dev_err(&GET_DEV(accel_dev),
+				"Couldn't schedule stop for vf_%d\n",
+				accel_dev->accel_id);
+			return;
+		}
+		stop_data->accel_dev = accel_dev;
+		INIT_WORK(&stop_data->work, adf_dev_stop_async);
+		queue_work(adf_vf_stop_wq, &stop_data->work);
+		/* To ack, clear the PF2VFINT bit */
+		msg &= ~ADF_PF2VF_INT;
+		ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
+		return;
+	}
+	case ADF_PF2VF_MSGTYPE_VERSION_RESP:
+		dev_dbg(&GET_DEV(accel_dev),
+			"Version resp received from PF 0x%x\n", msg);
+		accel_dev->vf.pf_version =
+			(msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
+			ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
+		accel_dev->vf.compatible =
+			(msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
+			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		complete(&accel_dev->vf.iov_msg_completion);
+		break;
+	default:
+		goto err;
+	}
+
+	/* To ack, clear the PF2VFINT bit */
+	msg &= ~ADF_PF2VF_INT;
+	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
+
+	/* Re-enable PF2VF interrupts */
+	adf_enable_pf2vf_interrupts(accel_dev);
+	return;
+err:
+	dev_err(&GET_DEV(accel_dev),
+		"Unknown message from PF (0x%x); leaving PF2VF ints disabled\n",
+		msg);
+}
+
+static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+	tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
+		     (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
+
+	mutex_init(&accel_dev->vf.vf2pf_lock);
+	return 0;
+}
+
+static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+	tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
+	tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
+	mutex_destroy(&accel_dev->vf.vf2pf_lock);
+}
+
+static irqreturn_t adf_isr(int irq, void *privdata)
+{
+	struct adf_accel_dev *accel_dev = privdata;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
+	u32 v_int;
+
+	/* Read VF INT source CSR to determine the source of VF interrupt */
+	v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
+
+	/* Check for PF2VF interrupt */
+	if (v_int & ADF_VINTSOU_PF2VF) {
+		/* Disable PF to VF interrupt */
+		adf_disable_pf2vf_interrupts(accel_dev);
+
+		/* Schedule tasklet to handle interrupt BH */
+		tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
+		return IRQ_HANDLED;
+	}
+
+	/* Check bundle interrupt */
+	if (v_int & ADF_VINTSOU_BUN) {
+		struct adf_etr_data *etr_data = accel_dev->transport;
+		struct adf_etr_bank_data *bank = &etr_data->banks[0];
+
+		/* Disable Flag and Coalesce Ring Interrupts */
+		WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+					   0);
+		tasklet_hi_schedule(&bank->resp_handler);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+	unsigned int cpu;
+	int ret;
+
+	snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
+		 "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+	ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
+			  (void *)accel_dev);
+	if (ret) {
+		dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
+			accel_dev->vf.irq_name);
+		return ret;
+	}
+	cpu = accel_dev->accel_id % num_online_cpus();
+	irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
+
+	return ret;
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *priv_data = accel_dev->transport;
+
+	tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
+		     (unsigned long)priv_data->banks);
+	return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *priv_data = accel_dev->transport;
+
+	tasklet_disable(&priv_data->banks[0].resp_handler);
+	tasklet_kill(&priv_data->banks[0].resp_handler);
+}
+
+/**
+ * adf_vf_isr_resource_free() - Free IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function frees the interrupts used by the acceleration device virtual
+ * function.
+ */
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	irq_set_affinity_hint(pdev->irq, NULL);
+	free_irq(pdev->irq, (void *)accel_dev);
+	adf_cleanup_bh(accel_dev);
+	adf_cleanup_pf2vf_bh(accel_dev);
+	adf_disable_msi(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free);
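+
+/*
+ * Teardown note: the order above is the reverse of the setup performed in
+ * adf_vf_isr_resource_alloc() below (MSI enable, PF2VF tasklet, bundle
+ * tasklet, IRQ request), and this function is also reused as the
+ * error-unwind path of that setup.
+ */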
+
+/**
+ * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function allocates the interrupts used by the acceleration device virtual
+ * function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+	if (adf_enable_msi(accel_dev))
+		goto err_out;
+
+	if (adf_setup_pf2vf_bh(accel_dev))
+		goto err_out;
+
+	if (adf_setup_bh(accel_dev))
+		goto err_out;
+
+	if (adf_request_msi_irq(accel_dev))
+		goto err_out;
+
+	return 0;
+err_out:
+	adf_vf_isr_resource_free(accel_dev);
+	return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
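+
+/*
+ * Usage sketch (illustrative; the probe/remove code is assumed and not part
+ * of this file): a VF device driver pairs the two entry points, calling
+ * adf_vf_isr_resource_alloc() from its probe path and
+ * adf_vf_isr_resource_free() from its remove path.
+ */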
+
+int __init adf_init_vf_wq(void)
+{
+	adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
+
+	return !adf_vf_stop_wq ? -EFAULT : 0;
+}
+
+void adf_exit_vf_wq(void)
+{
+	if (adf_vf_stop_wq)
+		destroy_workqueue(adf_vf_stop_wq);
+
+	adf_vf_stop_wq = NULL;
+}
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
new file mode 100644
index 0000000..46747f0
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h
@@ -0,0 +1,318 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <linux/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+		(((val) & (mask)) << (bitpos))) ; }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+	(((flags) >> (bitpos)) & (mask))
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+	ICP_QAT_FW_COMN_RESP_SERV_NULL,
+	ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+	ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+	ICP_QAT_FW_COMN_REQ_NULL = 0,
+	ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+	ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+	ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+	ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+	ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+	union {
+		struct {
+			uint64_t content_desc_addr;
+			uint16_t content_desc_resrvd1;
+			uint8_t content_desc_params_sz;
+			uint8_t content_desc_hdr_resrvd2;
+			uint32_t content_desc_resrvd3;
+		} s;
+		struct {
+			uint32_t serv_specif_fields[4];
+		} s1;
+	} u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+	uint64_t opaque_data;
+	uint64_t src_data_addr;
+	uint64_t dest_data_addr;
+	uint32_t src_length;
+	uint32_t dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+	uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+	uint8_t resrvd1;
+	uint8_t service_cmd_id;
+	uint8_t service_type;
+	uint8_t hdr_flags;
+	uint16_t serv_specif_flags;
+	uint16_t comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+	uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+	struct icp_qat_fw_comn_req_hdr comn_hdr;
+	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	struct icp_qat_fw_comn_req_mid comn_mid;
+	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+	uint8_t xlat_err_code;
+	uint8_t cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+	uint8_t resrvd1;
+	uint8_t service_id;
+	uint8_t response_type;
+	uint8_t hdr_flags;
+	struct icp_qat_fw_comn_error comn_error;
+	uint8_t comn_status;
+	uint8_t cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+	struct icp_qat_fw_comn_resp_hdr comn_hdr;
+	uint64_t opaque_data;
+	uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+	icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+	icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+	icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+	icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+	ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+	ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+	QAT_FIELD_GET(hdr_flags, \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+	ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+	(hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+	QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+	ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+	(((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+	 ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
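+
+/*
+ * Worked example (derived from the definitions above, not from a firmware
+ * specification), given a struct icp_qat_fw_comn_req named req:
+ *
+ *	req.comn_hdr.hdr_flags =
+ *		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ *	valid = ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(req.comn_hdr);
+ *
+ * With the values above, hdr_flags becomes 0x80 and valid reads back as 1.
+ */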
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+	((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+	 | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+			QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+			QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+			QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+	((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+	>> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+	{ ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+	((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+	 & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+	(((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+	{ ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+	((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
+	((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
+	QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+	(((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
+	QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
+	(((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
+	QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
+	(((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
+	QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+	QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+	QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+	QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+	QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_SSM_ERROR -14
+#define ERR_CODE_ENDPOINT_ERROR -15
+
+enum icp_qat_fw_slice {
+	ICP_QAT_FW_SLICE_NULL = 0,
+	ICP_QAT_FW_SLICE_CIPHER = 1,
+	ICP_QAT_FW_SLICE_AUTH = 2,
+	ICP_QAT_FW_SLICE_DRAM_RD = 3,
+	ICP_QAT_FW_SLICE_DRAM_WR = 4,
+	ICP_QAT_FW_SLICE_COMP = 5,
+	ICP_QAT_FW_SLICE_XLAT = 6,
+	ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
new file mode 100644
index 0000000..72a59fa
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
@@ -0,0 +1,131 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
+#define _ICP_QAT_FW_INIT_ADMIN_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_init_admin_cmd_id {
+	ICP_QAT_FW_INIT_ME = 0,
+	ICP_QAT_FW_TRNG_ENABLE = 1,
+	ICP_QAT_FW_TRNG_DISABLE = 2,
+	ICP_QAT_FW_CONSTANTS_CFG = 3,
+	ICP_QAT_FW_STATUS_GET = 4,
+	ICP_QAT_FW_COUNTERS_GET = 5,
+	ICP_QAT_FW_LOOPBACK = 6,
+	ICP_QAT_FW_HEARTBEAT_SYNC = 7,
+	ICP_QAT_FW_HEARTBEAT_GET = 8
+};
+
+enum icp_qat_fw_init_admin_resp_status {
+	ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
+	ICP_QAT_FW_INIT_RESP_STATUS_FAIL
+};
+
+struct icp_qat_fw_init_admin_req {
+	uint16_t init_cfg_sz;
+	uint8_t resrvd1;
+	uint8_t init_admin_cmd_id;
+	uint32_t resrvd2;
+	uint64_t opaque_data;
+	uint64_t init_cfg_ptr;
+	uint64_t resrvd3;
+};
+
+struct icp_qat_fw_init_admin_resp_hdr {
+	uint8_t flags;
+	uint8_t resrvd1;
+	uint8_t status;
+	uint8_t init_admin_cmd_id;
+};
+
+struct icp_qat_fw_init_admin_resp_pars {
+	union {
+		uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4];
+		struct {
+			uint32_t version_patch_num;
+			uint8_t context_id;
+			uint8_t ae_id;
+			uint16_t resrvd1;
+			uint64_t resrvd2;
+		} s1;
+		struct {
+			uint64_t req_rec_count;
+			uint64_t resp_sent_count;
+		} s2;
+	} u;
+};
+
+struct icp_qat_fw_init_admin_resp {
+	struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr;
+	union {
+		uint32_t resrvd2;
+		struct {
+			uint16_t version_minor_num;
+			uint16_t version_major_num;
+		} s;
+	} u;
+	uint64_t opaque_data;
+	struct icp_qat_fw_init_admin_resp_pars init_resp_pars;
+};
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
+	ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
+	ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, \
+		 ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
+		 ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
new file mode 100644
index 0000000..c8d2669
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
@@ -0,0 +1,404 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+	ICP_QAT_FW_LA_CMD_CIPHER = 0,
+	ICP_QAT_FW_LA_CMD_AUTH = 1,
+	ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+	ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+	ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+	ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+	ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+	ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+	ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+	ICP_QAT_FW_LA_CMD_MGF1 = 9,
+	ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+	ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+	ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+	struct icp_qat_fw_comn_req_hdr comn_hdr;
+	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	struct icp_qat_fw_comn_req_mid comn_mid;
+	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS	10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO	2
+#define ICP_QAT_FW_LA_CCM_PROTO	1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK   0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+	cmp_auth, ret_auth, update_state, \
+	ciph_iv, ciphcfg, partial) \
+	(((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+	QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+	((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+	QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+	((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+	QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+	((proto & QAT_LA_PROTO_MASK) << \
+	QAT_LA_PROTO_BITPOS)	| \
+	((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+	QAT_LA_CMP_AUTH_RES_BITPOS) | \
+	((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+	QAT_LA_RET_AUTH_RES_BITPOS) | \
+	((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+	QAT_LA_UPDATE_STATE_BITPOS) | \
+	((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+	QAT_LA_CIPH_IV_FLD_BITPOS) | \
+	((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+	QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+	((partial & QAT_LA_PARTIAL_MASK) << \
+	QAT_LA_PARTIAL_BITPOS))
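+
+/*
+ * Flag layout summary, derived from the BITPOS/MASK definitions above:
+ *
+ *	bit  12    ZUC 3G protocol flag
+ *	bit  11    GCM IV length is 12 octets
+ *	bit  10    digest in buffer
+ *	bits 9:7   protocol (0 none, 1 CCM, 2 GCM, 4 SNOW 3G)
+ *	bit  6     compare auth result
+ *	bit  5     return auth result
+ *	bit  4     update state
+ *	bit  3     cipher/auth config offset in SHRAM (0: in CD setup)
+ *	bit  2     cipher IV field is 16-byte data (0: 64-bit pointer)
+ *	bits 1:0   partial state (0 none, 1 start, 2 end, 3 mid)
+ */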
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+	QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+	QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+	QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+	QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+	QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+	QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+	QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+	QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+	QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+	QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+	QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+	QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+	QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+	QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+	QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+	QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+	QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+	QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+	QAT_LA_PARTIAL_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+	union {
+		struct {
+			uint64_t content_desc_addr;
+			uint16_t content_desc_resrvd1;
+			uint8_t content_desc_params_sz;
+			uint8_t content_desc_hdr_resrvd2;
+			uint32_t content_desc_resrvd3;
+		} s;
+		struct {
+			uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		} s1;
+	} u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+	union {
+		struct {
+			uint64_t content_desc_addr;
+			uint16_t content_desc_resrvd1;
+			uint8_t content_desc_params_sz;
+			uint8_t content_desc_hdr_resrvd2;
+			uint32_t content_desc_resrvd3;
+		} s;
+		struct {
+			uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		} sl;
+	} u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+	uint8_t cipher_state_sz;
+	uint8_t cipher_key_sz;
+	uint8_t cipher_cfg_offset;
+	uint8_t next_curr_id;
+	uint8_t cipher_padding_sz;
+	uint8_t resrvd1;
+	uint16_t resrvd2;
+	uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+	uint32_t resrvd1;
+	uint8_t resrvd2;
+	uint8_t hash_flags;
+	uint8_t hash_cfg_offset;
+	uint8_t next_curr_id;
+	uint8_t resrvd3;
+	uint8_t outer_prefix_sz;
+	uint8_t final_sz;
+	uint8_t inner_res_sz;
+	uint8_t resrvd4;
+	uint8_t inner_state1_sz;
+	uint8_t inner_state2_offset;
+	uint8_t inner_state2_sz;
+	uint8_t outer_config_offset;
+	uint8_t outer_state1_sz;
+	uint8_t outer_res_sz;
+	uint8_t outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+	uint8_t cipher_state_sz;
+	uint8_t cipher_key_sz;
+	uint8_t cipher_cfg_offset;
+	uint8_t next_curr_id_cipher;
+	uint8_t cipher_padding_sz;
+	uint8_t hash_flags;
+	uint8_t hash_cfg_offset;
+	uint8_t next_curr_id_auth;
+	uint8_t resrvd1;
+	uint8_t outer_prefix_sz;
+	uint8_t final_sz;
+	uint8_t inner_res_sz;
+	uint8_t resrvd2;
+	uint8_t inner_state1_sz;
+	uint8_t inner_state2_offset;
+	uint8_t inner_state2_sz;
+	uint8_t outer_config_offset;
+	uint8_t outer_state1_sz;
+	uint8_t outer_res_sz;
+	uint8_t outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX	240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+	(sizeof(struct icp_qat_fw_la_cipher_req_params))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+	uint32_t cipher_offset;
+	uint32_t cipher_length;
+	union {
+		uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		struct {
+			uint64_t cipher_IV_ptr;
+			uint64_t resrvd1;
+		} s;
+	} u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+	uint32_t auth_off;
+	uint32_t auth_len;
+	union {
+		uint64_t auth_partial_st_prefix;
+		uint64_t aad_adr;
+	} u1;
+	uint64_t auth_res_addr;
+	union {
+		uint8_t inner_prefix_sz;
+		uint8_t aad_sz;
+	} u2;
+	uint8_t resrvd1;
+	uint8_t hash_state_sz;
+	uint8_t auth_res_sz;
+} __packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+	uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+	union {
+		uint8_t inner_prefix_sz;
+		uint8_t aad_sz;
+	} u2;
+	uint8_t resrvd1;
+	uint16_t resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+	struct icp_qat_fw_comn_resp_hdr comn_resp;
+	uint64_t opaque_data;
+	uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+	((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+	  ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+	((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+	((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+	(((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+	((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+	((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+	((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+	>> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+	((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+	((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+	(((cd_ctrl_hdr_t)->next_curr_id_auth) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+	((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+	((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
new file mode 100644
index 0000000..2ffef3e
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -0,0 +1,88 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
+#define __ICP_QAT_FW_LOADER_HANDLE_H__
+#include "icp_qat_uclo.h"
+
+struct icp_qat_fw_loader_ae_data {
+	unsigned int state;
+	unsigned int ustore_size;
+	unsigned int free_addr;
+	unsigned int free_size;
+	unsigned int live_ctx_mask;
+};
+
+struct icp_qat_fw_loader_hal_handle {
+	struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
+	unsigned int ae_mask;
+	unsigned int slice_mask;
+	unsigned int revision_id;
+	unsigned int ae_max_num;
+	unsigned int upc_mask;
+	unsigned int max_ustore;
+};
+
+struct icp_qat_fw_loader_handle {
+	struct icp_qat_fw_loader_hal_handle *hal_handle;
+	struct pci_dev *pci_dev;
+	void *obj_handle;
+	void *sobj_handle;
+	bool fw_auth;
+	void __iomem *hal_sram_addr_v;
+	void __iomem *hal_cap_g_ctl_csr_addr_v;
+	void __iomem *hal_cap_ae_xfer_csr_addr_v;
+	void __iomem *hal_cap_ae_local_csr_addr_v;
+	void __iomem *hal_ep_csr_addr_v;
+};
+
+struct icp_firml_dram_desc {
+	void __iomem *dram_base_addr;
+	void *dram_base_addr_v;
+	dma_addr_t dram_bus_addr;
+	u64 dram_size;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
new file mode 100644
index 0000000..0d7a9b5
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
@@ -0,0 +1,112 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+	* Redistributions of source code must retain the above copyright
+	  notice, this list of conditions and the following disclaimer.
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in
+	  the documentation and/or other materials provided with the
+	  distribution.
+	* Neither the name of Intel Corporation nor the names of its
+	  contributors may be used to endorse or promote products derived
+	  from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_PKE_
+#define _ICP_QAT_FW_PKE_
+
+#include "icp_qat_fw.h"
+
+struct icp_qat_fw_req_hdr_pke_cd_pars {
+	u64 content_desc_addr;
+	u32 content_desc_resrvd;
+	u32 func_id;
+};
+
+struct icp_qat_fw_req_pke_mid {
+	u64 opaque;
+	u64 src_data_addr;
+	u64 dest_data_addr;
+};
+
+struct icp_qat_fw_req_pke_hdr {
+	u8 resrvd1;
+	u8 resrvd2;
+	u8 service_type;
+	u8 hdr_flags;
+	u16 comn_req_flags;
+	u16 resrvd4;
+	struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
+};
+
+struct icp_qat_fw_pke_request {
+	struct icp_qat_fw_req_pke_hdr pke_hdr;
+	struct icp_qat_fw_req_pke_mid pke_mid;
+	u8 output_param_count;
+	u8 input_param_count;
+	u16 resrvd1;
+	u32 resrvd2;
+	u64 next_req_adr;
+};
+
+struct icp_qat_fw_resp_pke_hdr {
+	u8 resrvd1;
+	u8 resrvd2;
+	u8 response_type;
+	u8 hdr_flags;
+	u16 comn_resp_flags;
+	u16 resrvd4;
+};
+
+struct icp_qat_fw_pke_resp {
+	struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
+	u64 opaque;
+	u64 src_data_addr;
+	u64 dest_data_addr;
+};
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS              7
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK                0x1
+#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \
+	QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \
+		ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \
+		QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+		QAT_COMN_RESP_PKE_STATUS_MASK)
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \
+	QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+		ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
+		ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h
new file mode 100644
index 0000000..7187917
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h
@@ -0,0 +1,156 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_HAL_H
+#define __ICP_QAT_HAL_H
+#include "icp_qat_fw_loader_handle.h"
+
+enum hal_global_csr {
+	MISC_CONTROL = 0x04,
+	ICP_RESET = 0x0c,
+	ICP_GLOBAL_CLK_ENABLE = 0x50
+};
+
+enum hal_ae_csr {
+	USTORE_ADDRESS = 0x000,
+	USTORE_DATA_LOWER = 0x004,
+	USTORE_DATA_UPPER = 0x008,
+	ALU_OUT = 0x010,
+	CTX_ARB_CNTL = 0x014,
+	CTX_ENABLES = 0x018,
+	CC_ENABLE = 0x01c,
+	CSR_CTX_POINTER = 0x020,
+	CTX_STS_INDIRECT = 0x040,
+	ACTIVE_CTX_STATUS = 0x044,
+	CTX_SIG_EVENTS_INDIRECT = 0x048,
+	CTX_SIG_EVENTS_ACTIVE = 0x04c,
+	CTX_WAKEUP_EVENTS_INDIRECT = 0x050,
+	LM_ADDR_0_INDIRECT = 0x060,
+	LM_ADDR_1_INDIRECT = 0x068,
+	INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0,
+	INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8,
+	FUTURE_COUNT_SIGNAL_INDIRECT = 0x078,
+	TIMESTAMP_LOW = 0x0c0,
+	TIMESTAMP_HIGH = 0x0c4,
+	PROFILE_COUNT = 0x144,
+	SIGNATURE_ENABLE = 0x150,
+	AE_MISC_CONTROL = 0x160,
+	LOCAL_CSR_STATUS = 0x180,
+};
+
+enum fcu_csr {
+	FCU_CONTROL           = 0x8c0,
+	FCU_STATUS            = 0x8c4,
+	FCU_STATUS1           = 0x8c8,
+	FCU_DRAM_ADDR_LO      = 0x8cc,
+	FCU_DRAM_ADDR_HI      = 0x8d0,
+	FCU_RAMBASE_ADDR_HI   = 0x8d4,
+	FCU_RAMBASE_ADDR_LO   = 0x8d8
+};
+
+enum fcu_cmd {
+	FCU_CTRL_CMD_NOOP  = 0,
+	FCU_CTRL_CMD_AUTH  = 1,
+	FCU_CTRL_CMD_LOAD  = 2,
+	FCU_CTRL_CMD_START = 3
+};
+
+enum fcu_sts {
+	FCU_STS_NO_STS    = 0,
+	FCU_STS_VERI_DONE = 1,
+	FCU_STS_LOAD_DONE = 2,
+	FCU_STS_VERI_FAIL = 3,
+	FCU_STS_LOAD_FAIL = 4,
+	FCU_STS_BUSY      = 5
+};
+#define UA_ECS                      (0x1 << 31)
+#define ACS_ABO_BITPOS              31
+#define ACS_ACNO                    0x7
+#define CE_ENABLE_BITPOS            0x8
+#define CE_LMADDR_0_GLOBAL_BITPOS   16
+#define CE_LMADDR_1_GLOBAL_BITPOS   17
+#define CE_NN_MODE_BITPOS           20
+#define CE_REG_PAR_ERR_BITPOS       25
+#define CE_BREAKPOINT_BITPOS        27
+#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29
+#define CE_INUSE_CONTEXTS_BITPOS    31
+#define CE_NN_MODE                  (0x1 << CE_NN_MODE_BITPOS)
+#define CE_INUSE_CONTEXTS           (0x1 << CE_INUSE_CONTEXTS_BITPOS)
+#define XCWE_VOLUNTARY              (0x1)
+#define LCS_STATUS          (0x1)
+#define MMC_SHARE_CS_BITPOS         2
+#define GLOBAL_CSR                0xA00
+#define FCU_CTRL_AE_POS     0x8
+#define FCU_AUTH_STS_MASK   0x7
+#define FCU_STS_DONE_POS    0x9
+#define FCU_STS_AUTHFWLD_POS 0x8
+#define FCU_LOADED_AE_POS   0x16
+#define FW_AUTH_WAIT_PERIOD 10
+#define FW_AUTH_MAX_RETRY   300
+
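+/*
+ * CSR accessor macros: global/capability CSRs are addressed directly,
+ * while the per-AE local CSR and transfer register windows are selected
+ * by shifting the (masked) accelerator engine number into the address.
+ */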
+#define SET_CAP_CSR(handle, csr, val) \
+	ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val)
+#define GET_CAP_CSR(handle, csr) \
+	ADF_CSR_RD(handle->hal_cap_g_ctl_csr_addr_v, csr)
+#define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val)
+#define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr)
+#define AE_CSR(handle, ae) \
+	((char __iomem *)handle->hal_cap_ae_local_csr_addr_v + \
+	((ae & handle->hal_handle->ae_mask) << 12))
+#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr))
+#define SET_AE_CSR(handle, ae, csr, val) \
+	ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
+#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
+#define AE_XFER(handle, ae) \
+	((char __iomem *)handle->hal_cap_ae_xfer_csr_addr_v + \
+	((ae & handle->hal_handle->ae_mask) << 12))
+#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
+	((reg & 0xff) << 2))
+#define SET_AE_XFER(handle, ae, reg, val) \
+	ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
+#define SRAM_WRITE(handle, addr, val) \
+	ADF_CSR_WR(handle->hal_sram_addr_v, addr, val)
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
new file mode 100644
index 0000000..121d5e6
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h
@@ -0,0 +1,305 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+	ICP_QAT_HW_AE_0 = 0,
+	ICP_QAT_HW_AE_1 = 1,
+	ICP_QAT_HW_AE_2 = 2,
+	ICP_QAT_HW_AE_3 = 3,
+	ICP_QAT_HW_AE_4 = 4,
+	ICP_QAT_HW_AE_5 = 5,
+	ICP_QAT_HW_AE_6 = 6,
+	ICP_QAT_HW_AE_7 = 7,
+	ICP_QAT_HW_AE_8 = 8,
+	ICP_QAT_HW_AE_9 = 9,
+	ICP_QAT_HW_AE_10 = 10,
+	ICP_QAT_HW_AE_11 = 11,
+	ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+	ICP_QAT_HW_QAT_0 = 0,
+	ICP_QAT_HW_QAT_1 = 1,
+	ICP_QAT_HW_QAT_2 = 2,
+	ICP_QAT_HW_QAT_3 = 3,
+	ICP_QAT_HW_QAT_4 = 4,
+	ICP_QAT_HW_QAT_5 = 5,
+	ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+	ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+	ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+	ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+	ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+	ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+	ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+	ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+	ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+	ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+	ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+	ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+	ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+	ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+	ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+	ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+	ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+	ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+	ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+	ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+	ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+	ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+	ICP_QAT_HW_AUTH_MODE0 = 0,
+	ICP_QAT_HW_AUTH_MODE1 = 1,
+	ICP_QAT_HW_AUTH_MODE2 = 2,
+	ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+	uint32_t config;
+	uint32_t reserved;
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
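+/*
+ * Build the 32-bit auth config word: hash mode, algorithm id, SHA-3
+ * selector/padding bits and the comparator (digest) length.
+ */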
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+	(((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+	((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+	(((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
+	 QAT_AUTH_ALGO_SHA3_BITPOS) | \
+	 (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
+	(algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
+	& QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+	((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+struct icp_qat_hw_auth_counter {
+	__be32 counter;
+	uint32_t reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+	(((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+	struct icp_qat_hw_auth_config auth_config;
+	struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~((n) - 1)))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+	ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+	struct icp_qat_hw_auth_setup inner_setup;
+	uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+	struct icp_qat_hw_auth_setup outer_setup;
+	uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+	struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+	ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+	ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+	ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+	ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+	ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+	ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+	ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+	ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+	ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+	ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+	ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+	ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+	ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+	ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+	ICP_QAT_HW_CIPHER_F8_MODE = 3,
+	ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+	ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+	uint32_t val;
+	uint32_t reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+	ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+	ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+	ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+	ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
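+/*
+ * Build the 32-bit cipher config word: mode, algorithm id, key-convert
+ * flag and direction (encrypt/decrypt).
+ */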
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+	(((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+	((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+	((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+	((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+	QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+	QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+	QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+	QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+	QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+	QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
+
+struct icp_qat_hw_cipher_aes256_f8 {
+	struct icp_qat_hw_cipher_config cipher_config;
+	uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_cipher_algo_blk {
+	struct icp_qat_hw_cipher_aes256_f8 aes;
+} __aligned(64);
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
new file mode 100644
index 0000000..5d1ee7e
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -0,0 +1,528 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_UCLO_H__
+#define __ICP_QAT_UCLO_H__
+
+#define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000
+#define ICP_QAT_AC_C62X_DEV_TYPE   0x01000000
+#define ICP_QAT_AC_C3XXX_DEV_TYPE  0x02000000
+#define ICP_QAT_UCLO_MAX_AE       12
+#define ICP_QAT_UCLO_MAX_CTX      8
+#define ICP_QAT_UCLO_MAX_UIMAGE   (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
+#define ICP_QAT_UCLO_MAX_USTORE   0x4000
+#define ICP_QAT_UCLO_MAX_XFER_REG 128
+#define ICP_QAT_UCLO_MAX_GPR_REG  128
+#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
+#define ICP_QAT_UCLO_AE_ALL_CTX   0xff
+#define ICP_QAT_UOF_OBJID_LEN     8
+#define ICP_QAT_UOF_FID 0xc6c2
+#define ICP_QAT_UOF_MAJVER 0x4
+#define ICP_QAT_UOF_MINVER 0x11
+#define ICP_QAT_UOF_OBJS        "UOF_OBJS"
+#define ICP_QAT_UOF_STRT        "UOF_STRT"
+#define ICP_QAT_UOF_IMAG        "UOF_IMAG"
+#define ICP_QAT_UOF_IMEM        "UOF_IMEM"
+#define ICP_QAT_UOF_LOCAL_SCOPE     1
+#define ICP_QAT_UOF_INIT_EXPR               0
+#define ICP_QAT_UOF_INIT_REG                1
+#define ICP_QAT_UOF_INIT_REG_CTX            2
+#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP   3
+#define ICP_QAT_SUOF_OBJ_ID_LEN             8
+#define ICP_QAT_SUOF_FID  0x53554f46
+#define ICP_QAT_SUOF_MAJVER 0x0
+#define ICP_QAT_SUOF_MINVER 0x1
+#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN    (50 * sizeof(unsigned long long))
+#define ICP_QAT_SIMG_AE_INSTS_LEN       (0x4000 * sizeof(unsigned long long))
+#define ICP_QAT_CSS_FWSK_MODULUS_LEN    256
+#define ICP_QAT_CSS_FWSK_EXPONENT_LEN   4
+#define ICP_QAT_CSS_FWSK_PAD_LEN        252
+#define ICP_QAT_CSS_FWSK_PUB_LEN   (ICP_QAT_CSS_FWSK_MODULUS_LEN + \
+				    ICP_QAT_CSS_FWSK_EXPONENT_LEN + \
+				    ICP_QAT_CSS_FWSK_PAD_LEN)
+#define ICP_QAT_CSS_SIGNATURE_LEN   256
+#define ICP_QAT_CSS_AE_IMG_LEN     (sizeof(struct icp_qat_simg_ae_mode) + \
+				    ICP_QAT_SIMG_AE_INIT_SEQ_LEN +         \
+				    ICP_QAT_SIMG_AE_INSTS_LEN)
+#define ICP_QAT_CSS_AE_SIMG_LEN    (sizeof(struct icp_qat_css_hdr) + \
+				    ICP_QAT_CSS_FWSK_PUB_LEN + \
+				    ICP_QAT_CSS_SIGNATURE_LEN + \
+				    ICP_QAT_CSS_AE_IMG_LEN)
+#define ICP_QAT_AE_IMG_OFFSET	   (sizeof(struct icp_qat_css_hdr) + \
+				    ICP_QAT_CSS_FWSK_MODULUS_LEN + \
+				    ICP_QAT_CSS_FWSK_EXPONENT_LEN + \
+				    ICP_QAT_CSS_SIGNATURE_LEN)
+#define ICP_QAT_CSS_MAX_IMAGE_LEN   0x40000
+
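+/*
+ * Extract the context, next-neighbour, shared-ustore, reloadable-context
+ * and local-memory mode bits from an image's ae_mode word.
+ */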
+#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
+#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
+#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
+#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1)
+
+#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1)
+#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1)
+
+enum icp_qat_uof_mem_region {
+	ICP_QAT_UOF_SRAM_REGION = 0x0,
+	ICP_QAT_UOF_LMEM_REGION = 0x3,
+	ICP_QAT_UOF_UMEM_REGION = 0x5
+};
+
+enum icp_qat_uof_regtype {
+	ICP_NO_DEST	= 0,
+	ICP_GPA_REL	= 1,
+	ICP_GPA_ABS	= 2,
+	ICP_GPB_REL	= 3,
+	ICP_GPB_ABS	= 4,
+	ICP_SR_REL	= 5,
+	ICP_SR_RD_REL	= 6,
+	ICP_SR_WR_REL	= 7,
+	ICP_SR_ABS	= 8,
+	ICP_SR_RD_ABS	= 9,
+	ICP_SR_WR_ABS	= 10,
+	ICP_DR_REL	= 19,
+	ICP_DR_RD_REL	= 20,
+	ICP_DR_WR_REL	= 21,
+	ICP_DR_ABS	= 22,
+	ICP_DR_RD_ABS	= 23,
+	ICP_DR_WR_ABS	= 24,
+	ICP_LMEM	= 26,
+	ICP_LMEM0	= 27,
+	ICP_LMEM1	= 28,
+	ICP_NEIGH_REL	= 31,
+};
+
+enum icp_qat_css_fwtype {
+	CSS_AE_FIRMWARE = 0,
+	CSS_MMP_FIRMWARE = 1
+};
+
+struct icp_qat_uclo_page {
+	struct icp_qat_uclo_encap_page *encap_page;
+	struct icp_qat_uclo_region *region;
+	unsigned int flags;
+};
+
+struct icp_qat_uclo_region {
+	struct icp_qat_uclo_page *loaded;
+	struct icp_qat_uclo_page *page;
+};
+
+struct icp_qat_uclo_aeslice {
+	struct icp_qat_uclo_region *region;
+	struct icp_qat_uclo_page *page;
+	struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];
+	struct icp_qat_uclo_encapme *encap_image;
+	unsigned int ctx_mask_assigned;
+	unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uclo_aedata {
+	unsigned int slice_num;
+	unsigned int eff_ustore_size;
+	struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uof_encap_obj {
+	char *beg_uof;
+	struct icp_qat_uof_objhdr *obj_hdr;
+	struct icp_qat_uof_chunkhdr *chunk_hdr;
+	struct icp_qat_uof_varmem_seg *var_mem_seg;
+};
+
+struct icp_qat_uclo_encap_uwblock {
+	unsigned int start_addr;
+	unsigned int words_num;
+	uint64_t micro_words;
+};
+
+struct icp_qat_uclo_encap_page {
+	unsigned int def_page;
+	unsigned int page_region;
+	unsigned int beg_addr_v;
+	unsigned int beg_addr_p;
+	unsigned int micro_words_num;
+	unsigned int uwblock_num;
+	struct icp_qat_uclo_encap_uwblock *uwblock;
+};
+
+struct icp_qat_uclo_encapme {
+	struct icp_qat_uof_image *img_ptr;
+	struct icp_qat_uclo_encap_page *page;
+	unsigned int ae_reg_num;
+	struct icp_qat_uof_ae_reg *ae_reg;
+	unsigned int init_regsym_num;
+	struct icp_qat_uof_init_regsym *init_regsym;
+	unsigned int sbreak_num;
+	struct icp_qat_uof_sbreak *sbreak;
+	unsigned int uwords_num;
+};
+
+struct icp_qat_uclo_init_mem_table {
+	unsigned int entry_num;
+	struct icp_qat_uof_initmem *init_mem;
+};
+
+struct icp_qat_uclo_objhdr {
+	char *file_buff;
+	unsigned int checksum;
+	unsigned int size;
+};
+
+struct icp_qat_uof_strtable {
+	unsigned int table_len;
+	unsigned int reserved;
+	uint64_t strings;
+};
+
+struct icp_qat_uclo_objhandle {
+	unsigned int prod_type;
+	unsigned int prod_rev;
+	struct icp_qat_uclo_objhdr *obj_hdr;
+	struct icp_qat_uof_encap_obj encap_uof_obj;
+	struct icp_qat_uof_strtable str_table;
+	struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE];
+	struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE];
+	struct icp_qat_uclo_init_mem_table init_mem_tab;
+	struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE];
+	struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE];
+	int uimage_num;
+	int uword_in_bytes;
+	int global_inited;
+	unsigned int ae_num;
+	unsigned int ustore_phy_size;
+	void *obj_buf;
+	uint64_t *uword_buf;
+};
+
+struct icp_qat_uof_uword_block {
+	unsigned int start_addr;
+	unsigned int words_num;
+	unsigned int uword_offset;
+	unsigned int reserved;
+};
+
+struct icp_qat_uof_filehdr {
+	unsigned short file_id;
+	unsigned short reserved1;
+	char min_ver;
+	char maj_ver;
+	unsigned short reserved2;
+	unsigned short max_chunks;
+	unsigned short num_chunks;
+};
+
+struct icp_qat_uof_filechunkhdr {
+	char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+	unsigned int checksum;
+	unsigned int offset;
+	unsigned int size;
+};
+
+struct icp_qat_uof_objhdr {
+	unsigned int ac_dev_type;
+	unsigned short min_cpu_ver;
+	unsigned short max_cpu_ver;
+	short max_chunks;
+	short num_chunks;
+	unsigned int reserved1;
+	unsigned int reserved2;
+};
+
+struct icp_qat_uof_chunkhdr {
+	char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+	unsigned int offset;
+	unsigned int size;
+};
+
+struct icp_qat_uof_memvar_attr {
+	unsigned int offset_in_byte;
+	unsigned int value;
+};
+
+struct icp_qat_uof_initmem {
+	unsigned int sym_name;
+	char region;
+	char scope;
+	unsigned short reserved1;
+	unsigned int addr;
+	unsigned int num_in_bytes;
+	unsigned int val_attr_num;
+};
+
+struct icp_qat_uof_init_regsym {
+	unsigned int sym_name;
+	char init_type;
+	char value_type;
+	char reg_type;
+	unsigned char ctx;
+	unsigned int reg_addr;
+	unsigned int value;
+};
+
+struct icp_qat_uof_varmem_seg {
+	unsigned int sram_base;
+	unsigned int sram_size;
+	unsigned int sram_alignment;
+	unsigned int sdram_base;
+	unsigned int sdram_size;
+	unsigned int sdram_alignment;
+	unsigned int sdram1_base;
+	unsigned int sdram1_size;
+	unsigned int sdram1_alignment;
+	unsigned int scratch_base;
+	unsigned int scratch_size;
+	unsigned int scratch_alignment;
+};
+
+struct icp_qat_uof_gtid {
+	char tool_id[ICP_QAT_UOF_OBJID_LEN];
+	int tool_ver;
+	unsigned int reserved1;
+	unsigned int reserved2;
+};
+
+struct icp_qat_uof_sbreak {
+	unsigned int page_num;
+	unsigned int virt_uaddr;
+	unsigned char sbreak_type;
+	unsigned char reg_type;
+	unsigned short reserved1;
+	unsigned int addr_offset;
+	unsigned int reg_addr;
+};
+
+struct icp_qat_uof_code_page {
+	unsigned int page_region;
+	unsigned int page_num;
+	unsigned char def_page;
+	unsigned char reserved2;
+	unsigned short reserved1;
+	unsigned int beg_addr_v;
+	unsigned int beg_addr_p;
+	unsigned int neigh_reg_tab_offset;
+	unsigned int uc_var_tab_offset;
+	unsigned int imp_var_tab_offset;
+	unsigned int imp_expr_tab_offset;
+	unsigned int code_area_offset;
+};
+
+struct icp_qat_uof_image {
+	unsigned int img_name;
+	unsigned int ae_assigned;
+	unsigned int ctx_assigned;
+	unsigned int ac_dev_type;
+	unsigned int entry_address;
+	unsigned int fill_pattern[2];
+	unsigned int reloadable_size;
+	unsigned char sensitivity;
+	unsigned char reserved;
+	unsigned short ae_mode;
+	unsigned short max_ver;
+	unsigned short min_ver;
+	unsigned short image_attrib;
+	unsigned short reserved2;
+	unsigned short page_region_num;
+	unsigned short numpages;
+	unsigned int reg_tab_offset;
+	unsigned int init_reg_sym_tab;
+	unsigned int sbreak_tab;
+	unsigned int app_metadata;
+};
+
+struct icp_qat_uof_objtable {
+	unsigned int entry_num;
+};
+
+struct icp_qat_uof_ae_reg {
+	unsigned int name;
+	unsigned int vis_name;
+	unsigned short type;
+	unsigned short addr;
+	unsigned short access_mode;
+	unsigned char visible;
+	unsigned char reserved1;
+	unsigned short ref_count;
+	unsigned short reserved2;
+	unsigned int xo_id;
+};
+
+struct icp_qat_uof_code_area {
+	unsigned int micro_words_num;
+	unsigned int uword_block_tab;
+};
+
+struct icp_qat_uof_batch_init {
+	unsigned int ae;
+	unsigned int addr;
+	unsigned int *value;
+	unsigned int size;
+	struct icp_qat_uof_batch_init *next;
+};
+
+struct icp_qat_suof_img_hdr {
+	char          *simg_buf;
+	unsigned long simg_len;
+	char          *css_header;
+	char          *css_key;
+	char          *css_signature;
+	char          *css_simg;
+	unsigned long simg_size;
+	unsigned int  ae_num;
+	unsigned int  ae_mask;
+	unsigned int  fw_type;
+	unsigned long simg_name;
+	unsigned long appmeta_data;
+};
+
+struct icp_qat_suof_img_tbl {
+	unsigned int num_simgs;
+	struct icp_qat_suof_img_hdr *simg_hdr;
+};
+
+struct icp_qat_suof_handle {
+	unsigned int  file_id;
+	unsigned int  check_sum;
+	char          min_ver;
+	char          maj_ver;
+	char          fw_type;
+	char          *suof_buf;
+	unsigned int  suof_size;
+	char          *sym_str;
+	unsigned int  sym_size;
+	struct icp_qat_suof_img_tbl img_table;
+};
+
+struct icp_qat_fw_auth_desc {
+	unsigned int   img_len;
+	unsigned int   reserved;
+	unsigned int   css_hdr_high;
+	unsigned int   css_hdr_low;
+	unsigned int   img_high;
+	unsigned int   img_low;
+	unsigned int   signature_high;
+	unsigned int   signature_low;
+	unsigned int   fwsk_pub_high;
+	unsigned int   fwsk_pub_low;
+	unsigned int   img_ae_mode_data_high;
+	unsigned int   img_ae_mode_data_low;
+	unsigned int   img_ae_init_data_high;
+	unsigned int   img_ae_init_data_low;
+	unsigned int   img_ae_insts_high;
+	unsigned int   img_ae_insts_low;
+};
+
+struct icp_qat_auth_chunk {
+	struct icp_qat_fw_auth_desc fw_auth_desc;
+	u64 chunk_size;
+	u64 chunk_bus_addr;
+};
+
+struct icp_qat_css_hdr {
+	unsigned int module_type;
+	unsigned int header_len;
+	unsigned int header_ver;
+	unsigned int module_id;
+	unsigned int module_vendor;
+	unsigned int date;
+	unsigned int size;
+	unsigned int key_size;
+	unsigned int module_size;
+	unsigned int exponent_size;
+	unsigned int fw_type;
+	unsigned int reserved[21];
+};
+
+struct icp_qat_simg_ae_mode {
+	unsigned int     file_id;
+	unsigned short   maj_ver;
+	unsigned short   min_ver;
+	unsigned int     dev_type;
+	unsigned short   devmax_ver;
+	unsigned short   devmin_ver;
+	unsigned int     ae_mask;
+	unsigned int     ctx_enables;
+	char             fw_type;
+	char             ctx_mode;
+	char             nn_mode;
+	char             lm0_mode;
+	char             lm1_mode;
+	char             scs_mode;
+	char             lm2_mode;
+	char             lm3_mode;
+	char             tindex_mode;
+	unsigned char    reserved[7];
+	char             simg_name[256];
+	char             appmeta_data[256];
+};
+
+struct icp_qat_suof_filehdr {
+	unsigned int     file_id;
+	unsigned int     check_sum;
+	char             min_ver;
+	char             maj_ver;
+	char             fw_type;
+	char             reserved;
+	unsigned short   max_chunks;
+	unsigned short   num_chunks;
+};
+
+struct icp_qat_suof_chunk_hdr {
+	char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN];
+	u64 offset;
+	u64 size;
+};
+
+struct icp_qat_suof_strtable {
+	unsigned int tab_length;
+	unsigned int strings;
+};
+
+struct icp_qat_suof_objhdr {
+	unsigned int img_length;
+	unsigned int reserved;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
new file mode 100644
index 0000000..1138e41
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -0,0 +1,1318 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/hmac.h>
+#include <crypto/algapi.h>
+#include <crypto/authenc.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
+	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
+				       ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
+	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+				       ICP_QAT_HW_CIPHER_DECRYPT)
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+struct qat_alg_buf {
+	uint32_t len;
+	uint32_t resrvd;
+	uint64_t addr;
+} __packed;
+
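+/* Flat, DMA-mapped buffer list describing one scatterlist to the firmware */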
+struct qat_alg_buf_list {
+	uint64_t resrvd;
+	uint32_t num_bufs;
+	uint32_t num_mapped_bufs;
+	struct qat_alg_buf bufers[];
+} __packed __aligned(64);
+
+/* Common content descriptor */
+struct qat_alg_cd {
+	union {
+		struct qat_enc { /* Encrypt content desc */
+			struct icp_qat_hw_cipher_algo_blk cipher;
+			struct icp_qat_hw_auth_algo_blk hash;
+		} qat_enc_cd;
+		struct qat_dec { /* Decrypt content desc */
+			struct icp_qat_hw_auth_algo_blk hash;
+			struct icp_qat_hw_cipher_algo_blk cipher;
+		} qat_dec_cd;
+	};
+} __aligned(64);
+
+struct qat_alg_aead_ctx {
+	struct qat_alg_cd *enc_cd;
+	struct qat_alg_cd *dec_cd;
+	dma_addr_t enc_cd_paddr;
+	dma_addr_t dec_cd_paddr;
+	struct icp_qat_fw_la_bulk_req enc_fw_req;
+	struct icp_qat_fw_la_bulk_req dec_fw_req;
+	struct crypto_shash *hash_tfm;
+	enum icp_qat_hw_auth_algo qat_hash_alg;
+	struct qat_crypto_instance *inst;
+};
+
+struct qat_alg_ablkcipher_ctx {
+	struct icp_qat_hw_cipher_algo_blk *enc_cd;
+	struct icp_qat_hw_cipher_algo_blk *dec_cd;
+	dma_addr_t enc_cd_paddr;
+	dma_addr_t dec_cd_paddr;
+	struct icp_qat_fw_la_bulk_req enc_fw_req;
+	struct icp_qat_fw_la_bulk_req dec_fw_req;
+	struct qat_crypto_instance *inst;
+	struct crypto_tfm *tfm;
+	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
+};
+
+static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+	switch (qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		return ICP_QAT_HW_SHA1_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		return ICP_QAT_HW_SHA256_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		return ICP_QAT_HW_SHA512_STATE1_SZ;
+	default:
+		return -EFAULT;
+	}
+	return -EFAULT;
+}
+
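+/*
+ * Precompute the partial HMAC inner (ipad) and outer (opad) hash states
+ * from the authentication key and store them in the content descriptor
+ * state area (outer state at an 8-byte aligned offset after the inner one).
+ */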
+static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+				  struct qat_alg_aead_ctx *ctx,
+				  const uint8_t *auth_key,
+				  unsigned int auth_keylen)
+{
+	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
+	struct sha1_state sha1;
+	struct sha256_state sha256;
+	struct sha512_state sha512;
+	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
+	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
+	char ipad[block_size];
+	char opad[block_size];
+	__be32 *hash_state_out;
+	__be64 *hash512_state_out;
+	int i, offset;
+
+	memset(ipad, 0, block_size);
+	memset(opad, 0, block_size);
+	shash->tfm = ctx->hash_tfm;
+	shash->flags = 0x0;
+
+	if (auth_keylen > block_size) {
+		int ret = crypto_shash_digest(shash, auth_key,
+					      auth_keylen, ipad);
+		if (ret)
+			return ret;
+
+		memcpy(opad, ipad, digest_size);
+	} else {
+		memcpy(ipad, auth_key, auth_keylen);
+		memcpy(opad, auth_key, auth_keylen);
+	}
+
+	for (i = 0; i < block_size; i++) {
+		char *ipad_ptr = ipad + i;
+		char *opad_ptr = opad + i;
+		*ipad_ptr ^= HMAC_IPAD_VALUE;
+		*opad_ptr ^= HMAC_OPAD_VALUE;
+	}
+
+	if (crypto_shash_init(shash))
+		return -EFAULT;
+
+	if (crypto_shash_update(shash, ipad, block_size))
+		return -EFAULT;
+
+	hash_state_out = (__be32 *)hash->sha.state1;
+	hash512_state_out = (__be64 *)hash_state_out;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		if (crypto_shash_export(shash, &sha1))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+			*hash_state_out = cpu_to_be32(*(sha1.state + i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		if (crypto_shash_export(shash, &sha256))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+			*hash_state_out = cpu_to_be32(*(sha256.state + i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		if (crypto_shash_export(shash, &sha512))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
+		break;
+	default:
+		return -EFAULT;
+	}
+
+	if (crypto_shash_init(shash))
+		return -EFAULT;
+
+	if (crypto_shash_update(shash, opad, block_size))
+		return -EFAULT;
+
+	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
+	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
+	hash512_state_out = (__be64 *)hash_state_out;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		if (crypto_shash_export(shash, &sha1))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+			*hash_state_out = cpu_to_be32(*(sha1.state + i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		if (crypto_shash_export(shash, &sha256))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+			*hash_state_out = cpu_to_be32(*(sha256.state + i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		if (crypto_shash_export(shash, &sha512))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
+		break;
+	default:
+		return -EFAULT;
+	}
+	memzero_explicit(ipad, block_size);
+	memzero_explicit(opad, block_size);
+	return 0;
+}
+
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+{
+	header->hdr_flags =
+		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+					    QAT_COMN_PTR_TYPE_SGL);
+	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+				  ICP_QAT_FW_LA_PARTIAL_NONE);
+	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
+}
+
+static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
+					 int alg,
+					 struct crypto_authenc_keys *keys,
+					 int mode)
+{
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
+	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
+	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
+	struct icp_qat_hw_auth_algo_blk *hash =
+		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
+		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+	void *ptr = &req_tmpl->cd_ctrl;
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+
+	/* CD setup */
+	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
+	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+	hash->sha.inner_setup.auth_config.config =
+		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+					     ctx->qat_hash_alg, digestsize);
+	hash->sha.inner_setup.auth_counter.counter =
+		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
+		return -EFAULT;
+
+	/* Request setup */
+	qat_alg_init_common_hdr(header);
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+				   ICP_QAT_FW_LA_RET_AUTH_RES);
+	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+	/* Cipher CD config setup */
+	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+	/* Auth CD config setup */
+	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
+	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+	hash_cd_ctrl->inner_res_sz = digestsize;
+	hash_cd_ctrl->final_sz = digestsize;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		hash_cd_ctrl->inner_state1_sz =
+			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+		hash_cd_ctrl->inner_state2_sz =
+			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+		break;
+	default:
+		break;
+	}
+	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+			((sizeof(struct icp_qat_hw_auth_setup) +
+			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	return 0;
+}
+
+static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
+					 int alg,
+					 struct crypto_authenc_keys *keys,
+					 int mode)
+{
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
+	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
+	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
+	struct icp_qat_hw_cipher_algo_blk *cipher =
+		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
+		sizeof(struct icp_qat_hw_auth_setup) +
+		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+	void *ptr = &req_tmpl->cd_ctrl;
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(struct icp_qat_fw_la_auth_req_params *)
+		((char *)&req_tmpl->serv_specif_rqpars +
+		sizeof(struct icp_qat_fw_la_cipher_req_params));
+
+	/* CD setup */
+	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
+	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+	hash->sha.inner_setup.auth_config.config =
+		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+					     ctx->qat_hash_alg,
+					     digestsize);
+	hash->sha.inner_setup.auth_counter.counter =
+		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
+		return -EFAULT;
+
+	/* Request setup */
+	qat_alg_init_common_hdr(header);
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+				   ICP_QAT_FW_LA_CMP_AUTH_RES);
+	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+	/* Cipher CD config setup */
+	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+	cipher_cd_ctrl->cipher_cfg_offset =
+		(sizeof(struct icp_qat_hw_auth_setup) +
+		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+
+	/* Auth CD config setup */
+	hash_cd_ctrl->hash_cfg_offset = 0;
+	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+	hash_cd_ctrl->inner_res_sz = digestsize;
+	hash_cd_ctrl->final_sz = digestsize;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		hash_cd_ctrl->inner_state1_sz =
+			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+		hash_cd_ctrl->inner_state2_sz =
+			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+		break;
+	default:
+		break;
+	}
+
+	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+			((sizeof(struct icp_qat_hw_auth_setup) +
+			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+	auth_param->auth_res_sz = digestsize;
+	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	return 0;
+}
+
+static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
+					struct icp_qat_fw_la_bulk_req *req,
+					struct icp_qat_hw_cipher_algo_blk *cd,
+					const uint8_t *key, unsigned int keylen)
+{
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
+
+	memcpy(cd->aes.key, key, keylen);
+	qat_alg_init_common_hdr(header);
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cd_pars->u.s.content_desc_params_sz =
+				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
+	/* Cipher CD config setup */
+	cd_ctrl->cipher_key_sz = keylen >> 3;
+	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+	cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+}
+
+static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
+					int alg, const uint8_t *key,
+					unsigned int keylen, int mode)
+{
+	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
+	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+
+	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
+	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
+}
+
+static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
+					int alg, const uint8_t *key,
+					unsigned int keylen, int mode)
+{
+	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
+	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+
+	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
+	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+
+	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
+		dec_cd->aes.cipher_config.val =
+					QAT_AES_HW_CONFIG_DEC(alg, mode);
+	else
+		dec_cd->aes.cipher_config.val =
+					QAT_AES_HW_CONFIG_ENC(alg, mode);
+}
+
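+/*
+ * Map a key length onto the hardware AES algorithm id.  XTS keys contain
+ * two AES keys, so their length is double the nominal AES key size.
+ */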
+static int qat_alg_validate_key(int key_len, int *alg, int mode)
+{
+	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
+		switch (key_len) {
+		case AES_KEYSIZE_128:
+			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+			break;
+		case AES_KEYSIZE_192:
+			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+			break;
+		case AES_KEYSIZE_256:
+			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		switch (key_len) {
+		case AES_KEYSIZE_128 << 1:
+			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+			break;
+		case AES_KEYSIZE_256 << 1:
+			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
+				      unsigned int keylen,  int mode)
+{
+	struct crypto_authenc_keys keys;
+	int alg;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen))
+		goto bad_key;
+
+	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
+		goto bad_key;
+
+	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
+		goto error;
+
+	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
+		goto error;
+
+	memzero_explicit(&keys, sizeof(keys));
+	return 0;
+bad_key:
+	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	memzero_explicit(&keys, sizeof(keys));
+	return -EINVAL;
+error:
+	memzero_explicit(&keys, sizeof(keys));
+	return -EFAULT;
+}
+
+static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
+					    const uint8_t *key,
+					    unsigned int keylen,
+					    int mode)
+{
+	int alg;
+
+	if (qat_alg_validate_key(keylen, &alg, mode))
+		goto bad_key;
+
+	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
+	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
+	return 0;
+bad_key:
+	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
+			       unsigned int keylen)
+{
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev;
+
+	if (ctx->enc_cd) {
+		/* rekeying */
+		dev = &GET_DEV(ctx->inst->accel_dev);
+		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+	} else {
+		/* new key */
+		int node = get_current_node();
+		struct qat_crypto_instance *inst =
+				qat_crypto_get_instance_node(node);
+		if (!inst)
+			return -EINVAL;
+
+		dev = &GET_DEV(inst->accel_dev);
+		ctx->inst = inst;
+		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
+						  &ctx->enc_cd_paddr,
+						  GFP_ATOMIC);
+		if (!ctx->enc_cd)
+			return -ENOMEM;
+		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
+						  &ctx->dec_cd_paddr,
+						  GFP_ATOMIC);
+		if (!ctx->dec_cd)
+			goto out_free_enc;
+	}
+	if (qat_alg_aead_init_sessions(tfm, key, keylen,
+				       ICP_QAT_HW_CIPHER_CBC_MODE))
+		goto out_free_all;
+
+	return 0;
+
+out_free_all:
+	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+			  ctx->dec_cd, ctx->dec_cd_paddr);
+	ctx->dec_cd = NULL;
+out_free_enc:
+	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+			  ctx->enc_cd, ctx->enc_cd_paddr);
+	ctx->enc_cd = NULL;
+	return -ENOMEM;
+}
+
+static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
+			      struct qat_crypto_request *qat_req)
+{
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_alg_buf_list *bl = qat_req->buf.bl;
+	struct qat_alg_buf_list *blout = qat_req->buf.blout;
+	dma_addr_t blp = qat_req->buf.blp;
+	dma_addr_t blpout = qat_req->buf.bloutp;
+	size_t sz = qat_req->buf.sz;
+	size_t sz_out = qat_req->buf.sz_out;
+	int i;
+
+	for (i = 0; i < bl->num_bufs; i++)
+		dma_unmap_single(dev, bl->bufers[i].addr,
+				 bl->bufers[i].len, DMA_BIDIRECTIONAL);
+
+	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+	kfree(bl);
+	if (blp != blpout) {
+		/* Out of place operation: DMA unmap only the data buffers */
+		int bufless = blout->num_bufs - blout->num_mapped_bufs;
+
+		for (i = bufless; i < blout->num_bufs; i++) {
+			dma_unmap_single(dev, blout->bufers[i].addr,
+					 blout->bufers[i].len,
+					 DMA_BIDIRECTIONAL);
+		}
+		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
+		kfree(blout);
+	}
+}
+
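+/*
+ * DMA-map the source scatterlist (and the destination one for out of
+ * place requests) and build the flat buffer lists handed to the firmware.
+ */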
+static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+			       struct scatterlist *sgl,
+			       struct scatterlist *sglout,
+			       struct qat_crypto_request *qat_req)
+{
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	int i, sg_nctr = 0;
+	int n = sg_nents(sgl);
+	struct qat_alg_buf_list *bufl;
+	struct qat_alg_buf_list *buflout = NULL;
+	dma_addr_t blp;
+	dma_addr_t bloutp = 0;
+	struct scatterlist *sg;
+	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
+			((1 + n) * sizeof(struct qat_alg_buf));
+
+	if (unlikely(!n))
+		return -EINVAL;
+
+	bufl = kzalloc_node(sz, GFP_ATOMIC,
+			    dev_to_node(&GET_DEV(inst->accel_dev)));
+	if (unlikely(!bufl))
+		return -ENOMEM;
+
+	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, blp)))
+		goto err_in;
+
+	for_each_sg(sgl, sg, n, i) {
+		int y = sg_nctr;
+
+		if (!sg->length)
+			continue;
+
+		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+						      sg->length,
+						      DMA_BIDIRECTIONAL);
+		bufl->bufers[y].len = sg->length;
+		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+			goto err_in;
+		sg_nctr++;
+	}
+	bufl->num_bufs = sg_nctr;
+	qat_req->buf.bl = bufl;
+	qat_req->buf.blp = blp;
+	qat_req->buf.sz = sz;
+	/* Handle out of place operation */
+	if (sgl != sglout) {
+		struct qat_alg_buf *bufers;
+
+		n = sg_nents(sglout);
+		sz_out = sizeof(struct qat_alg_buf_list) +
+			((1 + n) * sizeof(struct qat_alg_buf));
+		sg_nctr = 0;
+		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
+				       dev_to_node(&GET_DEV(inst->accel_dev)));
+		if (unlikely(!buflout))
+			goto err_in;
+		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, bloutp)))
+			goto err_out;
+		bufers = buflout->bufers;
+		for_each_sg(sglout, sg, n, i) {
+			int y = sg_nctr;
+
+			if (!sg->length)
+				continue;
+
+			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+							sg->length,
+							DMA_BIDIRECTIONAL);
+			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+				goto err_out;
+			bufers[y].len = sg->length;
+			sg_nctr++;
+		}
+		buflout->num_bufs = sg_nctr;
+		buflout->num_mapped_bufs = sg_nctr;
+		qat_req->buf.blout = buflout;
+		qat_req->buf.bloutp = bloutp;
+		qat_req->buf.sz_out = sz_out;
+	} else {
+		/* Otherwise set the src and dst to the same address */
+		qat_req->buf.bloutp = qat_req->buf.blp;
+		qat_req->buf.sz_out = 0;
+	}
+	return 0;
+
+err_out:
+	n = sg_nents(sglout);
+	for (i = 0; i < n; i++)
+		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
+			dma_unmap_single(dev, buflout->bufers[i].addr,
+					 buflout->bufers[i].len,
+					 DMA_BIDIRECTIONAL);
+	if (!dma_mapping_error(dev, bloutp))
+		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+	kfree(buflout);
+
+err_in:
+	n = sg_nents(sgl);
+	for (i = 0; i < n; i++)
+		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
+			dma_unmap_single(dev, bufl->bufers[i].addr,
+					 bufl->bufers[i].len,
+					 DMA_BIDIRECTIONAL);
+
+	if (!dma_mapping_error(dev, blp))
+		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+	kfree(bufl);
+
+	dev_err(dev, "Failed to map buf for dma\n");
+	return -ENOMEM;
+}
+
+static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+				  struct qat_crypto_request *qat_req)
+{
+	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct aead_request *areq = qat_req->aead_req;
+	uint8_t stat_field = qat_resp->comn_resp.comn_status;
+	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+	qat_alg_free_bufl(inst, qat_req);
+	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+		res = -EBADMSG;
+	areq->base.complete(&areq->base, res);
+}
+
+static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+					struct qat_crypto_request *qat_req)
+{
+	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
+	uint8_t stat_field = qat_resp->comn_resp.comn_status;
+	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+	qat_alg_free_bufl(inst, qat_req);
+	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+		res = -EINVAL;
+	areq->base.complete(&areq->base, res);
+}
+
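+/*
+ * Response handler: the firmware echoes the request pointer back in the
+ * opaque_data field, so recover it and run the request's callback.
+ */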
+void qat_alg_callback(void *resp)
+{
+	struct icp_qat_fw_la_resp *qat_resp = resp;
+	struct qat_crypto_request *qat_req =
+				(void *)(__force long)qat_resp->opaque_data;
+
+	qat_req->cb(qat_resp, qat_req);
+}
+
+static int qat_alg_aead_dec(struct aead_request *areq)
+{
+	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct icp_qat_fw_la_bulk_req *msg;
+	int digest_size = crypto_aead_authsize(aead_tfm);
+	int ret, ctr = 0;
+
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+	if (unlikely(ret))
+		return ret;
+
+	msg = &qat_req->req;
+	*msg = ctx->dec_fw_req;
+	qat_req->aead_ctx = ctx;
+	qat_req->aead_req = areq;
+	qat_req->cb = qat_aead_alg_callback;
+	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+	cipher_param->cipher_length = areq->cryptlen - digest_size;
+	cipher_param->cipher_offset = areq->assoclen;
+	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
+	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+	auth_param->auth_off = 0;
+	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
+	do {
+		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+	} while (ret == -EAGAIN && ctr++ < 10);
+
+	if (ret == -EAGAIN) {
+		qat_alg_free_bufl(ctx->inst, qat_req);
+		return -EBUSY;
+	}
+	return -EINPROGRESS;
+}
+
+static int qat_alg_aead_enc(struct aead_request *areq)
+{
+	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct icp_qat_fw_la_bulk_req *msg;
+	uint8_t *iv = areq->iv;
+	int ret, ctr = 0;
+
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+	if (unlikely(ret))
+		return ret;
+
+	msg = &qat_req->req;
+	*msg = ctx->enc_fw_req;
+	qat_req->aead_ctx = ctx;
+	qat_req->aead_req = areq;
+	qat_req->cb = qat_aead_alg_callback;
+	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+	cipher_param->cipher_length = areq->cryptlen;
+	cipher_param->cipher_offset = areq->assoclen;
+
+	auth_param->auth_off = 0;
+	auth_param->auth_len = areq->assoclen + areq->cryptlen;
+
+	do {
+		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+	} while (ret == -EAGAIN && ctr++ < 10);
+
+	if (ret == -EAGAIN) {
+		qat_alg_free_bufl(ctx->inst, qat_req);
+		return -EBUSY;
+	}
+	return -EINPROGRESS;
+}
+
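+/*
+ * ablkcipher setkey: on first use, bind the tfm to a crypto instance on the
+ * current node and allocate DMA-coherent content descriptors; on rekey,
+ * clear and re-initialize the existing descriptors.
+ */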
+static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+				     const u8 *key, unsigned int keylen,
+				     int mode)
+{
+	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct device *dev;
+
+	spin_lock(&ctx->lock);
+	if (ctx->enc_cd) {
+		/* rekeying */
+		dev = &GET_DEV(ctx->inst->accel_dev);
+		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+	} else {
+		/* new key */
+		int node = get_current_node();
+		struct qat_crypto_instance *inst =
+				qat_crypto_get_instance_node(node);
+		if (!inst) {
+			spin_unlock(&ctx->lock);
+			return -EINVAL;
+		}
+
+		dev = &GET_DEV(inst->accel_dev);
+		ctx->inst = inst;
+		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
+						  &ctx->enc_cd_paddr,
+						  GFP_ATOMIC);
+		if (!ctx->enc_cd) {
+			spin_unlock(&ctx->lock);
+			return -ENOMEM;
+		}
+		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
+						  &ctx->dec_cd_paddr,
+						  GFP_ATOMIC);
+		if (!ctx->dec_cd) {
+			spin_unlock(&ctx->lock);
+			goto out_free_enc;
+		}
+	}
+	spin_unlock(&ctx->lock);
+	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
+		goto out_free_all;
+
+	return 0;
+
+out_free_all:
+	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
+			  ctx->dec_cd, ctx->dec_cd_paddr);
+	ctx->dec_cd = NULL;
+out_free_enc:
+	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
+			  ctx->enc_cd, ctx->enc_cd_paddr);
+	ctx->enc_cd = NULL;
+	return -ENOMEM;
+}
+
+static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
+					 const u8 *key, unsigned int keylen)
+{
+	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+					 ICP_QAT_HW_CIPHER_CBC_MODE);
+}
+
+static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
+					 const u8 *key, unsigned int keylen)
+{
+	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+					 ICP_QAT_HW_CIPHER_CTR_MODE);
+}
+
+static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
+					 const u8 *key, unsigned int keylen)
+{
+	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+					 ICP_QAT_HW_CIPHER_XTS_MODE);
+}
+
+static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_bulk_req *msg;
+	int ret, ctr = 0;
+
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+	if (unlikely(ret))
+		return ret;
+
+	msg = &qat_req->req;
+	*msg = ctx->enc_fw_req;
+	qat_req->ablkcipher_ctx = ctx;
+	qat_req->ablkcipher_req = req;
+	qat_req->cb = qat_ablkcipher_alg_callback;
+	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+	cipher_param->cipher_length = req->nbytes;
+	cipher_param->cipher_offset = 0;
+	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+	do {
+		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+	} while (ret == -EAGAIN && ctr++ < 10);
+
+	if (ret == -EAGAIN) {
+		qat_alg_free_bufl(ctx->inst, qat_req);
+		return -EBUSY;
+	}
+	return -EINPROGRESS;
+}
+
+static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_bulk_req *msg;
+	int ret, ctr = 0;
+
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+	if (unlikely(ret))
+		return ret;
+
+	msg = &qat_req->req;
+	*msg = ctx->dec_fw_req;
+	qat_req->ablkcipher_ctx = ctx;
+	qat_req->ablkcipher_req = req;
+	qat_req->cb = qat_ablkcipher_alg_callback;
+	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+	cipher_param->cipher_length = req->nbytes;
+	cipher_param->cipher_offset = 0;
+	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+	do {
+		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+	} while (ret == -EAGAIN && ctr++ < 10);
+
+	if (ret == -EAGAIN) {
+		qat_alg_free_bufl(ctx->inst, qat_req);
+		return -EBUSY;
+	}
+	return -EINPROGRESS;
+}
+
+static int qat_alg_aead_init(struct crypto_aead *tfm,
+			     enum icp_qat_hw_auth_algo hash,
+			     const char *hash_name)
+{
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+	if (IS_ERR(ctx->hash_tfm))
+		return PTR_ERR(ctx->hash_tfm);
+	ctx->qat_hash_alg = hash;
+	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
+	return 0;
+}
+
+static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
+{
+	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+}
+
+static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
+{
+	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+}
+
+static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
+{
+	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+}
+
+static void qat_alg_aead_exit(struct crypto_aead *tfm)
+{
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev;
+
+	crypto_free_shash(ctx->hash_tfm);
+
+	if (!inst)
+		return;
+
+	dev = &GET_DEV(inst->accel_dev);
+	if (ctx->enc_cd) {
+		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+				  ctx->enc_cd, ctx->enc_cd_paddr);
+	}
+	if (ctx->dec_cd) {
+		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+				  ctx->dec_cd, ctx->dec_cd_paddr);
+	}
+	qat_crypto_put_instance(inst);
+}
+
+static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	spin_lock_init(&ctx->lock);
+	tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
+	ctx->tfm = tfm;
+	return 0;
+}
+
+static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev;
+
+	if (!inst)
+		return;
+
+	dev = &GET_DEV(inst->accel_dev);
+	if (ctx->enc_cd) {
+		memset(ctx->enc_cd, 0,
+		       sizeof(struct icp_qat_hw_cipher_algo_blk));
+		dma_free_coherent(dev,
+				  sizeof(struct icp_qat_hw_cipher_algo_blk),
+				  ctx->enc_cd, ctx->enc_cd_paddr);
+	}
+	if (ctx->dec_cd) {
+		memset(ctx->dec_cd, 0,
+		       sizeof(struct icp_qat_hw_cipher_algo_blk));
+		dma_free_coherent(dev,
+				  sizeof(struct icp_qat_hw_cipher_algo_blk),
+				  ctx->dec_cd, ctx->dec_cd_paddr);
+	}
+	qat_crypto_put_instance(inst);
+}
+
+
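+/* AEAD (authenc) algorithm templates exposed by this driver. */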
+static struct aead_alg qat_aeads[] = { {
+	.base = {
+		.cra_name = "authenc(hmac(sha1),cbc(aes))",
+		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
+		.cra_priority = 4001,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+		.cra_module = THIS_MODULE,
+	},
+	.init = qat_alg_aead_sha1_init,
+	.exit = qat_alg_aead_exit,
+	.setkey = qat_alg_aead_setkey,
+	.decrypt = qat_alg_aead_dec,
+	.encrypt = qat_alg_aead_enc,
+	.ivsize = AES_BLOCK_SIZE,
+	.maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+	.base = {
+		.cra_name = "authenc(hmac(sha256),cbc(aes))",
+		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
+		.cra_priority = 4001,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+		.cra_module = THIS_MODULE,
+	},
+	.init = qat_alg_aead_sha256_init,
+	.exit = qat_alg_aead_exit,
+	.setkey = qat_alg_aead_setkey,
+	.decrypt = qat_alg_aead_dec,
+	.encrypt = qat_alg_aead_enc,
+	.ivsize = AES_BLOCK_SIZE,
+	.maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+	.base = {
+		.cra_name = "authenc(hmac(sha512),cbc(aes))",
+		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
+		.cra_priority = 4001,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+		.cra_module = THIS_MODULE,
+	},
+	.init = qat_alg_aead_sha512_init,
+	.exit = qat_alg_aead_exit,
+	.setkey = qat_alg_aead_setkey,
+	.decrypt = qat_alg_aead_dec,
+	.encrypt = qat_alg_aead_enc,
+	.ivsize = AES_BLOCK_SIZE,
+	.maxauthsize = SHA512_DIGEST_SIZE,
+} };
+
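+/* ablkcipher templates for AES in CBC, CTR and XTS modes. */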
+static struct crypto_alg qat_algs[] = { {
+	.cra_name = "cbc(aes)",
+	.cra_driver_name = "qat_aes_cbc",
+	.cra_priority = 4001,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = qat_alg_ablkcipher_init,
+	.cra_exit = qat_alg_ablkcipher_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.setkey = qat_alg_ablkcipher_cbc_setkey,
+			.decrypt = qat_alg_ablkcipher_decrypt,
+			.encrypt = qat_alg_ablkcipher_encrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+	},
+}, {
+	.cra_name = "ctr(aes)",
+	.cra_driver_name = "qat_aes_ctr",
+	.cra_priority = 4001,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = qat_alg_ablkcipher_init,
+	.cra_exit = qat_alg_ablkcipher_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.setkey = qat_alg_ablkcipher_ctr_setkey,
+			.decrypt = qat_alg_ablkcipher_decrypt,
+			.encrypt = qat_alg_ablkcipher_encrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+	},
+}, {
+	.cra_name = "xts(aes)",
+	.cra_driver_name = "qat_aes_xts",
+	.cra_priority = 4001,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = qat_alg_ablkcipher_init,
+	.cra_exit = qat_alg_ablkcipher_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.setkey = qat_alg_ablkcipher_xts_setkey,
+			.decrypt = qat_alg_ablkcipher_decrypt,
+			.encrypt = qat_alg_ablkcipher_encrypt,
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+	},
+} };
+
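+/*
+ * Registration is reference counted under algs_lock: the algorithms are
+ * registered when the first acceleration device comes up and unregistered
+ * only when the last one goes away.
+ */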
+int qat_algs_register(void)
+{
+	int ret = 0, i;
+
+	mutex_lock(&algs_lock);
+	if (++active_devs != 1)
+		goto unlock;
+
+	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+		qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC;
+
+	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+	if (ret)
+		goto unlock;
+
+	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
+		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
+
+	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+	if (ret)
+		goto unreg_algs;
+
+unlock:
+	mutex_unlock(&algs_lock);
+	return ret;
+
+unreg_algs:
+	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+	goto unlock;
+}
+
+void qat_algs_unregister(void)
+{
+	mutex_lock(&algs_lock);
+	if (--active_devs != 0)
+		goto unlock;
+
+	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+
+unlock:
+	mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
new file mode 100644
index 0000000..320e785
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -0,0 +1,1362 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+	* Redistributions of source code must retain the above copyright
+	  notice, this list of conditions and the following disclaimer.
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in
+	  the documentation and/or other materials provided with the
+	  distribution.
+	* Neither the name of Intel Corporation nor the names of its
+	  contributors may be used to endorse or promote products derived
+	  from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/module.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/dh.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include <crypto/scatterwalk.h>
+#include "icp_qat_fw_pke.h"
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+struct qat_rsa_input_params {
+	union {
+		struct {
+			dma_addr_t m;
+			dma_addr_t e;
+			dma_addr_t n;
+		} enc;
+		struct {
+			dma_addr_t c;
+			dma_addr_t d;
+			dma_addr_t n;
+		} dec;
+		struct {
+			dma_addr_t c;
+			dma_addr_t p;
+			dma_addr_t q;
+			dma_addr_t dp;
+			dma_addr_t dq;
+			dma_addr_t qinv;
+		} dec_crt;
+		u64 in_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_rsa_output_params {
+	union {
+		struct {
+			dma_addr_t c;
+		} enc;
+		struct {
+			dma_addr_t m;
+		} dec;
+		u64 out_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_rsa_ctx {
+	char *n;
+	char *e;
+	char *d;
+	char *p;
+	char *q;
+	char *dp;
+	char *dq;
+	char *qinv;
+	dma_addr_t dma_n;
+	dma_addr_t dma_e;
+	dma_addr_t dma_d;
+	dma_addr_t dma_p;
+	dma_addr_t dma_q;
+	dma_addr_t dma_dp;
+	dma_addr_t dma_dq;
+	dma_addr_t dma_qinv;
+	unsigned int key_sz;
+	bool crt_mode;
+	struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_dh_input_params {
+	union {
+		struct {
+			dma_addr_t b;
+			dma_addr_t xa;
+			dma_addr_t p;
+		} in;
+		struct {
+			dma_addr_t xa;
+			dma_addr_t p;
+		} in_g2;
+		u64 in_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_dh_output_params {
+	union {
+		dma_addr_t r;
+		u64 out_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_dh_ctx {
+	char *g;
+	char *xa;
+	char *p;
+	dma_addr_t dma_g;
+	dma_addr_t dma_xa;
+	dma_addr_t dma_p;
+	unsigned int p_size;
+	bool g2;
+	struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_asym_request {
+	union {
+		struct qat_rsa_input_params rsa;
+		struct qat_dh_input_params dh;
+	} in;
+	union {
+		struct qat_rsa_output_params rsa;
+		struct qat_dh_output_params dh;
+	} out;
+	dma_addr_t phy_in;
+	dma_addr_t phy_out;
+	char *src_align;
+	char *dst_align;
+	struct icp_qat_fw_pke_request req;
+	union {
+		struct qat_rsa_ctx *rsa;
+		struct qat_dh_ctx *dh;
+	} ctx;
+	union {
+		struct akcipher_request *rsa;
+		struct kpp_request *dh;
+	} areq;
+	int err;
+	void (*cb)(struct icp_qat_fw_pke_resp *resp);
+} __aligned(64);
+
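+/*
+ * DH completion callback: release the source mapping or bounce buffer (if
+ * any), copy the result back into the caller's scatterlist when a bounce
+ * buffer was used, unmap the parameter tables and complete the kpp request.
+ */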
+static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
+{
+	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+	struct kpp_request *areq = req->areq.dh;
+	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
+	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+				resp->pke_resp_hdr.comn_resp_flags);
+
+	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+	if (areq->src) {
+		if (req->src_align)
+			dma_free_coherent(dev, req->ctx.dh->p_size,
+					  req->src_align, req->in.dh.in.b);
+		else
+			dma_unmap_single(dev, req->in.dh.in.b,
+					 req->ctx.dh->p_size, DMA_TO_DEVICE);
+	}
+
+	areq->dst_len = req->ctx.dh->p_size;
+	if (req->dst_align) {
+		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+					 areq->dst_len, 1);
+
+		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
+				  req->out.dh.r);
+	} else {
+		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+				 DMA_FROM_DEVICE);
+	}
+
+	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
+			 DMA_TO_DEVICE);
+	dma_unmap_single(dev, req->phy_out,
+			 sizeof(struct qat_dh_output_params),
+			 DMA_TO_DEVICE);
+
+	kpp_request_complete(areq, err);
+}
+
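+/*
+ * Firmware PKE function IDs for DH modular exponentiation, selected by
+ * modulus size; the _G2_ variants assume the generator g = 2.
+ */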
+#define PKE_DH_1536 0x390c1a49
+#define PKE_DH_G2_1536 0x2e0b1a3e
+#define PKE_DH_2048 0x4d0c1a60
+#define PKE_DH_G2_2048 0x3e0b1a55
+#define PKE_DH_3072 0x510c1a77
+#define PKE_DH_G2_3072 0x3a0b1a6c
+#define PKE_DH_4096 0x690c1a8e
+#define PKE_DH_G2_4096 0x4a0b1a83
+
+static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
+{
+	unsigned int bitslen = len << 3;
+
+	switch (bitslen) {
+	case 1536:
+		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
+	case 2048:
+		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
+	case 3072:
+		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
+	case 4096:
+		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
+	default:
+		return 0;
+	}
+}
+
+static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
+{
+	return kpp_tfm_ctx(tfm);
+}
+
+static int qat_dh_compute_value(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_asym_request *qat_req =
+			PTR_ALIGN(kpp_request_ctx(req), 64);
+	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	int ret, ctr = 0;
+	int n_input_params = 0;
+
+	if (unlikely(!ctx->xa))
+		return -EINVAL;
+
+	if (req->dst_len < ctx->p_size) {
+		req->dst_len = ctx->p_size;
+		return -EOVERFLOW;
+	}
+	memset(msg, '\0', sizeof(*msg));
+	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
+						    !req->src && ctx->g2);
+	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+		return -EINVAL;
+
+	qat_req->cb = qat_dh_cb;
+	qat_req->ctx.dh = ctx;
+	qat_req->areq.dh = req;
+	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	msg->pke_hdr.comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+	/* If no source is provided, use g as the base */
+	if (req->src) {
+		qat_req->in.dh.in.xa = ctx->dma_xa;
+		qat_req->in.dh.in.p = ctx->dma_p;
+		n_input_params = 3;
+	} else {
+		if (ctx->g2) {
+			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
+			qat_req->in.dh.in_g2.p = ctx->dma_p;
+			n_input_params = 2;
+		} else {
+			qat_req->in.dh.in.b = ctx->dma_g;
+			qat_req->in.dh.in.xa = ctx->dma_xa;
+			qat_req->in.dh.in.p = ctx->dma_p;
+			n_input_params = 3;
+		}
+	}
+
+	ret = -ENOMEM;
+	if (req->src) {
+		/*
+		 * src can be of any size in the valid range, but the HW
+		 * expects it to be the same size as the modulus p. If the
+		 * sizes differ, allocate a bounce buffer and copy the src
+		 * data into it right-aligned; otherwise just map the
+		 * user-provided buffer. The buffer must also be physically
+		 * contiguous.
+		 */
+		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
+			qat_req->src_align = NULL;
+			qat_req->in.dh.in.b = dma_map_single(dev,
+							     sg_virt(req->src),
+							     req->src_len,
+							     DMA_TO_DEVICE);
+			if (unlikely(dma_mapping_error(dev,
+						       qat_req->in.dh.in.b)))
+				return ret;
+
+		} else {
+			int shift = ctx->p_size - req->src_len;
+
+			qat_req->src_align = dma_zalloc_coherent(dev,
+								 ctx->p_size,
+								 &qat_req->in.dh.in.b,
+								 GFP_KERNEL);
+			if (unlikely(!qat_req->src_align))
+				return ret;
+
+			scatterwalk_map_and_copy(qat_req->src_align + shift,
+						 req->src, 0, req->src_len, 0);
+		}
+	}
+	/*
+	 * dst can be of any size in the valid range, but the HW writes a
+	 * result the size of the modulus p. If the sizes differ, use a
+	 * DMA-coherent bounce buffer and copy the result back into dst on
+	 * completion; otherwise just map the user-provided buffer. The
+	 * buffer must also be physically contiguous.
+	 */
+	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
+		qat_req->dst_align = NULL;
+		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
+						   req->dst_len,
+						   DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+			goto unmap_src;
+
+	} else {
+		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
+							 &qat_req->out.dh.r,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->dst_align))
+			goto unmap_src;
+	}
+
+	qat_req->in.dh.in_tab[n_input_params] = 0;
+	qat_req->out.dh.out_tab[1] = 0;
+	/*
+	 * in.b and in_g2.xa occupy the same offset in the union, so mapping
+	 * &in.b covers both layouts.
+	 */
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
+					 sizeof(struct qat_dh_input_params),
+					 DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+		goto unmap_dst;
+
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
+					  sizeof(struct qat_dh_output_params),
+					  DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+		goto unmap_in_params;
+
+	msg->pke_mid.src_data_addr = qat_req->phy_in;
+	msg->pke_mid.dest_data_addr = qat_req->phy_out;
+	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+	msg->input_param_count = n_input_params;
+	msg->output_param_count = 1;
+
+	do {
+		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+	} while (ret == -EBUSY && ctr++ < 100);
+
+	if (!ret)
+		return -EINPROGRESS;
+
+	if (!dma_mapping_error(dev, qat_req->phy_out))
+		dma_unmap_single(dev, qat_req->phy_out,
+				 sizeof(struct qat_dh_output_params),
+				 DMA_TO_DEVICE);
+unmap_in_params:
+	if (!dma_mapping_error(dev, qat_req->phy_in))
+		dma_unmap_single(dev, qat_req->phy_in,
+				 sizeof(struct qat_dh_input_params),
+				 DMA_TO_DEVICE);
+unmap_dst:
+	if (qat_req->dst_align)
+		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
+				  qat_req->out.dh.r);
+	else
+		if (!dma_mapping_error(dev, qat_req->out.dh.r))
+			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+					 DMA_FROM_DEVICE);
+unmap_src:
+	if (req->src) {
+		if (qat_req->src_align)
+			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
+					  qat_req->in.dh.in.b);
+		else
+			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+				dma_unmap_single(dev, qat_req->in.dh.in.b,
+						 ctx->p_size,
+						 DMA_TO_DEVICE);
+	}
+	return ret;
+}
+
+static int qat_dh_check_params_length(unsigned int p_len)
+{
+	switch (p_len) {
+	case 1536:
+	case 2048:
+	case 3072:
+	case 4096:
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
+{
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+
+	if (qat_dh_check_params_length(params->p_size << 3))
+		return -EINVAL;
+
+	ctx->p_size = params->p_size;
+	ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+	if (!ctx->p)
+		return -ENOMEM;
+	memcpy(ctx->p, params->p, ctx->p_size);
+
+	/* If g equals 2 don't copy it */
+	if (params->g_size == 1 && *(char *)params->g == 0x02) {
+		ctx->g2 = true;
+		return 0;
+	}
+
+	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+	if (!ctx->g)
+		return -ENOMEM;
+	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
+	       params->g_size);
+
+	return 0;
+}
+
+static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
+{
+	if (ctx->g) {
+		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
+		ctx->g = NULL;
+	}
+	if (ctx->xa) {
+		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
+		ctx->xa = NULL;
+	}
+	if (ctx->p) {
+		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+		ctx->p = NULL;
+	}
+	ctx->p_size = 0;
+	ctx->g2 = false;
+}
+
+static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
+			     unsigned int len)
+{
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+	struct dh params;
+	int ret;
+
+	if (crypto_dh_decode_key(buf, len, &params) < 0)
+		return -EINVAL;
+
+	/* Free old secret if any */
+	qat_dh_clear_ctx(dev, ctx);
+
+	ret = qat_dh_set_params(ctx, &params);
+	if (ret < 0)
+		goto err_clear_ctx;
+
+	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+				      GFP_KERNEL);
+	if (!ctx->xa) {
+		ret = -ENOMEM;
+		goto err_clear_ctx;
+	}
+	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
+	       params.key_size);
+
+	return 0;
+
+err_clear_ctx:
+	qat_dh_clear_ctx(dev, ctx);
+	return ret;
+}
+
+static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
+{
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+	return ctx->p_size;
+}
+
+static int qat_dh_init_tfm(struct crypto_kpp *tfm)
+{
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst =
+			qat_crypto_get_instance_node(get_current_node());
+
+	if (!inst)
+		return -EINVAL;
+
+	ctx->p_size = 0;
+	ctx->g2 = false;
+	ctx->inst = inst;
+	return 0;
+}
+
+static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
+{
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+	qat_dh_clear_ctx(dev, ctx);
+	qat_crypto_put_instance(ctx->inst);
+}
+
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+	struct akcipher_request *areq = req->areq.rsa;
+	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
+	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+				resp->pke_resp_hdr.comn_resp_flags);
+
+	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+	if (req->src_align)
+		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
+				  req->in.rsa.enc.m);
+	else
+		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+				 DMA_TO_DEVICE);
+
+	areq->dst_len = req->ctx.rsa->key_sz;
+	if (req->dst_align) {
+		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+					 areq->dst_len, 1);
+
+		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
+				  req->out.rsa.enc.c);
+	} else {
+		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+				 DMA_FROM_DEVICE);
+	}
+
+	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+			 DMA_TO_DEVICE);
+	dma_unmap_single(dev, req->phy_out,
+			 sizeof(struct qat_rsa_output_params),
+			 DMA_TO_DEVICE);
+
+	akcipher_request_complete(areq, err);
+}
+
+void qat_alg_asym_callback(void *_resp)
+{
+	struct icp_qat_fw_pke_resp *resp = _resp;
+	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
+
+	areq->cb(resp);
+}
+
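+/* Firmware PKE function IDs for RSA public-key operations (m^e mod n). */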
+#define PKE_RSA_EP_512 0x1c161b21
+#define PKE_RSA_EP_1024 0x35111bf7
+#define PKE_RSA_EP_1536 0x4d111cdc
+#define PKE_RSA_EP_2048 0x6e111dba
+#define PKE_RSA_EP_3072 0x7d111ea3
+#define PKE_RSA_EP_4096 0xa5101f7e
+
+static unsigned long qat_rsa_enc_fn_id(unsigned int len)
+{
+	unsigned int bitslen = len << 3;
+
+	switch (bitslen) {
+	case 512:
+		return PKE_RSA_EP_512;
+	case 1024:
+		return PKE_RSA_EP_1024;
+	case 1536:
+		return PKE_RSA_EP_1536;
+	case 2048:
+		return PKE_RSA_EP_2048;
+	case 3072:
+		return PKE_RSA_EP_3072;
+	case 4096:
+		return PKE_RSA_EP_4096;
+	default:
+		return 0;
+	}
+}
+
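+/* Firmware PKE function IDs for RSA private-key operations (c^d mod n). */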
+#define PKE_RSA_DP1_512 0x1c161b3c
+#define PKE_RSA_DP1_1024 0x35111c12
+#define PKE_RSA_DP1_1536 0x4d111cf7
+#define PKE_RSA_DP1_2048 0x6e111dda
+#define PKE_RSA_DP1_3072 0x7d111ebe
+#define PKE_RSA_DP1_4096 0xa5101f98
+
+static unsigned long qat_rsa_dec_fn_id(unsigned int len)
+{
+	unsigned int bitslen = len << 3;
+
+	switch (bitslen) {
+	case 512:
+		return PKE_RSA_DP1_512;
+	case 1024:
+		return PKE_RSA_DP1_1024;
+	case 1536:
+		return PKE_RSA_DP1_1536;
+	case 2048:
+		return PKE_RSA_DP1_2048;
+	case 3072:
+		return PKE_RSA_DP1_3072;
+	case 4096:
+		return PKE_RSA_DP1_4096;
+	default:
+		return 0;
+	}
+}
+
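+/* Firmware PKE function IDs for RSA private-key operations in CRT form. */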
+#define PKE_RSA_DP2_512 0x1c131b57
+#define PKE_RSA_DP2_1024 0x26131c2d
+#define PKE_RSA_DP2_1536 0x45111d12
+#define PKE_RSA_DP2_2048 0x59121dfa
+#define PKE_RSA_DP2_3072 0x81121ed9
+#define PKE_RSA_DP2_4096 0xb1111fb2
+
+static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
+{
+	unsigned int bitslen = len << 3;
+
+	switch (bitslen) {
+	case 512:
+		return PKE_RSA_DP2_512;
+	case 1024:
+		return PKE_RSA_DP2_1024;
+	case 1536:
+		return PKE_RSA_DP2_1536;
+	case 2048:
+		return PKE_RSA_DP2_2048;
+	case 3072:
+		return PKE_RSA_DP2_3072;
+	case 4096:
+		return PKE_RSA_DP2_4096;
+	default:
+		return 0;
+	}
+}
+
+static int qat_rsa_enc(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_asym_request *qat_req =
+			PTR_ALIGN(akcipher_request_ctx(req), 64);
+	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	int ret, ctr = 0;
+
+	if (unlikely(!ctx->n || !ctx->e))
+		return -EINVAL;
+
+	if (req->dst_len < ctx->key_sz) {
+		req->dst_len = ctx->key_sz;
+		return -EOVERFLOW;
+	}
+	memset(msg, '\0', sizeof(*msg));
+	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
+	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+		return -EINVAL;
+
+	qat_req->cb = qat_rsa_cb;
+	qat_req->ctx.rsa = ctx;
+	qat_req->areq.rsa = req;
+	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	msg->pke_hdr.comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+	qat_req->in.rsa.enc.e = ctx->dma_e;
+	qat_req->in.rsa.enc.n = ctx->dma_n;
+	ret = -ENOMEM;
+
+	/*
+	 * src can be of any size in the valid range, but the HW expects it
+	 * to be the same size as the modulus n. If the sizes differ,
+	 * allocate a bounce buffer and copy the src data into it
+	 * right-aligned; otherwise just map the user-provided buffer. The
+	 * buffer must also be physically contiguous.
+	 */
+	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+		qat_req->src_align = NULL;
+		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
+						   req->src_len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
+			return ret;
+
+	} else {
+		int shift = ctx->key_sz - req->src_len;
+
+		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+							 &qat_req->in.rsa.enc.m,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->src_align))
+			return ret;
+
+		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+					 0, req->src_len, 0);
+	}
+	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+		qat_req->dst_align = NULL;
+		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
+							req->dst_len,
+							DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
+			goto unmap_src;
+
+	} else {
+		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
+							 &qat_req->out.rsa.enc.c,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->dst_align))
+			goto unmap_src;
+
+	}
+	qat_req->in.rsa.in_tab[3] = 0;
+	qat_req->out.rsa.out_tab[1] = 0;
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
+					 sizeof(struct qat_rsa_input_params),
+					 DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+		goto unmap_dst;
+
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
+					  sizeof(struct qat_rsa_output_params),
+					  DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+		goto unmap_in_params;
+
+	msg->pke_mid.src_data_addr = qat_req->phy_in;
+	msg->pke_mid.dest_data_addr = qat_req->phy_out;
+	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+	msg->input_param_count = 3;
+	msg->output_param_count = 1;
+	do {
+		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+	} while (ret == -EBUSY && ctr++ < 100);
+
+	if (!ret)
+		return -EINPROGRESS;
+
+	if (!dma_mapping_error(dev, qat_req->phy_out))
+		dma_unmap_single(dev, qat_req->phy_out,
+				 sizeof(struct qat_rsa_output_params),
+				 DMA_TO_DEVICE);
+unmap_in_params:
+	if (!dma_mapping_error(dev, qat_req->phy_in))
+		dma_unmap_single(dev, qat_req->phy_in,
+				 sizeof(struct qat_rsa_input_params),
+				 DMA_TO_DEVICE);
+unmap_dst:
+	if (qat_req->dst_align)
+		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
+				  qat_req->out.rsa.enc.c);
+	else
+		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+					 ctx->key_sz, DMA_FROM_DEVICE);
+unmap_src:
+	if (qat_req->src_align)
+		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+				  qat_req->in.rsa.enc.m);
+	else
+		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
+					 ctx->key_sz, DMA_TO_DEVICE);
+	return ret;
+}
+
+static int qat_rsa_dec(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_asym_request *qat_req =
+			PTR_ALIGN(akcipher_request_ctx(req), 64);
+	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	int ret, ctr = 0;
+
+	if (unlikely(!ctx->n || !ctx->d))
+		return -EINVAL;
+
+	if (req->dst_len < ctx->key_sz) {
+		req->dst_len = ctx->key_sz;
+		return -EOVERFLOW;
+	}
+	memset(msg, '\0', sizeof(*msg));
+	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
+		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
+		qat_rsa_dec_fn_id(ctx->key_sz);
+	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+		return -EINVAL;
+
+	qat_req->cb = qat_rsa_cb;
+	qat_req->ctx.rsa = ctx;
+	qat_req->areq.rsa = req;
+	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	msg->pke_hdr.comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+	if (ctx->crt_mode) {
+		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
+		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
+		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
+		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
+		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
+	} else {
+		qat_req->in.rsa.dec.d = ctx->dma_d;
+		qat_req->in.rsa.dec.n = ctx->dma_n;
+	}
+	ret = -ENOMEM;
+
+	/*
+	 * src can be of any size in the valid range, but the HW expects it
+	 * to be the same size as the modulus n. If the sizes differ,
+	 * allocate a bounce buffer and copy the src data into it
+	 * right-aligned; otherwise just map the user-provided buffer. The
+	 * buffer must also be physically contiguous.
+	 */
+	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+		qat_req->src_align = NULL;
+		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
+						   req->src_len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
+			return ret;
+
+	} else {
+		int shift = ctx->key_sz - req->src_len;
+
+		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+							 &qat_req->in.rsa.dec.c,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->src_align))
+			return ret;
+
+		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+					 0, req->src_len, 0);
+	}
+	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+		qat_req->dst_align = NULL;
+		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
+						    req->dst_len,
+						    DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
+			goto unmap_src;
+
+	} else {
+		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
+							 &qat_req->out.rsa.dec.m,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->dst_align))
+			goto unmap_src;
+
+	}
+
+	if (ctx->crt_mode)
+		qat_req->in.rsa.in_tab[6] = 0;
+	else
+		qat_req->in.rsa.in_tab[3] = 0;
+	qat_req->out.rsa.out_tab[1] = 0;
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
+					 sizeof(struct qat_rsa_input_params),
+					 DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+		goto unmap_dst;
+
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
+					  sizeof(struct qat_rsa_output_params),
+					  DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+		goto unmap_in_params;
+
+	msg->pke_mid.src_data_addr = qat_req->phy_in;
+	msg->pke_mid.dest_data_addr = qat_req->phy_out;
+	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+	if (ctx->crt_mode)
+		msg->input_param_count = 6;
+	else
+		msg->input_param_count = 3;
+
+	msg->output_param_count = 1;
+	do {
+		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+	} while (ret == -EBUSY && ctr++ < 100);
+
+	if (!ret)
+		return -EINPROGRESS;
+
+	if (!dma_mapping_error(dev, qat_req->phy_out))
+		dma_unmap_single(dev, qat_req->phy_out,
+				 sizeof(struct qat_rsa_output_params),
+				 DMA_TO_DEVICE);
+unmap_in_params:
+	if (!dma_mapping_error(dev, qat_req->phy_in))
+		dma_unmap_single(dev, qat_req->phy_in,
+				 sizeof(struct qat_rsa_input_params),
+				 DMA_TO_DEVICE);
+unmap_dst:
+	if (qat_req->dst_align)
+		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
+				  qat_req->out.rsa.dec.m);
+	else
+		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+					 ctx->key_sz, DMA_FROM_DEVICE);
+unmap_src:
+	if (qat_req->src_align)
+		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+				  qat_req->in.rsa.dec.c);
+	else
+		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
+					 ctx->key_sz, DMA_TO_DEVICE);
+	return ret;
+}
+
+static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
+			 size_t vlen)
+{
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr = value;
+	int ret;
+
+	while (!*ptr && vlen) {
+		ptr++;
+		vlen--;
+	}
+
+	ctx->key_sz = vlen;
+	ret = -EINVAL;
+	/* invalid key size provided */
+	if (!qat_rsa_enc_fn_id(ctx->key_sz))
+		goto err;
+
+	ret = -ENOMEM;
+	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+	if (!ctx->n)
+		goto err;
+
+	memcpy(ctx->n, ptr, ctx->key_sz);
+	return 0;
+err:
+	ctx->key_sz = 0;
+	ctx->n = NULL;
+	return ret;
+}
+
+static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
+			 size_t vlen)
+{
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr = value;
+
+	while (!*ptr && vlen) {
+		ptr++;
+		vlen--;
+	}
+
+	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+		ctx->e = NULL;
+		return -EINVAL;
+	}
+
+	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+	if (!ctx->e)
+		return -ENOMEM;
+
+	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
+	return 0;
+}
+
+static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
+			 size_t vlen)
+{
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr = value;
+	int ret;
+
+	while (!*ptr && vlen) {
+		ptr++;
+		vlen--;
+	}
+
+	ret = -EINVAL;
+	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+		goto err;
+
+	ret = -ENOMEM;
+	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+	if (!ctx->d)
+		goto err;
+
+	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
+	return 0;
+err:
+	ctx->d = NULL;
+	return ret;
+}
+
+static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
+{
+	while (!**ptr && *len) {
+		(*ptr)++;
+		(*len)--;
+	}
+}
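+/*
+ * Load the optional CRT components (p, q, dp, dq, qinv). Any failure here
+ * is not fatal: the context simply falls back to the plain (n, d) private
+ * key with crt_mode disabled.
+ */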
+
+static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
+{
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr;
+	unsigned int len;
+	unsigned int half_key_sz = ctx->key_sz / 2;
+
+	/* p */
+	ptr = rsa_key->p;
+	len = rsa_key->p_sz;
+	qat_rsa_drop_leading_zeros(&ptr, &len);
+	if (!len)
+		goto err;
+	ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+	if (!ctx->p)
+		goto err;
+	memcpy(ctx->p + (half_key_sz - len), ptr, len);
+
+	/* q */
+	ptr = rsa_key->q;
+	len = rsa_key->q_sz;
+	qat_rsa_drop_leading_zeros(&ptr, &len);
+	if (!len)
+		goto free_p;
+	ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+	if (!ctx->q)
+		goto free_p;
+	memcpy(ctx->q + (half_key_sz - len), ptr, len);
+
+	/* dp */
+	ptr = rsa_key->dp;
+	len = rsa_key->dp_sz;
+	qat_rsa_drop_leading_zeros(&ptr, &len);
+	if (!len)
+		goto free_q;
+	ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+				      GFP_KERNEL);
+	if (!ctx->dp)
+		goto free_q;
+	memcpy(ctx->dp + (half_key_sz - len), ptr, len);
+
+	/* dq */
+	ptr = rsa_key->dq;
+	len = rsa_key->dq_sz;
+	qat_rsa_drop_leading_zeros(&ptr, &len);
+	if (!len)
+		goto free_dp;
+	ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+				      GFP_KERNEL);
+	if (!ctx->dq)
+		goto free_dp;
+	memcpy(ctx->dq + (half_key_sz - len), ptr, len);
+
+	/* qinv */
+	ptr = rsa_key->qinv;
+	len = rsa_key->qinv_sz;
+	qat_rsa_drop_leading_zeros(&ptr, &len);
+	if (!len)
+		goto free_dq;
+	ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+					GFP_KERNEL);
+	if (!ctx->qinv)
+		goto free_dq;
+	memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
+
+	ctx->crt_mode = true;
+	return;
+
+free_dq:
+	memset(ctx->dq, '\0', half_key_sz);
+	dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+	ctx->dq = NULL;
+free_dp:
+	memset(ctx->dp, '\0', half_key_sz);
+	dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+	ctx->dp = NULL;
+free_q:
+	memset(ctx->q, '\0', half_key_sz);
+	dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+	ctx->q = NULL;
+free_p:
+	memset(ctx->p, '\0', half_key_sz);
+	dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+	ctx->p = NULL;
+err:
+	ctx->crt_mode = false;
+}
+
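+/*
+ * Free the RSA key material, zeroising the private components before
+ * returning their DMA-coherent buffers.
+ */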
+static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
+{
+	unsigned int half_key_sz = ctx->key_sz / 2;
+
+	/* Free the old key if any */
+	if (ctx->n)
+		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+	if (ctx->e)
+		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+	if (ctx->d) {
+		memset(ctx->d, '\0', ctx->key_sz);
+		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+	}
+	if (ctx->p) {
+		memset(ctx->p, '\0', half_key_sz);
+		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+	}
+	if (ctx->q) {
+		memset(ctx->q, '\0', half_key_sz);
+		dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+	}
+	if (ctx->dp) {
+		memset(ctx->dp, '\0', half_key_sz);
+		dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+	}
+	if (ctx->dq) {
+		memset(ctx->dq, '\0', half_key_sz);
+		dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+	}
+	if (ctx->qinv) {
+		memset(ctx->qinv, '\0', half_key_sz);
+		dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
+	}
+
+	ctx->n = NULL;
+	ctx->e = NULL;
+	ctx->d = NULL;
+	ctx->p = NULL;
+	ctx->q = NULL;
+	ctx->dp = NULL;
+	ctx->dq = NULL;
+	ctx->qinv = NULL;
+	ctx->crt_mode = false;
+	ctx->key_sz = 0;
+}
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+			  unsigned int keylen, bool private)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+	struct rsa_key rsa_key;
+	int ret;
+
+	qat_rsa_clear_ctx(dev, ctx);
+
+	if (private)
+		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
+	else
+		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+	if (ret < 0)
+		goto free;
+
+	ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
+	if (ret < 0)
+		goto free;
+	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
+	if (ret < 0)
+		goto free;
+	if (private) {
+		ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
+		if (ret < 0)
+			goto free;
+		qat_rsa_setkey_crt(ctx, &rsa_key);
+	}
+
+	if (!ctx->n || !ctx->e) {
+		/* invalid key provided */
+		ret = -EINVAL;
+		goto free;
+	}
+	if (private && !ctx->d) {
+		/* invalid private key provided */
+		ret = -EINVAL;
+		goto free;
+	}
+
+	return 0;
+free:
+	qat_rsa_clear_ctx(dev, ctx);
+	return ret;
+}
+
+static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+			     unsigned int keylen)
+{
+	return qat_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+			      unsigned int keylen)
+{
+	return qat_rsa_setkey(tfm, key, keylen, true);
+}
+
+static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	return ctx->key_sz;
+}
+
+static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst =
+			qat_crypto_get_instance_node(get_current_node());
+
+	if (!inst)
+		return -EINVAL;
+
+	ctx->key_sz = 0;
+	ctx->inst = inst;
+	return 0;
+}
+
+static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+	if (ctx->n)
+		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+	if (ctx->e)
+		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+	if (ctx->d) {
+		memset(ctx->d, '\0', ctx->key_sz);
+		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+	}
+	qat_crypto_put_instance(ctx->inst);
+	ctx->n = NULL;
+	ctx->e = NULL;
+	ctx->d = NULL;
+}
+
+static struct akcipher_alg rsa = {
+	.encrypt = qat_rsa_enc,
+	.decrypt = qat_rsa_dec,
+	.sign = qat_rsa_dec,
+	.verify = qat_rsa_enc,
+	.set_pub_key = qat_rsa_setpubkey,
+	.set_priv_key = qat_rsa_setprivkey,
+	.max_size = qat_rsa_max_size,
+	.init = qat_rsa_init_tfm,
+	.exit = qat_rsa_exit_tfm,
+	.reqsize = sizeof(struct qat_asym_request) + 64,
+	.base = {
+		.cra_name = "rsa",
+		.cra_driver_name = "qat-rsa",
+		.cra_priority = 1000,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
+	},
+};
+
+static struct kpp_alg dh = {
+	.set_secret = qat_dh_set_secret,
+	.generate_public_key = qat_dh_compute_value,
+	.compute_shared_secret = qat_dh_compute_value,
+	.max_size = qat_dh_max_size,
+	.init = qat_dh_init_tfm,
+	.exit = qat_dh_exit_tfm,
+	.reqsize = sizeof(struct qat_asym_request) + 64,
+	.base = {
+		.cra_name = "dh",
+		.cra_driver_name = "qat-dh",
+		.cra_priority = 1000,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct qat_dh_ctx),
+	},
+};
+
+int qat_asym_algs_register(void)
+{
+	int ret = 0;
+
+	mutex_lock(&algs_lock);
+	if (++active_devs == 1) {
+		rsa.base.cra_flags = 0;
+		ret = crypto_register_akcipher(&rsa);
+		if (ret)
+			goto unlock;
+		ret = crypto_register_kpp(&dh);
+	}
+unlock:
+	mutex_unlock(&algs_lock);
+	return ret;
+}
+
+void qat_asym_algs_unregister(void)
+{
+	mutex_lock(&algs_lock);
+	if (--active_devs == 0) {
+		crypto_unregister_akcipher(&rsa);
+		crypto_unregister_kpp(&dh);
+	}
+	mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
new file mode 100644
index 0000000..3852d31
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -0,0 +1,370 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_crypto;
+
+void qat_crypto_put_instance(struct qat_crypto_instance *inst)
+{
+	atomic_dec(&inst->refctr);
+	adf_dev_put(inst->accel_dev);
+}
+
+static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
+{
+	struct qat_crypto_instance *inst, *tmp;
+	int i;
+
+	list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
+		for (i = 0; i < atomic_read(&inst->refctr); i++)
+			qat_crypto_put_instance(inst);
+
+		if (inst->sym_tx)
+			adf_remove_ring(inst->sym_tx);
+
+		if (inst->sym_rx)
+			adf_remove_ring(inst->sym_rx);
+
+		if (inst->pke_tx)
+			adf_remove_ring(inst->pke_tx);
+
+		if (inst->pke_rx)
+			adf_remove_ring(inst->pke_rx);
+
+		list_del(&inst->list);
+		kfree(inst);
+	}
+	return 0;
+}
+
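+/*
+ * Pick a crypto instance for a caller on @node: prefer a started device on
+ * that NUMA node with the lowest reference count, fall back to any started
+ * device, then choose its least-used instance and take references on both
+ * the device and the instance.
+ */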
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
+{
+	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
+	struct qat_crypto_instance *inst = NULL, *tmp_inst;
+	unsigned long best = ~0;
+
+	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
+		unsigned long ctr;
+
+		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
+		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
+		    adf_dev_started(tmp_dev) &&
+		    !list_empty(&tmp_dev->crypto_list)) {
+			ctr = atomic_read(&tmp_dev->ref_count);
+			if (best > ctr) {
+				accel_dev = tmp_dev;
+				best = ctr;
+			}
+		}
+	}
+
+	if (!accel_dev) {
+		pr_info("QAT: Could not find a device on node %d\n", node);
+		/* Get any started device */
+		list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
+			if (adf_dev_started(tmp_dev) &&
+			    !list_empty(&tmp_dev->crypto_list)) {
+				accel_dev = tmp_dev;
+				break;
+			}
+		}
+	}
+
+	if (!accel_dev)
+		return NULL;
+
+	best = ~0;
+	list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
+		unsigned long ctr;
+
+		ctr = atomic_read(&tmp_inst->refctr);
+		if (best > ctr) {
+			inst = tmp_inst;
+			best = ctr;
+		}
+	}
+	if (inst) {
+		if (adf_dev_get(accel_dev)) {
+			dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
+			return NULL;
+		}
+		atomic_inc(&inst->refctr);
+	}
+	return inst;
+}
+
+/**
+ * qat_crypto_dev_config() - create dev config required to create crypto inst.
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Creates the device configuration required to create crypto instances.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
+{
+	int cpus = num_online_cpus();
+	int banks = GET_MAX_BANKS(accel_dev);
+	int instances = min(cpus, banks);
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	int i;
+	unsigned long val;
+
+	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+		goto err;
+	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
+		goto err;
+	for (i = 0; i < instances; i++) {
+		val = i;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+			 i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+		val = 128;
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 512;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 0;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 2;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 8;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 10;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = ADF_COALESCING_DEF_TIME;
+		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+		if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+						key, (void *)&val, ADF_DEC))
+			goto err;
+	}
+
+	val = i;
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+					ADF_NUM_CY, (void *)&val, ADF_DEC))
+		goto err;
+
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+	return 0;
+err:
+	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(qat_crypto_dev_config);
+
+static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
+{
+	int i;
+	unsigned long bank;
+	unsigned long num_inst, num_msg_sym, num_msg_asym;
+	int msg_size;
+	struct qat_crypto_instance *inst;
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+	strlcpy(key, ADF_NUM_CY, sizeof(key));
+	if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+		return -EFAULT;
+
+	if (kstrtoul(val, 0, &num_inst))
+		return -EFAULT;
+
+	for (i = 0; i < num_inst; i++) {
+		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+				    dev_to_node(&GET_DEV(accel_dev)));
+		if (!inst)
+			goto err;
+
+		list_add_tail(&inst->list, &accel_dev->crypto_list);
+		inst->id = i;
+		atomic_set(&inst->refctr, 0);
+		inst->accel_dev = accel_dev;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+			goto err;
+
+		if (kstrtoul(val, 10, &bank))
+			goto err;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+			goto err;
+
+		if (kstrtoul(val, 10, &num_msg_sym))
+			goto err;
+
+		num_msg_sym = num_msg_sym >> 1;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+			goto err;
+
+		if (kstrtoul(val, 10, &num_msg_asym))
+			goto err;
+		num_msg_asym = num_msg_asym >> 1;
+
+		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+				    msg_size, key, NULL, 0, &inst->sym_tx))
+			goto err;
+
+		msg_size = msg_size >> 1;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+				    msg_size, key, NULL, 0, &inst->pke_tx))
+			goto err;
+
+		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+				    msg_size, key, qat_alg_callback, 0,
+				    &inst->sym_rx))
+			goto err;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+				    msg_size, key, qat_alg_asym_callback, 0,
+				    &inst->pke_rx))
+			goto err;
+	}
+	return 0;
+err:
+	qat_crypto_free_instances(accel_dev);
+	return -ENOMEM;
+}
+
+static int qat_crypto_init(struct adf_accel_dev *accel_dev)
+{
+	if (qat_crypto_create_instances(accel_dev))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
+{
+	return qat_crypto_free_instances(accel_dev);
+}
+
+static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
+				    enum adf_event event)
+{
+	int ret;
+
+	switch (event) {
+	case ADF_EVENT_INIT:
+		ret = qat_crypto_init(accel_dev);
+		break;
+	case ADF_EVENT_SHUTDOWN:
+		ret = qat_crypto_shutdown(accel_dev);
+		break;
+	case ADF_EVENT_RESTARTING:
+	case ADF_EVENT_RESTARTED:
+	case ADF_EVENT_START:
+	case ADF_EVENT_STOP:
+	default:
+		ret = 0;
+	}
+	return ret;
+}
+
+int qat_crypto_register(void)
+{
+	memset(&qat_crypto, 0, sizeof(qat_crypto));
+	qat_crypto.event_hld = qat_crypto_event_handler;
+	qat_crypto.name = "qat_crypto";
+	return adf_service_register(&qat_crypto);
+}
+
+int qat_crypto_unregister(void)
+{
+	return adf_service_unregister(&qat_crypto);
+}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
new file mode 100644
index 0000000..dc0273f
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -0,0 +1,93 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _QAT_CRYPTO_INSTANCE_H_
+#define _QAT_CRYPTO_INSTANCE_H_
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_la.h"
+
+struct qat_crypto_instance {
+	struct adf_etr_ring_data *sym_tx;
+	struct adf_etr_ring_data *sym_rx;
+	struct adf_etr_ring_data *pke_tx;
+	struct adf_etr_ring_data *pke_rx;
+	struct adf_accel_dev *accel_dev;
+	struct list_head list;
+	unsigned long state;
+	int id;
+	atomic_t refctr;
+};
+
+struct qat_crypto_request_buffs {
+	struct qat_alg_buf_list *bl;
+	dma_addr_t blp;
+	struct qat_alg_buf_list *blout;
+	dma_addr_t bloutp;
+	size_t sz;
+	size_t sz_out;
+};
+
+struct qat_crypto_request;
+
+struct qat_crypto_request {
+	struct icp_qat_fw_la_bulk_req req;
+	union {
+		struct qat_alg_aead_ctx *aead_ctx;
+		struct qat_alg_ablkcipher_ctx *ablkcipher_ctx;
+	};
+	union {
+		struct aead_request *aead_req;
+		struct ablkcipher_request *ablkcipher_req;
+	};
+	struct qat_crypto_request_buffs buf;
+	void (*cb)(struct icp_qat_fw_la_resp *resp,
+		   struct qat_crypto_request *req);
+};
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
new file mode 100644
index 0000000..ff149e1
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -0,0 +1,1441 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_uclo.h"
+
+#define BAD_REGADDR	       0xffff
+#define MAX_RETRY_TIMES	   10000
+#define INIT_CTX_ARB_VALUE	0x0
+#define INIT_CTX_ENABLE_VALUE     0x0
+#define INIT_PC_VALUE	     0x0
+#define INIT_WAKEUP_EVENTS_VALUE  0x1
+#define INIT_SIG_EVENTS_VALUE     0x1
+#define INIT_CCENABLE_VALUE       0x2000
+#define RST_CSR_QAT_LSB	   20
+#define RST_CSR_AE_LSB		  0
+#define MC_TIMESTAMP_ENABLE       (0x1 << 7)
+
+#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
+	(~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
+	(~(1 << CE_REG_PAR_ERR_BITPOS)))
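+/*
+ * Patch an immediate constant into the GPR A/B operand fields of a
+ * microword; used to fix up the canned instruction templates below.
+ */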
+#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
+	(inst = ((inst & 0xFFFF00C03FFull) | \
+		((((const_val) << 12) & 0x0FF00000ull) | \
+		(((const_val) << 10) & 0x0003FC00ull))))
+#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
+	(inst = ((inst & 0xFFFF00FFF00ull) | \
+		((((const_val) << 12) & 0x0FF00000ull) | \
+		(((const_val) <<  0) & 0x000000FFull))))
+
+#define AE(handle, ae) handle->hal_handle->aes[ae]
+
+static const uint64_t inst_4b[] = {
+	0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
+	0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+	0x0A021000000ull
+};
+
+static const uint64_t inst[] = {
+	0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
+	0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+	0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
+	0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+	0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
+	0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
+	0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
+	0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
+	0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
+	0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
+	0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
+	0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
+	0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
+	0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
+	0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
+	0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
+	0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
+	0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
+	0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
+	0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
+	0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
+	0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
+};
+
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+			  unsigned char ae, unsigned int ctx_mask)
+{
+	AE(handle, ae).live_ctx_mask = ctx_mask;
+}
+
+#define CSR_RETRY_TIMES 500
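+/*
+ * Read a local AE CSR, retrying up to CSR_RETRY_TIMES while LOCAL_CSR_STATUS
+ * still flags the access.
+ */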
+static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
+			     unsigned char ae, unsigned int csr)
+{
+	unsigned int iterations = CSR_RETRY_TIMES;
+	int value;
+
+	do {
+		value = GET_AE_CSR(handle, ae, csr);
+		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+			return value;
+	} while (iterations--);
+
+	pr_err("QAT: Read CSR timeout\n");
+	return 0;
+}
+
+static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
+			     unsigned char ae, unsigned int csr,
+			     unsigned int value)
+{
+	unsigned int iterations = CSR_RETRY_TIMES;
+
+	do {
+		SET_AE_CSR(handle, ae, csr, value);
+		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+			return 0;
+	} while (iterations--);
+
+	pr_err("QAT: Write CSR Timeout\n");
+	return -EFAULT;
+}
+
+static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+				     unsigned char ae, unsigned char ctx,
+				     unsigned int *events)
+{
+	unsigned int cur_ctx;
+
+	cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+	*events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
+			       unsigned char ae, unsigned int cycles,
+			       int chk_inactive)
+{
+	unsigned int base_cnt = 0, cur_cnt = 0;
+	unsigned int csr = (1 << ACS_ABO_BITPOS);
+	int times = MAX_RETRY_TIMES;
+	int elapsed_cycles = 0;
+
+	base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+	base_cnt &= 0xffff;
+	while ((int)cycles > elapsed_cycles && times--) {
+		if (chk_inactive)
+			csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+
+		cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+		cur_cnt &= 0xffff;
+		elapsed_cycles = cur_cnt - base_cnt;
+
+		if (elapsed_cycles < 0)
+			elapsed_cycles += 0x10000;
+
+		/* ensure at least 8 cycles have elapsed in wait_cycles */
+		if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
+			return 0;
+	}
+	if (times < 0) {
+		pr_err("QAT: wait_num_cycles timed out\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+#define CLR_BIT(wrd, bit) (wrd & ~(1 << bit))
+#define SET_BIT(wrd, bit) (wrd | 1 << bit)
+
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+			    unsigned char ae, unsigned char mode)
+{
+	unsigned int csr, new_csr;
+
+	if ((mode != 4) && (mode != 8)) {
+		pr_err("QAT: bad ctx mode=%d\n", mode);
+		return -EINVAL;
+	}
+
+	/* Set the AE context mode to either four or eight contexts */
+	csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+	csr = IGNORE_W1C_MASK & csr;
+	new_csr = (mode == 4) ?
+		SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
+		CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+	return 0;
+}
+
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+			   unsigned char ae, unsigned char mode)
+{
+	unsigned int csr, new_csr;
+
+	csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+	csr &= IGNORE_W1C_MASK;
+
+	new_csr = (mode) ?
+		SET_BIT(csr, CE_NN_MODE_BITPOS) :
+		CLR_BIT(csr, CE_NN_MODE_BITPOS);
+
+	if (new_csr != csr)
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+
+	return 0;
+}
+
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+			   unsigned char ae, enum icp_qat_uof_regtype lm_type,
+			   unsigned char mode)
+{
+	unsigned int csr, new_csr;
+
+	csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+	csr &= IGNORE_W1C_MASK;
+	switch (lm_type) {
+	case ICP_LMEM0:
+		new_csr = (mode) ?
+			SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
+			CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
+		break;
+	case ICP_LMEM1:
+		new_csr = (mode) ?
+			SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
+			CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
+		break;
+	default:
+		pr_err("QAT: lmType = 0x%x\n", lm_type);
+		return -EINVAL;
+	}
+
+	if (new_csr != csr)
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+	return 0;
+}
+
+static unsigned short qat_hal_get_reg_addr(unsigned int type,
+					   unsigned short reg_num)
+{
+	unsigned short reg_addr;
+
+	switch (type) {
+	case ICP_GPA_ABS:
+	case ICP_GPB_ABS:
+		reg_addr = 0x80 | (reg_num & 0x7f);
+		break;
+	case ICP_GPA_REL:
+	case ICP_GPB_REL:
+		reg_addr = reg_num & 0x1f;
+		break;
+	case ICP_SR_RD_REL:
+	case ICP_SR_WR_REL:
+	case ICP_SR_REL:
+		reg_addr = 0x180 | (reg_num & 0x1f);
+		break;
+	case ICP_SR_ABS:
+		reg_addr = 0x140 | ((reg_num & 0x3) << 1);
+		break;
+	case ICP_DR_RD_REL:
+	case ICP_DR_WR_REL:
+	case ICP_DR_REL:
+		reg_addr = 0x1c0 | (reg_num & 0x1f);
+		break;
+	case ICP_DR_ABS:
+		reg_addr = 0x100 | ((reg_num & 0x3) << 1);
+		break;
+	case ICP_NEIGH_REL:
+		reg_addr = 0x280 | (reg_num & 0x1f);
+		break;
+	case ICP_LMEM0:
+		reg_addr = 0x200;
+		break;
+	case ICP_LMEM1:
+		reg_addr = 0x220;
+		break;
+	case ICP_NO_DEST:
+		reg_addr = 0x300 | (reg_num & 0xff);
+		break;
+	default:
+		reg_addr = BAD_REGADDR;
+		break;
+	}
+	return reg_addr;
+}
+
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int ae_reset_csr;
+
+	ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
+	ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB;
+	ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB;
+	SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
+}
+
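+/*
+ * Write an indirect (per-context) AE CSR for every context selected in
+ * ctx_mask, preserving the caller's CSR context pointer.
+ */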
+static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
+				unsigned char ae, unsigned int ctx_mask,
+				unsigned int ae_csr, unsigned int csr_val)
+{
+	unsigned int ctx, cur_ctx;
+
+	cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+
+	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+		if (!(ctx_mask & (1 << ctx)))
+			continue;
+		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+		qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
+	}
+
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+				unsigned char ae, unsigned char ctx,
+				unsigned int ae_csr)
+{
+	unsigned int cur_ctx, csr_val;
+
+	cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+	csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+
+	return csr_val;
+}
+
+static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
+				  unsigned char ae, unsigned int ctx_mask,
+				  unsigned int events)
+{
+	unsigned int ctx, cur_ctx;
+
+	cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+		if (!(ctx_mask & (1 << ctx)))
+			continue;
+		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
+	}
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+				     unsigned char ae, unsigned int ctx_mask,
+				     unsigned int events)
+{
+	unsigned int ctx, cur_ctx;
+
+	cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+		if (!(ctx_mask & (1 << ctx)))
+			continue;
+		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+		qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
+				  events);
+	}
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int base_cnt, cur_cnt;
+	unsigned char ae;
+	int times = MAX_RETRY_TIMES;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+		base_cnt &= 0xffff;
+
+		do {
+			cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+			cur_cnt &= 0xffff;
+		} while (times-- && (cur_cnt == base_cnt));
+
+		if (times < 0) {
+			pr_err("QAT: AE%d is inactive!!\n", ae);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
+			    unsigned int ae)
+{
+	unsigned int enable = 0, active = 0;
+
+	enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+	active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+	if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
+	    (active & (1 << ACS_ABO_BITPOS)))
+		return 1;
+	else
+		return 0;
+}
+
+static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int misc_ctl;
+	unsigned char ae;
+
+	/* stop the timestamp timers */
+	misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL);
+	if (misc_ctl & MC_TIMESTAMP_ENABLE)
+		SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl &
+			    (~MC_TIMESTAMP_ENABLE));
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
+		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
+	}
+	/* start timestamp timers */
+	SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE);
+}
+
+#define ESRAM_AUTO_TINIT	BIT(2)
+#define ESRAM_AUTO_TINIT_DONE	BIT(3)
+#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
+#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
+static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
+{
+	void __iomem *csr_addr =
+			(void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v +
+			ESRAM_AUTO_INIT_CSR_OFFSET);
+	unsigned int csr_val;
+	int times = 30;
+
+	if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
+		return 0;
+
+	csr_val = ADF_CSR_RD(csr_addr, 0);
+	if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
+		return 0;
+
+	csr_val = ADF_CSR_RD(csr_addr, 0);
+	csr_val |= ESRAM_AUTO_TINIT;
+	ADF_CSR_WR(csr_addr, 0, csr_val);
+
+	do {
+		qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
+		csr_val = ADF_CSR_RD(csr_addr, 0);
+	} while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
+	if (times < 0) {
+		pr_err("QAT: Failed to init eSRAM!\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+#define SHRAM_INIT_CYCLES 2060
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int ae_reset_csr;
+	unsigned char ae;
+	unsigned int clk_csr;
+	unsigned int times = 100;
+	unsigned int csr;
+
+	/* write to the reset csr */
+	ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
+	ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB);
+	ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB);
+	do {
+		SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
+		if (!(times--))
+			goto out_err;
+		csr = GET_GLB_CSR(handle, ICP_RESET);
+	} while ((handle->hal_handle->ae_mask |
+		 (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr);
+	/* enable clock */
+	clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE);
+	clk_csr |= handle->hal_handle->ae_mask << 0;
+	clk_csr |= handle->hal_handle->slice_mask << 20;
+	SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr);
+	if (qat_hal_check_ae_alive(handle))
+		goto out_err;
+
+	/* Set undefined power-up/reset states to reasonable default values */
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+				  INIT_CTX_ENABLE_VALUE);
+		qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
+				    CTX_STS_INDIRECT,
+				    handle->hal_handle->upc_mask &
+				    INIT_PC_VALUE);
+		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+		qat_hal_put_wakeup_event(handle, ae,
+					 ICP_QAT_UCLO_AE_ALL_CTX,
+					 INIT_WAKEUP_EVENTS_VALUE);
+		qat_hal_put_sig_event(handle, ae,
+				      ICP_QAT_UCLO_AE_ALL_CTX,
+				      INIT_SIG_EVENTS_VALUE);
+	}
+	if (qat_hal_init_esram(handle))
+		goto out_err;
+	if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
+		goto out_err;
+	qat_hal_reset_timestamp(handle);
+
+	return 0;
+out_err:
+	pr_err("QAT: failed to get device out of reset\n");
+	return -EFAULT;
+}
+
+static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
+				unsigned char ae, unsigned int ctx_mask)
+{
+	unsigned int ctx;
+
+	ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+	ctx &= IGNORE_W1C_MASK &
+		(~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static uint64_t qat_hal_parity_64bit(uint64_t word)
+{
+	word ^= word >> 1;
+	word ^= word >> 2;
+	word ^= word >> 4;
+	word ^= word >> 8;
+	word ^= word >> 16;
+	word ^= word >> 32;
+	return word & 1;
+}
+
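+/*
+ * Recompute the seven ECC bits of a microword: clear bits 50:44 and set
+ * each one to the parity of the word masked with its coverage mask.
+ */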
+static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
+{
+	uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
+		bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
+		bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
+		bit6_mask = 0xdaf69a46910ULL;
+
+	/* clear the ecc bits */
+	uword &= ~(0x7fULL << 0x2C);
+	uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
+	uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
+	uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
+	uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
+	uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
+	uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
+	uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
+	return uword;
+}
+
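+/*
+ * Write microwords to the AE control store: ECC bits are recomputed for
+ * each word before its lower and upper halves are written, and the saved
+ * USTORE_ADDRESS is restored afterwards.
+ */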
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+		       unsigned char ae, unsigned int uaddr,
+		       unsigned int words_num, uint64_t *uword)
+{
+	unsigned int ustore_addr;
+	unsigned int i;
+
+	ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+	uaddr |= UA_ECS;
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+	for (i = 0; i < words_num; i++) {
+		unsigned int uwrd_lo, uwrd_hi;
+		uint64_t tmp;
+
+		tmp = qat_hal_set_uword_ecc(uword[i]);
+		uwrd_lo = (unsigned int)(tmp & 0xffffffff);
+		uwrd_hi = (unsigned int)(tmp >> 0x20);
+		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+	}
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
+			       unsigned char ae, unsigned int ctx_mask)
+{
+	unsigned int ctx;
+
+	ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+	ctx &= IGNORE_W1C_MASK;
+	ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
+	ctx |= (ctx_mask << CE_ENABLE_BITPOS);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned char ae;
+	unsigned short reg;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
+			qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
+					     reg, 0);
+			qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
+					     reg, 0);
+		}
+	}
+}
+
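+/*
+ * Run the canned 'inst' microcode image on every AE to clear its registers,
+ * wait for each AE to finish, then restore each AE's context state to the
+ * initial power-up values.
+ */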
+static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned char ae;
+	unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
+	int times = MAX_RETRY_TIMES;
+	unsigned int csr_val = 0;
+	unsigned int savctx = 0;
+	int ret = 0;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
+		csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
+		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
+		csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+		csr_val &= IGNORE_W1C_MASK;
+		csr_val |= CE_NN_MODE;
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
+		qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
+				  (uint64_t *)inst);
+		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+				    handle->hal_handle->upc_mask &
+				    INIT_PC_VALUE);
+		savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
+		qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
+		qat_hal_wr_indr_csr(handle, ae, ctx_mask,
+				    CTX_SIG_EVENTS_INDIRECT, 0);
+		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+		qat_hal_enable_ctx(handle, ae, ctx_mask);
+	}
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		/* wait for AE to finish */
+		do {
+			ret = qat_hal_wait_cycles(handle, ae, 20, 1);
+		} while (ret && times--);
+
+		if (times < 0) {
+			pr_err("QAT: clear GPR of AE %d failed\n", ae);
+			return -EINVAL;
+		}
+		qat_hal_disable_ctx(handle, ae, ctx_mask);
+		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+				  savctx & ACS_ACNO);
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+				  INIT_CTX_ENABLE_VALUE);
+		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+				    handle->hal_handle->upc_mask &
+				    INIT_PC_VALUE);
+		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+		qat_hal_put_wakeup_event(handle, ae, ctx_mask,
+					 INIT_WAKEUP_EVENTS_VALUE);
+		qat_hal_put_sig_event(handle, ae, ctx_mask,
+				      INIT_SIG_EVENTS_VALUE);
+	}
+	return 0;
+}
+
+#define ICP_QAT_AE_OFFSET	0x20000
+#define ICP_QAT_CAP_OFFSET       (ICP_QAT_AE_OFFSET + 0x10000)
+#define LOCAL_TO_XFER_REG_OFFSET    0x800
+#define ICP_QAT_EP_OFFSET	0x3a000
+int qat_hal_init(struct adf_accel_dev *accel_dev)
+{
+	unsigned char ae;
+	unsigned int max_en_ae_id = 0;
+	struct icp_qat_fw_loader_handle *handle;
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *misc_bar =
+			&pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
+	struct adf_bar *sram_bar;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->hal_cap_g_ctl_csr_addr_v =
+		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
+				 ICP_QAT_CAP_OFFSET);
+	handle->hal_cap_ae_xfer_csr_addr_v =
+		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
+				 ICP_QAT_AE_OFFSET);
+	handle->hal_ep_csr_addr_v =
+		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
+				 ICP_QAT_EP_OFFSET);
+	handle->hal_cap_ae_local_csr_addr_v =
+		(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
+				 LOCAL_TO_XFER_REG_OFFSET);
+	handle->pci_dev = pci_info->pci_dev;
+	if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
+		sram_bar =
+			&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
+		handle->hal_sram_addr_v = sram_bar->virt_addr;
+	}
+	handle->fw_auth = (handle->pci_dev->device !=
+			   ADF_DH895XCC_PCI_DEVICE_ID);
+	handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
+	if (!handle->hal_handle)
+		goto out_hal_handle;
+	handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
+	handle->hal_handle->ae_mask = hw_data->ae_mask;
+	handle->hal_handle->slice_mask = hw_data->accel_mask;
+	/* create AE objects */
+	handle->hal_handle->upc_mask = 0x1ffff;
+	handle->hal_handle->max_ustore = 0x4000;
+	for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
+		if (!(hw_data->ae_mask & (1 << ae)))
+			continue;
+		handle->hal_handle->aes[ae].free_addr = 0;
+		handle->hal_handle->aes[ae].free_size =
+		    handle->hal_handle->max_ustore;
+		handle->hal_handle->aes[ae].ustore_size =
+		    handle->hal_handle->max_ustore;
+		handle->hal_handle->aes[ae].live_ctx_mask =
+						ICP_QAT_UCLO_AE_ALL_CTX;
+		max_en_ae_id = ae;
+	}
+	handle->hal_handle->ae_max_num = max_en_ae_id + 1;
+	/* take all AEs out of reset */
+	if (qat_hal_clr_reset(handle)) {
+		dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
+		goto out_err;
+	}
+	qat_hal_clear_xfer(handle);
+	if (!handle->fw_auth) {
+		if (qat_hal_clear_gpr(handle))
+			goto out_err;
+	}
+
+	/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		unsigned int csr_val = 0;
+
+		csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
+		csr_val |= 0x1;
+		qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
+	}
+	accel_dev->fw_loader->fw_loader = handle;
+	return 0;
+
+out_err:
+	kfree(handle->hal_handle);
+out_hal_handle:
+	kfree(handle);
+	return -EFAULT;
+}
+
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
+{
+	if (!handle)
+		return;
+	kfree(handle->hal_handle);
+	kfree(handle);
+}
+
+void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		   unsigned int ctx_mask)
+{
+	int retry = 0;
+	unsigned int fcu_sts = 0;
+
+	if (handle->fw_auth) {
+		SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_START);
+		do {
+			msleep(FW_AUTH_WAIT_PERIOD);
+			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
+			if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
+				return;
+		} while (retry++ < FW_AUTH_MAX_RETRY);
+		pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n", ae,
+		       fcu_sts);
+	} else {
+		qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
+				 ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
+		qat_hal_enable_ctx(handle, ae, ctx_mask);
+	}
+}
+
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		  unsigned int ctx_mask)
+{
+	if (!handle->fw_auth)
+		qat_hal_disable_ctx(handle, ae, ctx_mask);
+}
+
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+		    unsigned char ae, unsigned int ctx_mask, unsigned int upc)
+{
+	qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+			    handle->hal_handle->upc_mask & upc);
+}
+
+static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
+			       unsigned char ae, unsigned int uaddr,
+			       unsigned int words_num, uint64_t *uword)
+{
+	unsigned int i, uwrd_lo, uwrd_hi;
+	unsigned int ustore_addr, misc_control;
+
+	misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
+	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
+			  misc_control & 0xfffffffb);
+	ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+	uaddr |= UA_ECS;
+	for (i = 0; i < words_num; i++) {
+		qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+		uaddr++;
+		uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
+		uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
+		uword[i] = uwrd_hi;
+		uword[i] = (uword[i] << 0x20) | uwrd_lo;
+	}
+	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
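+/*
+ * Write 32-bit data words into AE ustore memory, encoding each value into
+ * the microword format together with its parity bits.
+ */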
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
+		     unsigned char ae, unsigned int uaddr,
+		     unsigned int words_num, unsigned int *data)
+{
+	unsigned int i, ustore_addr;
+
+	ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+	uaddr |= UA_ECS;
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+	for (i = 0; i < words_num; i++) {
+		unsigned int uwrd_lo, uwrd_hi, tmp;
+
+		uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
+			  ((data[i] & 0xff00) << 2) |
+			  (0x3 << 8) | (data[i] & 0xff);
+		uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
+		uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
+		tmp = ((data[i] >> 0x10) & 0xffff);
+		uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
+		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+	}
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+#define MAX_EXEC_INST 100
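+/*
+ * Temporarily run a short microinstruction sequence on one context: the
+ * relevant AE and context state (saved microwords, PC, wakeup/signal events,
+ * LM addresses, condition codes) is captured, the code is executed from
+ * ustore address 0, and the original state is restored on completion.
+ */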
+static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
+				   unsigned char ae, unsigned char ctx,
+				   uint64_t *micro_inst, unsigned int inst_num,
+				   int code_off, unsigned int max_cycle,
+				   unsigned int *endpc)
+{
+	uint64_t savuwords[MAX_EXEC_INST];
+	unsigned int ind_lm_addr0, ind_lm_addr1;
+	unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
+	unsigned int ind_cnt_sig;
+	unsigned int ind_sig, act_sig;
+	unsigned int csr_val = 0, newcsr_val;
+	unsigned int savctx;
+	unsigned int savcc, wakeup_events, savpc;
+	unsigned int ctxarb_ctl, ctx_enables;
+
+	if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
+		pr_err("QAT: invalid instruction num %d\n", inst_num);
+		return -EINVAL;
+	}
+	/* save current context */
+	ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
+	ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
+	ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
+						INDIRECT_LM_ADDR_0_BYTE_INDEX);
+	ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
+						INDIRECT_LM_ADDR_1_BYTE_INDEX);
+	if (inst_num <= MAX_EXEC_INST)
+		qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
+	qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
+	savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
+	savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
+	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+	ctx_enables &= IGNORE_W1C_MASK;
+	savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
+	savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+	ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+	ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+					  FUTURE_COUNT_SIGNAL_INDIRECT);
+	ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+				      CTX_SIG_EVENTS_INDIRECT);
+	act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
+	/* execute micro codes */
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+	qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
+	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
+	if (code_off)
+		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
+	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
+	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+	qat_hal_enable_ctx(handle, ae, (1 << ctx));
+	/* wait for micro codes to finish */
+	if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
+		return -EFAULT;
+	if (endpc) {
+		unsigned int ctx_status;
+
+		ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
+						 CTX_STS_INDIRECT);
+		*endpc = ctx_status & handle->hal_handle->upc_mask;
+	}
+	/* restore to saved context */
+	qat_hal_disable_ctx(handle, ae, (1 << ctx));
+	if (inst_num <= MAX_EXEC_INST)
+		qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
+	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
+			    handle->hal_handle->upc_mask & savpc);
+	csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
+	newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
+	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
+	qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
+	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    LM_ADDR_0_INDIRECT, ind_lm_addr0);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    LM_ADDR_1_INDIRECT, ind_lm_addr1);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    CTX_SIG_EVENTS_INDIRECT, ind_sig);
+	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+	return 0;
+}
+
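+/*
+ * Read a context-relative register by executing a single ALU
+ * microinstruction and sampling the ALU_OUT CSR; the surrounding AE state
+ * is saved and restored around the operation.
+ */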
+static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
+			      unsigned char ae, unsigned char ctx,
+			      enum icp_qat_uof_regtype reg_type,
+			      unsigned short reg_num, unsigned int *data)
+{
+	unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
+	unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
+	unsigned short reg_addr;
+	int status = 0;
+	uint64_t insts, savuword;
+
+	reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+	if (reg_addr == BAD_REGADDR) {
+		pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
+		return -EINVAL;
+	}
+	switch (reg_type) {
+	case ICP_GPA_REL:
+		insts = 0xA070000000ull | (reg_addr & 0x3ff);
+		break;
+	default:
+		insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
+		break;
+	}
+	savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+	ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+	ctx_enables &= IGNORE_W1C_MASK;
+	if (ctx != (savctx & ACS_ACNO))
+		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+				  ctx & ACS_ACNO);
+	qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+	ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+	uaddr = UA_ECS;
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+	insts = qat_hal_set_uword_ecc(insts);
+	uwrd_lo = (unsigned int)(insts & 0xffffffff);
+	uwrd_hi = (unsigned int)(insts >> 0x20);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+	/* delay for at least 8 cycles */
+	qat_hal_wait_cycles(handle, ae, 0x8, 0);
+	/*
+	 * read ALU output
+	 * the instruction should have been executed
+	 * prior to clearing the ECS in putUwords
+	 */
+	*data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+	qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
+	if (ctx != (savctx & ACS_ACNO))
+		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+				  savctx & ACS_ACNO);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+	return status;
+}
+
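+/*
+ * Write a context-relative register by executing a short immediate-load
+ * microinstruction sequence built around the destination register address.
+ */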
+static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
+			      unsigned char ae, unsigned char ctx,
+			      enum icp_qat_uof_regtype reg_type,
+			      unsigned short reg_num, unsigned int data)
+{
+	unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
+	uint64_t insts[] = {
+		0x0F440000000ull,
+		0x0F040000000ull,
+		0x0F0000C0300ull,
+		0x0E000010000ull
+	};
+	const int num_inst = ARRAY_SIZE(insts), code_off = 1;
+	const int imm_w1 = 0, imm_w0 = 1;
+
+	dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+	if (dest_addr == BAD_REGADDR) {
+		pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
+		return -EINVAL;
+	}
+
+	data16lo = 0xffff & data;
+	data16hi = 0xffff & (data >> 0x10);
+	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+					  (0xff & data16hi));
+	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+					   (0xff & data16lo));
+	switch (reg_type) {
+	case ICP_GPA_REL:
+		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+		    ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+		    ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+		break;
+	default:
+		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+		    ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+
+		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+		    ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+		break;
+	}
+
+	return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
+				       code_off, num_inst * 0x5, NULL);
+}
+
+int qat_hal_get_ins_num(void)
+{
+	return ARRAY_SIZE(inst_4b);
+}
+
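+/*
+ * Append the inst_4b template to the microinstruction buffer and patch the
+ * local-memory address and the 32-bit value into its immediate fields;
+ * returns the number of microwords added.
+ */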
+static int qat_hal_concat_micro_code(uint64_t *micro_inst,
+				     unsigned int inst_num, unsigned int size,
+				     unsigned int addr, unsigned int *value)
+{
+	int i;
+	unsigned int cur_value;
+	const uint64_t *inst_arr;
+	int fixup_offset;
+	int usize = 0;
+	int orig_num;
+
+	orig_num = inst_num;
+	cur_value = value[0];
+	inst_arr = inst_4b;
+	usize = ARRAY_SIZE(inst_4b);
+	fixup_offset = inst_num;
+	for (i = 0; i < usize; i++)
+		micro_inst[inst_num++] = inst_arr[i];
+	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
+	fixup_offset++;
+	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
+	fixup_offset++;
+	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
+	fixup_offset++;
+	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
+
+	return inst_num - orig_num;
+}
+
+static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
+				      unsigned char ae, unsigned char ctx,
+				      int *pfirst_exec, uint64_t *micro_inst,
+				      unsigned int inst_num)
+{
+	int stat = 0;
+	unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
+	unsigned int gprb0 = 0, gprb1 = 0;
+
+	if (*pfirst_exec) {
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
+		*pfirst_exec = 0;
+	}
+	stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
+				       inst_num * 0x5, NULL);
+	if (stat != 0)
+		return -EFAULT;
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
+
+	return 0;
+}
+
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+			unsigned char ae,
+			struct icp_qat_uof_batch_init *lm_init_header)
+{
+	struct icp_qat_uof_batch_init *plm_init;
+	uint64_t *micro_inst_arry;
+	int micro_inst_num;
+	int alloc_inst_size;
+	int first_exec = 1;
+	int stat = 0;
+
+	plm_init = lm_init_header->next;
+	alloc_inst_size = lm_init_header->size;
+	if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
+		alloc_inst_size = handle->hal_handle->max_ustore;
+	micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t),
+					GFP_KERNEL);
+	if (!micro_inst_arry)
+		return -ENOMEM;
+	micro_inst_num = 0;
+	while (plm_init) {
+		unsigned int addr, *value, size;
+
+		ae = plm_init->ae;
+		addr = plm_init->addr;
+		value = plm_init->value;
+		size = plm_init->size;
+		micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
+							    micro_inst_num,
+							    size, addr, value);
+		plm_init = plm_init->next;
+	}
+	/* exec micro codes */
+	if (micro_inst_arry && (micro_inst_num > 0)) {
+		micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
+		stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
+						  micro_inst_arry,
+						  micro_inst_num);
+	}
+	kfree(micro_inst_arry);
+	return stat;
+}
+
+static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+				   unsigned char ae, unsigned char ctx,
+				   enum icp_qat_uof_regtype reg_type,
+				   unsigned short reg_num, unsigned int val)
+{
+	int status = 0;
+	unsigned int reg_addr;
+	unsigned int ctx_enables;
+	unsigned short mask;
+	unsigned short dr_offset = 0x10;
+
+	status = ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+	if (CE_INUSE_CONTEXTS & ctx_enables) {
+		if (ctx & 0x1) {
+			pr_err("QAT: bad 4-ctx mode, ctx=0x%x\n", ctx);
+			return -EINVAL;
+		}
+		mask = 0x1f;
+		dr_offset = 0x20;
+	} else {
+		mask = 0x0f;
+	}
+	if (reg_num & ~mask)
+		return -EINVAL;
+	reg_addr = reg_num + (ctx << 0x5);
+	switch (reg_type) {
+	case ICP_SR_RD_REL:
+	case ICP_SR_REL:
+		SET_AE_XFER(handle, ae, reg_addr, val);
+		break;
+	case ICP_DR_RD_REL:
+	case ICP_DR_REL:
+		SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
+		break;
+	default:
+		status = -EINVAL;
+		break;
+	}
+	return status;
+}
+
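+/*
+ * Write a relative write-transfer register: the value is loaded into a
+ * B-bank GPR by a short microinstruction sequence and moved to the transfer
+ * register, after which the original GPR contents are restored.
+ */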
+static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+				   unsigned char ae, unsigned char ctx,
+				   enum icp_qat_uof_regtype reg_type,
+				   unsigned short reg_num, unsigned int data)
+{
+	unsigned int gprval, ctx_enables;
+	unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
+	    data16low;
+	unsigned short reg_mask;
+	int status = 0;
+	uint64_t micro_inst[] = {
+		0x0F440000000ull,
+		0x0F040000000ull,
+		0x0A000000000ull,
+		0x0F0000C0300ull,
+		0x0E000010000ull
+	};
+	const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
+	const unsigned short gprnum = 0, dly = num_inst * 0x5;
+
+	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+	if (CE_INUSE_CONTEXTS & ctx_enables) {
+		if (ctx & 0x1) {
+			pr_err("QAT: bad 4-ctx mode, ctx=0x%x\n", ctx);
+			return -EINVAL;
+		}
+		reg_mask = (unsigned short)~0x1f;
+	} else {
+		reg_mask = (unsigned short)~0xf;
+	}
+	if (reg_num & reg_mask)
+		return -EINVAL;
+	xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+	if (xfr_addr == BAD_REGADDR) {
+		pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
+		return -EINVAL;
+	}
+	qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
+	gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
+	data16low = 0xffff & data;
+	data16hi = 0xffff & (data >> 0x10);
+	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+					  (unsigned short)(0xff & data16hi));
+	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+					   (unsigned short)(0xff & data16low));
+	micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
+	    ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+	micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
+	    ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+	micro_inst[0x2] = micro_inst[0x2] |
+	    ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
+	status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
+					 code_off, dly, NULL);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
+	return status;
+}
+
+static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
+			      unsigned char ae, unsigned char ctx,
+			      unsigned short nn, unsigned int val)
+{
+	unsigned int ctx_enables;
+	int stat = 0;
+
+	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+	ctx_enables &= IGNORE_W1C_MASK;
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
+
+	stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+	return stat;
+}
+
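+/*
+ * Convert an absolute register number into a context-relative register
+ * number and its owning context, depending on whether the AE is in 4- or
+ * 8-context mode.
+ */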
+static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
+				      *handle, unsigned char ae,
+				      unsigned short absreg_num,
+				      unsigned short *relreg,
+				      unsigned char *ctx)
+{
+	unsigned int ctx_enables;
+
+	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+	if (ctx_enables & CE_INUSE_CONTEXTS) {
+		/* 4-ctx mode */
+		*relreg = absreg_num & 0x1F;
+		*ctx = (absreg_num >> 0x4) & 0x6;
+	} else {
+		/* 8-ctx mode */
+		*relreg = absreg_num & 0x0F;
+		*ctx = (absreg_num >> 0x4) & 0x7;
+	}
+	return 0;
+}
+
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+		     unsigned char ae, unsigned char ctx_mask,
+		     enum icp_qat_uof_regtype reg_type,
+		     unsigned short reg_num, unsigned int regdata)
+{
+	int stat = 0;
+	unsigned short reg;
+	unsigned char ctx = 0;
+	enum icp_qat_uof_regtype type;
+
+	if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
+		return -EINVAL;
+
+	do {
+		if (ctx_mask == 0) {
+			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+						   &ctx);
+			type = reg_type - 1;
+		} else {
+			reg = reg_num;
+			type = reg_type;
+			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+				continue;
+		}
+		stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
+		if (stat) {
+			pr_err("QAT: write gpr fail\n");
+			return -EINVAL;
+		}
+	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+	return 0;
+}
+
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+			 unsigned char ae, unsigned char ctx_mask,
+			 enum icp_qat_uof_regtype reg_type,
+			 unsigned short reg_num, unsigned int regdata)
+{
+	int stat = 0;
+	unsigned short reg;
+	unsigned char ctx = 0;
+	enum icp_qat_uof_regtype type;
+
+	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+		return -EINVAL;
+
+	do {
+		if (ctx_mask == 0) {
+			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+						   &ctx);
+			type = reg_type - 3;
+		} else {
+			reg = reg_num;
+			type = reg_type;
+			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+				continue;
+		}
+		stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
+					       regdata);
+		if (stat) {
+			pr_err("QAT: write wr xfer fail\n");
+			return -EINVAL;
+		}
+	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+	return 0;
+}
+
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+			 unsigned char ae, unsigned char ctx_mask,
+			 enum icp_qat_uof_regtype reg_type,
+			 unsigned short reg_num, unsigned int regdata)
+{
+	int stat = 0;
+	unsigned short reg;
+	unsigned char ctx = 0;
+	enum icp_qat_uof_regtype type;
+
+	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+		return -EINVAL;
+
+	do {
+		if (ctx_mask == 0) {
+			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+						   &ctx);
+			type = reg_type - 3;
+		} else {
+			reg = reg_num;
+			type = reg_type;
+			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+				continue;
+		}
+		stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
+					       regdata);
+		if (stat) {
+			pr_err("QAT: write rd xfer fail\n");
+			return -EINVAL;
+		}
+	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+	return 0;
+}
+
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+		    unsigned char ae, unsigned char ctx_mask,
+		    unsigned short reg_num, unsigned int regdata)
+{
+	int stat = 0;
+	unsigned char ctx;
+
+	if (ctx_mask == 0)
+		return -EINVAL;
+
+	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+		if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+			continue;
+		stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
+		if (stat) {
+			pr_err("QAT: write neigh error\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
new file mode 100644
index 0000000..6bd8f6a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -0,0 +1,1673 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_fw_loader_handle.h"
+
+#define UWORD_CPYBUF_SIZE 1024
+#define INVLD_UWORD 0xffffffffffull
+#define PID_MINOR_REV 0xf
+#define PID_MAJOR_REV (0xf << 4)
+
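+/*
+ * Bind microcode image image_num to the next free slice of AE ae and
+ * allocate the region and page bookkeeping for that slice.
+ */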
+static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
+				 unsigned int ae, unsigned int image_num)
+{
+	struct icp_qat_uclo_aedata *ae_data;
+	struct icp_qat_uclo_encapme *encap_image;
+	struct icp_qat_uclo_page *page = NULL;
+	struct icp_qat_uclo_aeslice *ae_slice = NULL;
+
+	ae_data = &obj_handle->ae_data[ae];
+	encap_image = &obj_handle->ae_uimage[image_num];
+	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
+	ae_slice->encap_image = encap_image;
+
+	if (encap_image->img_ptr) {
+		ae_slice->ctx_mask_assigned =
+					encap_image->img_ptr->ctx_assigned;
+		ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
+	} else {
+		ae_slice->ctx_mask_assigned = 0;
+	}
+	ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
+	if (!ae_slice->region)
+		return -ENOMEM;
+	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
+	if (!ae_slice->page)
+		goto out_err;
+	page = ae_slice->page;
+	page->encap_page = encap_image->page;
+	ae_slice->page->region = ae_slice->region;
+	ae_data->slice_num++;
+	return 0;
+out_err:
+	kfree(ae_slice->region);
+	ae_slice->region = NULL;
+	return -ENOMEM;
+}
+
+static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
+{
+	unsigned int i;
+
+	if (!ae_data) {
+		pr_err("QAT: bad argument, ae_data is NULL\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ae_data->slice_num; i++) {
+		kfree(ae_data->ae_slices[i].region);
+		ae_data->ae_slices[i].region = NULL;
+		kfree(ae_data->ae_slices[i].page);
+		ae_data->ae_slices[i].page = NULL;
+	}
+	return 0;
+}
+
+static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
+				 unsigned int str_offset)
+{
+	if ((!str_table->table_len) || (str_offset > str_table->table_len))
+		return NULL;
+	return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
+}
+
+static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
+{
+	int maj = hdr->maj_ver & 0xff;
+	int min = hdr->min_ver & 0xff;
+
+	if (hdr->file_id != ICP_QAT_UOF_FID) {
+		pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
+		return -EINVAL;
+	}
+	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
+		pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
+		       maj, min);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
+{
+	int maj = suof_hdr->maj_ver & 0xff;
+	int min = suof_hdr->min_ver & 0xff;
+
+	if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
+		pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
+		return -EINVAL;
+	}
+	if (suof_hdr->fw_type != 0) {
+		pr_err("QAT: unsupported firmware type\n");
+		return -EINVAL;
+	}
+	if (suof_hdr->num_chunks <= 0x1) {
+		pr_err("QAT: SUOF chunk count is incorrect\n");
+		return -EINVAL;
+	}
+	if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
+		pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
+		       maj, min);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
+				      unsigned int addr, unsigned int *val,
+				      unsigned int num_in_bytes)
+{
+	unsigned int outval;
+	unsigned char *ptr = (unsigned char *)val;
+
+	while (num_in_bytes) {
+		memcpy(&outval, ptr, 4);
+		SRAM_WRITE(handle, addr, outval);
+		num_in_bytes -= 4;
+		ptr += 4;
+		addr += 4;
+	}
+}
+
+static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
+				      unsigned char ae, unsigned int addr,
+				      unsigned int *val,
+				      unsigned int num_in_bytes)
+{
+	unsigned int outval;
+	unsigned char *ptr = (unsigned char *)val;
+
+	addr >>= 0x2; /* convert to uword address */
+
+	while (num_in_bytes) {
+		memcpy(&outval, ptr, 4);
+		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
+		num_in_bytes -= 4;
+		ptr += 4;
+	}
+}
+
+static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
+				   unsigned char ae,
+				   struct icp_qat_uof_batch_init
+				   *umem_init_header)
+{
+	struct icp_qat_uof_batch_init *umem_init;
+
+	if (!umem_init_header)
+		return;
+	umem_init = umem_init_header->next;
+	while (umem_init) {
+		unsigned int addr, *value, size;
+
+		ae = umem_init->ae;
+		addr = umem_init->addr;
+		value = umem_init->value;
+		size = umem_init->size;
+		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
+		umem_init = umem_init->next;
+	}
+}
+
+static void
+qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
+				 struct icp_qat_uof_batch_init **base)
+{
+	struct icp_qat_uof_batch_init *umem_init;
+
+	umem_init = *base;
+	while (umem_init) {
+		struct icp_qat_uof_batch_init *pre;
+
+		pre = umem_init;
+		umem_init = umem_init->next;
+		kfree(pre);
+	}
+	*base = NULL;
+}
+
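+/* Parse the leading decimal digits of str into *num */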
+static int qat_uclo_parse_num(char *str, unsigned int *num)
+{
+	char buf[16] = {0};
+	unsigned long ae = 0;
+	int i;
+
+	strncpy(buf, str, 15);
+	for (i = 0; i < 16; i++) {
+		if (!isdigit(buf[i])) {
+			buf[i] = '\0';
+			break;
+		}
+	}
+	if (kstrtoul(buf, 10, &ae))
+		return -EFAULT;
+
+	*num = (unsigned int)ae;
+	return 0;
+}
+
+static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
+				     struct icp_qat_uof_initmem *init_mem,
+				     unsigned int size_range, unsigned int *ae)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	char *str;
+
+	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
+		pr_err("QAT: initmem is out of range\n");
+		return -EINVAL;
+	}
+	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
+		pr_err("QAT: Memory scope for init_mem error\n");
+		return -EINVAL;
+	}
+	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
+	if (!str) {
+		pr_err("QAT: AE name assigned in UOF init table is NULL\n");
+		return -EINVAL;
+	}
+	if (qat_uclo_parse_num(str, ae)) {
+		pr_err("QAT: Parse num for AE number failed\n");
+		return -EINVAL;
+	}
+	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
+		pr_err("QAT: ae %d out of range\n", *ae);
+		return -EINVAL;
+	}
+	return 0;
+}
+
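+/*
+ * Append one batch init entry per memory value attribute of init_mem to
+ * the per-AE list anchored at init_tab_base, allocating the list head on
+ * first use.
+ */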
+static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
+					   *handle, struct icp_qat_uof_initmem
+					   *init_mem, unsigned int ae,
+					   struct icp_qat_uof_batch_init
+					   **init_tab_base)
+{
+	struct icp_qat_uof_batch_init *init_header, *tail;
+	struct icp_qat_uof_batch_init *mem_init, *tail_old;
+	struct icp_qat_uof_memvar_attr *mem_val_attr;
+	unsigned int i, flag = 0;
+
+	mem_val_attr =
+		(struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
+		sizeof(struct icp_qat_uof_initmem));
+
+	init_header = *init_tab_base;
+	if (!init_header) {
+		init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
+		if (!init_header)
+			return -ENOMEM;
+		init_header->size = 1;
+		*init_tab_base = init_header;
+		flag = 1;
+	}
+	tail_old = init_header;
+	while (tail_old->next)
+		tail_old = tail_old->next;
+	tail = tail_old;
+	for (i = 0; i < init_mem->val_attr_num; i++) {
+		mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
+		if (!mem_init)
+			goto out_err;
+		mem_init->ae = ae;
+		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
+		mem_init->value = &mem_val_attr->value;
+		mem_init->size = 4;
+		mem_init->next = NULL;
+		tail->next = mem_init;
+		tail = mem_init;
+		init_header->size += qat_hal_get_ins_num();
+		mem_val_attr++;
+	}
+	return 0;
+out_err:
+	/* Do not free the list head unless it was allocated in this call */
+	tail_old = tail_old->next;
+	if (flag) {
+		kfree(*init_tab_base);
+		*init_tab_base = NULL;
+	}
+
+	while (tail_old) {
+		mem_init = tail_old->next;
+		kfree(tail_old);
+		tail_old = mem_init;
+	}
+	return -ENOMEM;
+}
+
+static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
+				  struct icp_qat_uof_initmem *init_mem)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int ae;
+
+	if (qat_uclo_fetch_initmem_ae(handle, init_mem,
+				      ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
+		return -EINVAL;
+	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+					    &obj_handle->lm_init_tab[ae]))
+		return -EINVAL;
+	return 0;
+}
+
+static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
+				  struct icp_qat_uof_initmem *init_mem)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int ae, ustore_size, uaddr, i;
+
+	ustore_size = obj_handle->ustore_phy_size;
+	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
+		return -EINVAL;
+	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+					    &obj_handle->umem_init_tab[ae]))
+		return -EINVAL;
+	/* set the highest ustore address referenced */
+	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
+	for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
+		if (obj_handle->ae_data[ae].ae_slices[i].
+		    encap_image->uwords_num < uaddr)
+			obj_handle->ae_data[ae].ae_slices[i].
+			encap_image->uwords_num = uaddr;
+	}
+	return 0;
+}
+
+#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
+static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
+				   struct icp_qat_uof_initmem *init_mem)
+{
+	switch (init_mem->region) {
+	case ICP_QAT_UOF_LMEM_REGION:
+		if (qat_uclo_init_lmem_seg(handle, init_mem))
+			return -EINVAL;
+		break;
+	case ICP_QAT_UOF_UMEM_REGION:
+		if (qat_uclo_init_umem_seg(handle, init_mem))
+			return -EINVAL;
+		break;
+	default:
+		pr_err("QAT: initmem region error. region type=0x%x\n",
+		       init_mem->region);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
+				struct icp_qat_uclo_encapme *image)
+{
+	unsigned int i;
+	struct icp_qat_uclo_encap_page *page;
+	struct icp_qat_uof_image *uof_image;
+	unsigned char ae;
+	unsigned int ustore_size;
+	unsigned int patt_pos;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	uint64_t *fill_data;
+
+	uof_image = image->img_ptr;
+	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
+			    GFP_KERNEL);
+	if (!fill_data)
+		return -ENOMEM;
+	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
+		memcpy(&fill_data[i], &uof_image->fill_pattern,
+		       sizeof(uint64_t));
+	page = image->page;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
+			continue;
+		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
+		patt_pos = page->beg_addr_p + page->micro_words_num;
+
+		qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
+				  page->beg_addr_p, &fill_data[0]);
+		qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
+				  ustore_size - patt_pos + 1,
+				  &fill_data[page->beg_addr_p]);
+	}
+	kfree(fill_data);
+	return 0;
+}
+
+static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
+{
+	int i, ae;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
+
+	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
+		if (initmem->num_in_bytes) {
+			if (qat_uclo_init_ae_memory(handle, initmem))
+				return -EINVAL;
+		}
+		initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
+			(uintptr_t)initmem +
+			sizeof(struct icp_qat_uof_initmem)) +
+			(sizeof(struct icp_qat_uof_memvar_attr) *
+			initmem->val_attr_num));
+	}
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (qat_hal_batch_wr_lm(handle, ae,
+					obj_handle->lm_init_tab[ae])) {
+			pr_err("QAT: failed to batch init lmem for AE %d\n", ae);
+			return -EINVAL;
+		}
+		qat_uclo_cleanup_batch_init_list(handle,
+						 &obj_handle->lm_init_tab[ae]);
+		qat_uclo_batch_wr_umem(handle, ae,
+				       obj_handle->umem_init_tab[ae]);
+		qat_uclo_cleanup_batch_init_list(handle,
+						 &obj_handle->
+						 umem_init_tab[ae]);
+	}
+	return 0;
+}
+
+static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
+				 char *chunk_id, void *cur)
+{
+	int i;
+	struct icp_qat_uof_chunkhdr *chunk_hdr =
+	    (struct icp_qat_uof_chunkhdr *)
+	    ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
+
+	for (i = 0; i < obj_hdr->num_chunks; i++) {
+		if ((cur < (void *)&chunk_hdr[i]) &&
+		    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
+			     ICP_QAT_UOF_OBJID_LEN)) {
+			return &chunk_hdr[i];
+		}
+	}
+	return NULL;
+}
+
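+/*
+ * Fold one byte into the running CRC-16 (polynomial 0x1021) used to
+ * verify UOF/SUOF chunk checksums.
+ */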
+static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
+{
+	int i;
+	unsigned int topbit = 1 << 0xF;
+	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
+
+	reg ^= inbyte << 0x8;
+	for (i = 0; i < 0x8; i++) {
+		if (reg & topbit)
+			reg = (reg << 1) ^ 0x1021;
+		else
+			reg <<= 1;
+	}
+	return reg & 0xFFFF;
+}
+
+static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
+{
+	unsigned int chksum = 0;
+
+	if (ptr)
+		while (num--)
+			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
+	return chksum;
+}
+
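+/*
+ * Find the file chunk whose ID matches chunk_id, verify its checksum and
+ * return it wrapped in a newly allocated object header.
+ */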
+static struct icp_qat_uclo_objhdr *
+qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
+		   char *chunk_id)
+{
+	struct icp_qat_uof_filechunkhdr *file_chunk;
+	struct icp_qat_uclo_objhdr *obj_hdr;
+	char *chunk;
+	int i;
+
+	file_chunk = (struct icp_qat_uof_filechunkhdr *)
+		(buf + sizeof(struct icp_qat_uof_filehdr));
+	for (i = 0; i < file_hdr->num_chunks; i++) {
+		if (!strncmp(file_chunk->chunk_id, chunk_id,
+			     ICP_QAT_UOF_OBJID_LEN)) {
+			chunk = buf + file_chunk->offset;
+			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
+				chunk, file_chunk->size))
+				break;
+			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
+			if (!obj_hdr)
+				break;
+			obj_hdr->file_buff = chunk;
+			obj_hdr->checksum = file_chunk->checksum;
+			obj_hdr->size = file_chunk->size;
+			return obj_hdr;
+		}
+		file_chunk++;
+	}
+	return NULL;
+}
+
+static unsigned int
+qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
+			    struct icp_qat_uof_image *image)
+{
+	struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
+	struct icp_qat_uof_objtable *neigh_reg_tab;
+	struct icp_qat_uof_code_page *code_page;
+
+	code_page = (struct icp_qat_uof_code_page *)
+			((char *)image + sizeof(struct icp_qat_uof_image));
+	uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+		     code_page->uc_var_tab_offset);
+	imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+		      code_page->imp_var_tab_offset);
+	imp_expr_tab = (struct icp_qat_uof_objtable *)
+		       (encap_uof_obj->beg_uof +
+		       code_page->imp_expr_tab_offset);
+	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
+	    imp_expr_tab->entry_num) {
+		pr_err("QAT: UOF can't contain imported variables\n");
+		return -EINVAL;
+	}
+	neigh_reg_tab = (struct icp_qat_uof_objtable *)
+			(encap_uof_obj->beg_uof +
+			code_page->neigh_reg_tab_offset);
+	if (neigh_reg_tab->entry_num) {
+		pr_err("QAT: UOF can't contain shared control store feature\n");
+		return -EINVAL;
+	}
+	if (image->numpages > 1) {
+		pr_err("QAT: UOF can't contain multiple pages\n");
+		return -EINVAL;
+	}
+	if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
+		pr_err("QAT: UOF can't use shared control store feature\n");
+		return -EFAULT;
+	}
+	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
+		pr_err("QAT: UOF can't use reloadable feature\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
+				     *encap_uof_obj,
+				     struct icp_qat_uof_image *img,
+				     struct icp_qat_uclo_encap_page *page)
+{
+	struct icp_qat_uof_code_page *code_page;
+	struct icp_qat_uof_code_area *code_area;
+	struct icp_qat_uof_objtable *uword_block_tab;
+	struct icp_qat_uof_uword_block *uwblock;
+	int i;
+
+	code_page = (struct icp_qat_uof_code_page *)
+			((char *)img + sizeof(struct icp_qat_uof_image));
+	page->def_page = code_page->def_page;
+	page->page_region = code_page->page_region;
+	page->beg_addr_v = code_page->beg_addr_v;
+	page->beg_addr_p = code_page->beg_addr_p;
+	code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
+						code_page->code_area_offset);
+	page->micro_words_num = code_area->micro_words_num;
+	uword_block_tab = (struct icp_qat_uof_objtable *)
+			  (encap_uof_obj->beg_uof +
+			  code_area->uword_block_tab);
+	page->uwblock_num = uword_block_tab->entry_num;
+	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
+			sizeof(struct icp_qat_uof_objtable));
+	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
+	for (i = 0; i < uword_block_tab->entry_num; i++)
+		page->uwblock[i].micro_words =
+		(uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
+}
+
+static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
+			       struct icp_qat_uclo_encapme *ae_uimage,
+			       int max_image)
+{
+	int i, j;
+	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
+	struct icp_qat_uof_image *image;
+	struct icp_qat_uof_objtable *ae_regtab;
+	struct icp_qat_uof_objtable *init_reg_sym_tab;
+	struct icp_qat_uof_objtable *sbreak_tab;
+	struct icp_qat_uof_encap_obj *encap_uof_obj =
+					&obj_handle->encap_uof_obj;
+
+	for (j = 0; j < max_image; j++) {
+		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+						ICP_QAT_UOF_IMAG, chunk_hdr);
+		if (!chunk_hdr)
+			break;
+		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
+						     chunk_hdr->offset);
+		ae_regtab = (struct icp_qat_uof_objtable *)
+			   (image->reg_tab_offset +
+			   obj_handle->obj_hdr->file_buff);
+		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
+		ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
+			(((char *)ae_regtab) +
+			sizeof(struct icp_qat_uof_objtable));
+		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
+				   (image->init_reg_sym_tab +
+				   obj_handle->obj_hdr->file_buff);
+		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
+		ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
+			(((char *)init_reg_sym_tab) +
+			sizeof(struct icp_qat_uof_objtable));
+		sbreak_tab = (struct icp_qat_uof_objtable *)
+			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
+		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
+		ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
+				      (((char *)sbreak_tab) +
+				      sizeof(struct icp_qat_uof_objtable));
+		ae_uimage[j].img_ptr = image;
+		if (qat_uclo_check_image_compat(encap_uof_obj, image))
+			goto out_err;
+		ae_uimage[j].page =
+			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
+				GFP_KERNEL);
+		if (!ae_uimage[j].page)
+			goto out_err;
+		qat_uclo_map_image_page(encap_uof_obj, image,
+					ae_uimage[j].page);
+	}
+	return j;
+out_err:
+	for (i = 0; i < j; i++)
+		kfree(ae_uimage[i].page);
+	return 0;
+}
+
+static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
+{
+	int i, ae;
+	int mflag = 0;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+
+	for (ae = 0; ae < max_ae; ae++) {
+		if (!test_bit(ae,
+			      (unsigned long *)&handle->hal_handle->ae_mask))
+			continue;
+		for (i = 0; i < obj_handle->uimage_num; i++) {
+			if (!test_bit(ae, (unsigned long *)
+			&obj_handle->ae_uimage[i].img_ptr->ae_assigned))
+				continue;
+			mflag = 1;
+			if (qat_uclo_init_ae_data(obj_handle, ae, i))
+				return -EINVAL;
+		}
+	}
+	if (!mflag) {
+		pr_err("QAT: uimage uses AE not set\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct icp_qat_uof_strtable *
+qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
+		       char *tab_name, struct icp_qat_uof_strtable *str_table)
+{
+	struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+	chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
+					obj_hdr->file_buff, tab_name, NULL);
+	if (chunk_hdr) {
+		int hdr_size;
+
+		memcpy(&str_table->table_len, obj_hdr->file_buff +
+		       chunk_hdr->offset, sizeof(str_table->table_len));
+		hdr_size = (char *)&str_table->strings - (char *)str_table;
+		str_table->strings = (uintptr_t)obj_hdr->file_buff +
+					chunk_hdr->offset + hdr_size;
+		return str_table;
+	}
+	return NULL;
+}
+
+static void
+qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
+			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
+{
+	struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+	chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+					ICP_QAT_UOF_IMEM, NULL);
+	if (chunk_hdr) {
+		memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
+			chunk_hdr->offset, sizeof(unsigned int));
+		init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
+		(encap_uof_obj->beg_uof + chunk_hdr->offset +
+		sizeof(unsigned int));
+	}
+}
+
+static unsigned int
+qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
+{
+	switch (handle->pci_dev->device) {
+	case ADF_DH895XCC_PCI_DEVICE_ID:
+		return ICP_QAT_AC_895XCC_DEV_TYPE;
+	case ADF_C62X_PCI_DEVICE_ID:
+		return ICP_QAT_AC_C62X_DEV_TYPE;
+	case ADF_C3XXX_PCI_DEVICE_ID:
+		return ICP_QAT_AC_C3XXX_DEV_TYPE;
+	default:
+		pr_err("QAT: unsupported device 0x%x\n",
+		       handle->pci_dev->device);
+		return 0;
+	}
+}
+
+static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
+{
+	unsigned int maj_ver, prod_type = obj_handle->prod_type;
+
+	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
+		pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
+		       obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
+		       prod_type);
+		return -EINVAL;
+	}
+	maj_ver = obj_handle->prod_rev & 0xff;
+	if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
+	    (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
+		pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
+			     unsigned char ae, unsigned char ctx_mask,
+			     enum icp_qat_uof_regtype reg_type,
+			     unsigned short reg_addr, unsigned int value)
+{
+	switch (reg_type) {
+	case ICP_GPA_ABS:
+	case ICP_GPB_ABS:
+		ctx_mask = 0;
+		/* fall through */
+	case ICP_GPA_REL:
+	case ICP_GPB_REL:
+		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
+					reg_addr, value);
+	case ICP_SR_ABS:
+	case ICP_DR_ABS:
+	case ICP_SR_RD_ABS:
+	case ICP_DR_RD_ABS:
+		ctx_mask = 0;
+		/* fall through */
+	case ICP_SR_REL:
+	case ICP_DR_REL:
+	case ICP_SR_RD_REL:
+	case ICP_DR_RD_REL:
+		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
+					    reg_addr, value);
+	case ICP_SR_WR_ABS:
+	case ICP_DR_WR_ABS:
+		ctx_mask = 0;
+		/* fall through */
+	case ICP_SR_WR_REL:
+	case ICP_DR_WR_REL:
+		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
+					    reg_addr, value);
+	case ICP_NEIGH_REL:
+		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
+	default:
+		pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
+				 unsigned int ae,
+				 struct icp_qat_uclo_encapme *encap_ae)
+{
+	unsigned int i;
+	unsigned char ctx_mask;
+	struct icp_qat_uof_init_regsym *init_regsym;
+
+	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
+	    ICP_QAT_UCLO_MAX_CTX)
+		ctx_mask = 0xff;
+	else
+		ctx_mask = 0x55;
+
+	for (i = 0; i < encap_ae->init_regsym_num; i++) {
+		unsigned int exp_res;
+
+		init_regsym = &encap_ae->init_regsym[i];
+		exp_res = init_regsym->value;
+		switch (init_regsym->init_type) {
+		case ICP_QAT_UOF_INIT_REG:
+			qat_uclo_init_reg(handle, ae, ctx_mask,
+					  (enum icp_qat_uof_regtype)
+					  init_regsym->reg_type,
+					  (unsigned short)init_regsym->reg_addr,
+					  exp_res);
+			break;
+		case ICP_QAT_UOF_INIT_REG_CTX:
+			/* check if ctx is appropriate for the ctxMode */
+			if (!((1 << init_regsym->ctx) & ctx_mask)) {
+				pr_err("QAT: invalid ctx num = 0x%x\n",
+				       init_regsym->ctx);
+				return -EINVAL;
+			}
+			qat_uclo_init_reg(handle, ae,
+					  (unsigned char)
+					  (1 << init_regsym->ctx),
+					  (enum icp_qat_uof_regtype)
+					  init_regsym->reg_type,
+					  (unsigned short)init_regsym->reg_addr,
+					  exp_res);
+			break;
+		case ICP_QAT_UOF_INIT_EXPR:
+			pr_err("QAT: INIT_EXPR feature not supported\n");
+			return -EINVAL;
+		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
+			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
+			return -EINVAL;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int s, ae;
+
+	if (obj_handle->global_inited)
+		return 0;
+	if (obj_handle->init_mem_tab.entry_num) {
+		if (qat_uclo_init_memory(handle)) {
+			pr_err("QAT: initialize memory failed\n");
+			return -EINVAL;
+		}
+	}
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
+			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+				continue;
+			if (qat_uclo_init_reg_sym(handle, ae,
+						  obj_handle->ae_data[ae].
+						  ae_slices[s].encap_image))
+				return -EINVAL;
+		}
+	}
+	obj_handle->global_inited = 1;
+	return 0;
+}
+
+static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned char ae, nn_mode, s;
+	struct icp_qat_uof_image *uof_image;
+	struct icp_qat_uclo_aedata *ae_data;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!test_bit(ae,
+			      (unsigned long *)&handle->hal_handle->ae_mask))
+			continue;
+		ae_data = &obj_handle->ae_data[ae];
+		for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
+				      ICP_QAT_UCLO_MAX_CTX); s++) {
+			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+				continue;
+			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
+			if (qat_hal_set_ae_ctx_mode(handle, ae,
+						    (char)ICP_QAT_CTX_MODE
+						    (uof_image->ae_mode))) {
+				pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
+				return -EFAULT;
+			}
+			nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
+			if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
+				pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
+				return -EFAULT;
+			}
+			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
+						   (char)ICP_QAT_LOC_MEM0_MODE
+						   (uof_image->ae_mode))) {
+				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
+				return -EFAULT;
+			}
+			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
+						   (char)ICP_QAT_LOC_MEM1_MODE
+						   (uof_image->ae_mode))) {
+				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
+				return -EFAULT;
+			}
+		}
+	}
+	return 0;
+}
+
+static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	struct icp_qat_uclo_encapme *image;
+	int a;
+
+	for (a = 0; a < obj_handle->uimage_num; a++) {
+		image = &obj_handle->ae_uimage[a];
+		image->uwords_num = image->page->beg_addr_p +
+					image->page->micro_words_num;
+	}
+}
+
+static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int ae;
+
+	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
+	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
+					     obj_handle->obj_hdr->file_buff;
+	obj_handle->uword_in_bytes = 6;
+	obj_handle->prod_type = qat_uclo_get_dev_type(handle);
+	obj_handle->prod_rev = PID_MAJOR_REV |
+			(PID_MINOR_REV & handle->hal_handle->revision_id);
+	if (qat_uclo_check_uof_compat(obj_handle)) {
+		pr_err("QAT: UOF incompatible\n");
+		return -EINVAL;
+	}
+	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
+					GFP_KERNEL);
+	if (!obj_handle->uword_buf)
+		return -ENOMEM;
+	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
+	if (!obj_handle->obj_hdr->file_buff ||
+	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
+				    &obj_handle->str_table)) {
+		pr_err("QAT: UOF doesn't have effective images\n");
+		goto out_err;
+	}
+	obj_handle->uimage_num =
+		qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
+				    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
+	if (!obj_handle->uimage_num)
+		goto out_err;
+	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
+		pr_err("QAT: Bad object\n");
+		goto out_check_uof_aemask_err;
+	}
+	qat_uclo_init_uword_num(handle);
+	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
+				   &obj_handle->init_mem_tab);
+	if (qat_uclo_set_ae_mode(handle))
+		goto out_check_uof_aemask_err;
+	return 0;
+out_check_uof_aemask_err:
+	for (ae = 0; ae < obj_handle->uimage_num; ae++)
+		kfree(obj_handle->ae_uimage[ae].page);
+out_err:
+	kfree(obj_handle->uword_buf);
+	return -EFAULT;
+}
+
+static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
+				      struct icp_qat_suof_filehdr *suof_ptr,
+				      int suof_size)
+{
+	unsigned int check_sum = 0;
+	unsigned int min_ver_offset = 0;
+	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+
+	suof_handle->file_id = ICP_QAT_SUOF_FID;
+	suof_handle->suof_buf = (char *)suof_ptr;
+	suof_handle->suof_size = suof_size;
+	min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
+					      min_ver);
+	check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
+					       min_ver_offset);
+	if (check_sum != suof_ptr->check_sum) {
+		pr_err("QAT: incorrect SUOF checksum\n");
+		return -EINVAL;
+	}
+	suof_handle->check_sum = suof_ptr->check_sum;
+	suof_handle->min_ver = suof_ptr->min_ver;
+	suof_handle->maj_ver = suof_ptr->maj_ver;
+	suof_handle->fw_type = suof_ptr->fw_type;
+	return 0;
+}
+
+static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
+			      struct icp_qat_suof_img_hdr *suof_img_hdr,
+			      struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
+{
+	struct icp_qat_simg_ae_mode *ae_mode;
+	struct icp_qat_suof_objhdr *suof_objhdr;
+
+	suof_img_hdr->simg_buf  = (suof_handle->suof_buf +
+				   suof_chunk_hdr->offset +
+				   sizeof(*suof_objhdr));
+	suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
+				  (suof_handle->suof_buf +
+				   suof_chunk_hdr->offset))->img_length;
+
+	suof_img_hdr->css_header = suof_img_hdr->simg_buf;
+	suof_img_hdr->css_key = (suof_img_hdr->css_header +
+				 sizeof(struct icp_qat_css_hdr));
+	suof_img_hdr->css_signature = suof_img_hdr->css_key +
+				      ICP_QAT_CSS_FWSK_MODULUS_LEN +
+				      ICP_QAT_CSS_FWSK_EXPONENT_LEN;
+	suof_img_hdr->css_simg = suof_img_hdr->css_signature +
+				 ICP_QAT_CSS_SIGNATURE_LEN;
+
+	ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
+	suof_img_hdr->ae_mask = ae_mode->ae_mask;
+	suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
+	suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
+	suof_img_hdr->fw_type = ae_mode->fw_type;
+}
+
+static void
+qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
+			  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
+{
+	char **sym_str = (char **)&suof_handle->sym_str;
+	unsigned int *sym_size = &suof_handle->sym_size;
+	struct icp_qat_suof_strtable *str_table_obj;
+
+	*sym_size = *(unsigned int *)(uintptr_t)
+		   (suof_chunk_hdr->offset + suof_handle->suof_buf);
+	*sym_str = (char *)(uintptr_t)
+		   (suof_handle->suof_buf + suof_chunk_hdr->offset +
+		   sizeof(str_table_obj->tab_length));
+}
+
+static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
+				      struct icp_qat_suof_img_hdr *img_hdr)
+{
+	struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
+	unsigned int prod_rev, maj_ver, prod_type;
+
+	prod_type = qat_uclo_get_dev_type(handle);
+	img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
+	prod_rev = PID_MAJOR_REV |
+			 (PID_MINOR_REV & handle->hal_handle->revision_id);
+	if (img_ae_mode->dev_type != prod_type) {
+		pr_err("QAT: incompatible product type %x\n",
+		       img_ae_mode->dev_type);
+		return -EINVAL;
+	}
+	maj_ver = prod_rev & 0xff;
+	if ((maj_ver > img_ae_mode->devmax_ver) ||
+	    (maj_ver < img_ae_mode->devmin_ver)) {
+		pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
+
+	kfree(sobj_handle->img_table.simg_hdr);
+	sobj_handle->img_table.simg_hdr = NULL;
+	kfree(handle->sobj_handle);
+	handle->sobj_handle = NULL;
+}
+
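+/*
+ * Swap the image at img_id with the last entry of the image table; the
+ * caller uses this to make sure the image for AE0 is loaded last.
+ */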
+static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
+			      unsigned int img_id, unsigned int num_simgs)
+{
+	struct icp_qat_suof_img_hdr img_header;
+
+	if (img_id != num_simgs - 1) {
+		memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
+		       sizeof(*suof_img_hdr));
+		memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
+		       sizeof(*suof_img_hdr));
+		memcpy(&suof_img_hdr[img_id], &img_header,
+		       sizeof(*suof_img_hdr));
+	}
+}
+
+static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
+			     struct icp_qat_suof_filehdr *suof_ptr,
+			     int suof_size)
+{
+	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+	struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
+	struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
+	int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
+	unsigned int i = 0;
+	struct icp_qat_suof_img_hdr img_header;
+
+	if (!suof_ptr || (suof_size == 0)) {
+		pr_err("QAT: input parameter SUOF pointer/size is invalid\n");
+		return -EINVAL;
+	}
+	if (qat_uclo_check_suof_format(suof_ptr))
+		return -EINVAL;
+	ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
+	if (ret)
+		return ret;
+	suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
+			 ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
+
+	qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
+	suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
+
+	if (suof_handle->img_table.num_simgs != 0) {
+		suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
+				       sizeof(img_header),
+				       GFP_KERNEL);
+		if (!suof_img_hdr)
+			return -ENOMEM;
+		suof_handle->img_table.simg_hdr = suof_img_hdr;
+	}
+
+	for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
+		qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i],
+				  &suof_chunk_hdr[1 + i]);
+		ret = qat_uclo_check_simg_compat(handle,
+						 &suof_img_hdr[i]);
+		if (ret)
+			return ret;
+		if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
+			ae0_img = i;
+	}
+	qat_uclo_tail_img(suof_img_hdr, ae0_img,
+			  suof_handle->img_table.num_simgs);
+	return 0;
+}
+
+#define ADD_ADDR(high, low)  ((((uint64_t)high) << 32) + low)
+#define BITS_IN_DWORD 32
+
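+/*
+ * Point the FCU at the authentication descriptor and poll FCU_STATUS
+ * until the image is verified or the retry limit expires.
+ */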
+static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
+			    struct icp_qat_fw_auth_desc *desc)
+{
+	unsigned int fcu_sts, retry = 0;
+	u64 bus_addr;
+
+	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
+			   - sizeof(struct icp_qat_auth_chunk);
+	SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD));
+	SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr);
+	SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH);
+
+	do {
+		msleep(FW_AUTH_WAIT_PERIOD);
+		fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
+		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
+			goto auth_fail;
+		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
+			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
+				return 0;
+	} while (retry++ < FW_AUTH_MAX_RETRY);
+auth_fail:
+	pr_err("QAT: authentication error (FCU_STATUS = 0x%x), retry = %d\n",
+	       fcu_sts & FCU_AUTH_STS_MASK, retry);
+	return -EINVAL;
+}
+
+static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
+			       struct icp_firml_dram_desc *dram_desc,
+			       unsigned int size)
+{
+	void *vptr;
+	dma_addr_t ptr;
+
+	vptr = dma_alloc_coherent(&handle->pci_dev->dev,
+				  size, &ptr, GFP_KERNEL);
+	if (!vptr)
+		return -ENOMEM;
+	dram_desc->dram_base_addr_v = vptr;
+	dram_desc->dram_bus_addr = ptr;
+	dram_desc->dram_size = size;
+	return 0;
+}
+
+static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
+			       struct icp_firml_dram_desc *dram_desc)
+{
+	dma_free_coherent(&handle->pci_dev->dev,
+			  (size_t)(dram_desc->dram_size),
+			  (dram_desc->dram_base_addr_v),
+			  dram_desc->dram_bus_addr);
+	memset(dram_desc, 0, sizeof(*dram_desc));
+}
+
+static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
+				   struct icp_qat_fw_auth_desc **desc)
+{
+	struct icp_firml_dram_desc dram_desc;
+
+	dram_desc.dram_base_addr_v = *desc;
+	dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
+				   (*desc))->chunk_bus_addr;
+	dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
+			       (*desc))->chunk_size;
+	qat_uclo_simg_free(handle, &dram_desc);
+}
+
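+/*
+ * Copy the signed image into DMA-coherent memory in the layout the FCU
+ * expects (CSS header, public key, padding, exponent, signature, image
+ * body) and record the bus address of each part in the authentication
+ * descriptor.
+ */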
+static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
+				char *image, unsigned int size,
+				struct icp_qat_fw_auth_desc **desc)
+{
+	struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+	struct icp_qat_fw_auth_desc *auth_desc;
+	struct icp_qat_auth_chunk *auth_chunk;
+	u64 virt_addr,  bus_addr, virt_base;
+	unsigned int length, simg_offset = sizeof(*auth_chunk);
+	struct icp_firml_dram_desc img_desc;
+
+	if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
+		pr_err("QAT: error, input image size overflow %d\n", size);
+		return -EINVAL;
+	}
+	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
+		 ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
+		 size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
+	if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
+		pr_err("QAT: error, failed to allocate contiguous DRAM\n");
+		return -ENOMEM;
+	}
+
+	auth_chunk = img_desc.dram_base_addr_v;
+	auth_chunk->chunk_size = img_desc.dram_size;
+	auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
+	virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
+	bus_addr  = img_desc.dram_bus_addr + simg_offset;
+	auth_desc = img_desc.dram_base_addr_v;
+	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+	auth_desc->css_hdr_low = (unsigned int)bus_addr;
+	virt_addr = virt_base;
+
+	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
+	/* pub key */
+	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
+			   sizeof(*css_hdr);
+	virt_addr = virt_addr + sizeof(*css_hdr);
+
+	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
+
+	memcpy((void *)(uintptr_t)virt_addr,
+	       (void *)(image + sizeof(*css_hdr)),
+	       ICP_QAT_CSS_FWSK_MODULUS_LEN);
+	/* padding */
+	memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN),
+	       0, ICP_QAT_CSS_FWSK_PAD_LEN);
+
+	/* exponent */
+	memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
+	       ICP_QAT_CSS_FWSK_PAD_LEN),
+	       (void *)(image + sizeof(*css_hdr) +
+			ICP_QAT_CSS_FWSK_MODULUS_LEN),
+	       sizeof(unsigned int));
+
+	/* signature */
+	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
+			    auth_desc->fwsk_pub_low) +
+		   ICP_QAT_CSS_FWSK_PUB_LEN;
+	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
+	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+	auth_desc->signature_low = (unsigned int)bus_addr;
+
+	memcpy((void *)(uintptr_t)virt_addr,
+	       (void *)(image + sizeof(*css_hdr) +
+	       ICP_QAT_CSS_FWSK_MODULUS_LEN +
+	       ICP_QAT_CSS_FWSK_EXPONENT_LEN),
+	       ICP_QAT_CSS_SIGNATURE_LEN);
+
+	bus_addr = ADD_ADDR(auth_desc->signature_high,
+			    auth_desc->signature_low) +
+		   ICP_QAT_CSS_SIGNATURE_LEN;
+	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;
+
+	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+	auth_desc->img_low = (unsigned int)bus_addr;
+	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
+	memcpy((void *)(uintptr_t)virt_addr,
+	       (void *)(image + ICP_QAT_AE_IMG_OFFSET),
+	       auth_desc->img_len);
+	virt_addr = virt_base;
+	/* AE firmware */
+	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
+	    CSS_AE_FIRMWARE) {
+		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
+		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
+		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
+				    auth_desc->img_ae_mode_data_low) +
+			   sizeof(struct icp_qat_simg_ae_mode);
+
+		auth_desc->img_ae_init_data_high = (unsigned int)
+						 (bus_addr >> BITS_IN_DWORD);
+		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
+		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
+		auth_desc->img_ae_insts_high = (unsigned int)
+					     (bus_addr >> BITS_IN_DWORD);
+		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
+	} else {
+		auth_desc->img_ae_insts_high = auth_desc->img_high;
+		auth_desc->img_ae_insts_low = auth_desc->img_low;
+	}
+	*desc = auth_desc;
+	return 0;
+}
+
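+/*
+ * Issue an FCU load command for every AE selected by the image's ae_mask
+ * and poll FCU_STATUS until each load completes or times out.
+ */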
+static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
+			    struct icp_qat_fw_auth_desc *desc)
+{
+	unsigned int i;
+	unsigned int fcu_sts;
+	struct icp_qat_simg_ae_mode *virt_addr;
+	unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
+
+	virt_addr = (void *)((uintptr_t)desc +
+		     sizeof(struct icp_qat_auth_chunk) +
+		     sizeof(struct icp_qat_css_hdr) +
+		     ICP_QAT_CSS_FWSK_PUB_LEN +
+		     ICP_QAT_CSS_SIGNATURE_LEN);
+	for (i = 0; i < handle->hal_handle->ae_max_num; i++) {
+		int retry = 0;
+
+		if (!((virt_addr->ae_mask >> i) & 0x1))
+			continue;
+		if (qat_hal_check_ae_active(handle, i)) {
+			pr_err("QAT: AE %d is active\n", i);
+			return -EINVAL;
+		}
+		SET_CAP_CSR(handle, FCU_CONTROL,
+			    (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));
+
+		do {
+			msleep(FW_AUTH_WAIT_PERIOD);
+			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
+			if (((fcu_sts & FCU_AUTH_STS_MASK) ==
+			    FCU_STS_LOAD_DONE) &&
+			    ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i)))
+				break;
+		} while (retry++ < FW_AUTH_MAX_RETRY);
+		if (retry > FW_AUTH_MAX_RETRY) {
+			pr_err("QAT: firmware load timed out, retry = %d\n",
+			       retry);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
+				 void *addr_ptr, int mem_size)
+{
+	struct icp_qat_suof_handle *suof_handle;
+
+	suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
+	if (!suof_handle)
+		return -ENOMEM;
+	handle->sobj_handle = suof_handle;
+	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
+		qat_uclo_del_suof(handle);
+		pr_err("QAT: map SUOF failed\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+		       void *addr_ptr, int mem_size)
+{
+	struct icp_qat_fw_auth_desc *desc = NULL;
+	int status = 0;
+
+	if (handle->fw_auth) {
+		status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size,
+					      &desc);
+		if (!status) {
+			status = qat_uclo_auth_fw(handle, desc);
+			qat_uclo_ummap_auth_fw(handle, &desc);
+		}
+	} else {
+		if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
+			pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
+			return -EINVAL;
+		}
+		qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
+	}
+	return status;
+}
+
+static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+				void *addr_ptr, int mem_size)
+{
+	struct icp_qat_uof_filehdr *filehdr;
+	struct icp_qat_uclo_objhandle *objhdl;
+
+	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
+	if (!objhdl)
+		return -ENOMEM;
+	objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
+	if (!objhdl->obj_buf)
+		goto out_objbuf_err;
+	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
+	if (qat_uclo_check_uof_format(filehdr))
+		goto out_objhdr_err;
+	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
+					     ICP_QAT_UOF_OBJS);
+	if (!objhdl->obj_hdr) {
+		pr_err("QAT: object file chunk is null\n");
+		goto out_objhdr_err;
+	}
+	handle->obj_handle = objhdl;
+	if (qat_uclo_parse_uof_obj(handle))
+		goto out_overlay_obj_err;
+	return 0;
+
+out_overlay_obj_err:
+	handle->obj_handle = NULL;
+	kfree(objhdl->obj_hdr);
+out_objhdr_err:
+	kfree(objhdl->obj_buf);
+out_objbuf_err:
+	kfree(objhdl);
+	return -ENOMEM;
+}
+
+int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
+		     void *addr_ptr, int mem_size)
+{
+	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
+		     (sizeof(handle->hal_handle->ae_mask) * 8));
+
+	if (!handle || !addr_ptr || mem_size < 24)
+		return -EINVAL;
+
+	return (handle->fw_auth) ?
+			qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) :
+			qat_uclo_map_uof_obj(handle, addr_ptr, mem_size);
+}
+
+void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int a;
+
+	if (handle->sobj_handle)
+		qat_uclo_del_suof(handle);
+	if (!obj_handle)
+		return;
+
+	kfree(obj_handle->uword_buf);
+	for (a = 0; a < obj_handle->uimage_num; a++)
+		kfree(obj_handle->ae_uimage[a].page);
+
+	for (a = 0; a < handle->hal_handle->ae_max_num; a++)
+		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
+
+	kfree(obj_handle->obj_hdr);
+	kfree(obj_handle->obj_buf);
+	kfree(obj_handle);
+	handle->obj_handle = NULL;
+}
+
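+/*
+ * Return in *uword the microword stored at raddr within the encapsulated
+ * page; the fill pattern is used when no page is given or the stored word
+ * is the invalid-uword marker.
+ */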
+static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
+				 struct icp_qat_uclo_encap_page *encap_page,
+				 uint64_t *uword, unsigned int addr_p,
+				 unsigned int raddr, uint64_t fill)
+{
+	uint64_t uwrd = 0;
+	unsigned int i;
+
+	if (!encap_page) {
+		*uword = fill;
+		return;
+	}
+	for (i = 0; i < encap_page->uwblock_num; i++) {
+		if (raddr >= encap_page->uwblock[i].start_addr &&
+		    raddr <= encap_page->uwblock[i].start_addr +
+		    encap_page->uwblock[i].words_num - 1) {
+			raddr -= encap_page->uwblock[i].start_addr;
+			raddr *= obj_handle->uword_in_bytes;
+			memcpy(&uwrd, (void *)(((uintptr_t)
+			       encap_page->uwblock[i].micro_words) + raddr),
+			       obj_handle->uword_in_bytes);
+			uwrd = uwrd & 0xbffffffffffull;
+		}
+	}
+	*uword = uwrd;
+	if (*uword == INVLD_UWORD)
+		*uword = fill;
+}
+
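+/*
+ * Write one encapsulated code page into the AE control store, staging up
+ * to UWORD_CPYBUF_SIZE microwords at a time in the uword buffer.
+ */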
+static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
+					struct icp_qat_uclo_encap_page
+					*encap_page, unsigned int ae)
+{
+	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	uint64_t fill_pat;
+
+	/* load the page starting at appropriate ustore address */
+	/* get fill-pattern from an image -- they are all the same */
+	memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
+	       sizeof(uint64_t));
+	uw_physical_addr = encap_page->beg_addr_p;
+	uw_relative_addr = 0;
+	words_num = encap_page->micro_words_num;
+	while (words_num) {
+		if (words_num < UWORD_CPYBUF_SIZE)
+			cpylen = words_num;
+		else
+			cpylen = UWORD_CPYBUF_SIZE;
+
+		/* load the buffer */
+		for (i = 0; i < cpylen; i++)
+			qat_uclo_fill_uwords(obj_handle, encap_page,
+					     &obj_handle->uword_buf[i],
+					     uw_physical_addr + i,
+					     uw_relative_addr + i, fill_pat);
+
+		/* copy the buffer to ustore */
+		qat_hal_wr_uwords(handle, (unsigned char)ae,
+				  uw_physical_addr, cpylen,
+				  obj_handle->uword_buf);
+
+		uw_physical_addr += cpylen;
+		uw_relative_addr += cpylen;
+		words_num -= cpylen;
+	}
+}
+
+static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
+				    struct icp_qat_uof_image *image)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int ctx_mask, s;
+	struct icp_qat_uclo_page *page;
+	unsigned char ae;
+	int ctx;
+
+	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
+		ctx_mask = 0xff;
+	else
+		ctx_mask = 0x55;
+	/*
+	 * Load the default page and set the assigned CTX PC
+	 * to the entrypoint address.
+	 */
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
+			continue;
+		/* find the slice to which this image is assigned */
+		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
+			if (image->ctx_assigned & obj_handle->ae_data[ae].
+			    ae_slices[s].ctx_mask_assigned)
+				break;
+		}
+		if (s >= obj_handle->ae_data[ae].slice_num)
+			continue;
+		page = obj_handle->ae_data[ae].ae_slices[s].page;
+		if (!page->encap_page->def_page)
+			continue;
+		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
+
+		page = obj_handle->ae_data[ae].ae_slices[s].page;
+		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
+			obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
+					(ctx_mask & (1 << ctx)) ? page : NULL;
+		qat_hal_set_live_ctx(handle, (unsigned char)ae,
+				     image->ctx_assigned);
+		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
+			       image->entry_address);
+	}
+}
+
+static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int i;
+	struct icp_qat_fw_auth_desc *desc = NULL;
+	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
+	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
+
+	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
+		if (qat_uclo_map_auth_fw(handle,
+					 (char *)simg_hdr[i].simg_buf,
+					 (unsigned int)
+					 (simg_hdr[i].simg_len),
+					 &desc))
+			goto wr_err;
+		if (qat_uclo_auth_fw(handle, desc))
+			goto wr_err;
+		if (qat_uclo_load_fw(handle, desc))
+			goto wr_err;
+		qat_uclo_ummap_auth_fw(handle, &desc);
+	}
+	return 0;
+wr_err:
+	qat_uclo_ummap_auth_fw(handle, &desc);
+	return -EINVAL;
+}
+
+static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int i;
+
+	if (qat_uclo_init_globals(handle))
+		return -EINVAL;
+	for (i = 0; i < obj_handle->uimage_num; i++) {
+		if (!obj_handle->ae_uimage[i].img_ptr)
+			return -EINVAL;
+		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
+			return -EINVAL;
+		qat_uclo_wr_uimage_page(handle,
+					obj_handle->ae_uimage[i].img_ptr);
+	}
+	return 0;
+}
+
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
+{
+	return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
+				   qat_uclo_wr_uof_img(handle);
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile
new file mode 100644
index 0000000..180a00e
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
+qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
new file mode 100644
index 0000000..1dfcab3
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -0,0 +1,262 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_dh895xcc_hw_data.h"
+
+/* Worker thread to service arbiter mappings based on dev SKUs */
+static const uint32_t thrd_to_arb_map_sku4[] = {
+	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000
+};
+
+static const uint32_t thrd_to_arb_map_sku6[] = {
+	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
+};
+
+static struct adf_hw_device_class dh895xcc_class = {
+	.name = ADF_DH895XCC_DEVICE_NAME,
+	.type = DEV_DH895XCC,
+	.instances = 0
+};
+
+static uint32_t get_accel_mask(uint32_t fuse)
+{
+	return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
+			  ADF_DH895XCC_ACCELERATORS_MASK;
+}
+
+static uint32_t get_ae_mask(uint32_t fuse)
+{
+	return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK;
+}
+
+static uint32_t get_num_accels(struct adf_hw_device_data *self)
+{
+	uint32_t i, ctr = 0;
+
+	if (!self || !self->accel_mask)
+		return 0;
+
+	for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
+		if (self->accel_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static uint32_t get_num_aes(struct adf_hw_device_data *self)
+{
+	uint32_t i, ctr = 0;
+
+	if (!self || !self->ae_mask)
+		return 0;
+
+	for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
+		if (self->ae_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static uint32_t get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCC_PMISC_BAR;
+}
+
+static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCC_ETR_BAR;
+}
+
+static uint32_t get_sram_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCC_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
+	    >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
+
+	switch (sku) {
+	case ADF_DH895XCC_FUSECTL_SKU_1:
+		return DEV_SKU_1;
+	case ADF_DH895XCC_FUSECTL_SKU_2:
+		return DEV_SKU_2;
+	case ADF_DH895XCC_FUSECTL_SKU_3:
+		return DEV_SKU_3;
+	case ADF_DH895XCC_FUSECTL_SKU_4:
+		return DEV_SKU_4;
+	default:
+		return DEV_SKU_UNKNOWN;
+	}
+	return DEV_SKU_UNKNOWN;
+}
+
+static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+				    u32 const **arb_map_config)
+{
+	switch (accel_dev->accel_pci_dev.sku) {
+	case DEV_SKU_1:
+		*arb_map_config = thrd_to_arb_map_sku4;
+		break;
+
+	case DEV_SKU_2:
+	case DEV_SKU_4:
+		*arb_map_config = thrd_to_arb_map_sku6;
+		break;
+	default:
+		dev_err(&GET_DEV(accel_dev),
+			"The configuration doesn't match any SKU");
+		*arb_map_config = NULL;
+	}
+}
+
+static uint32_t get_pf2vf_offset(uint32_t i)
+{
+	return ADF_DH895XCC_PF2VF_OFFSET(i);
+}
+
+static uint32_t get_vintmsk_offset(uint32_t i)
+{
+	return ADF_DH895XCC_VINTMSK_OFFSET(i);
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
+	void __iomem *csr = misc_bar->virt_addr;
+	unsigned int val, i;
+
+	/* Enable Accel Engine error detection & correction */
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
+		val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
+		ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
+		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
+		val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
+		ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
+	}
+
+	/* Enable shared memory error detection & correction */
+	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
+		val |= ADF_DH895XCC_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
+		val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
+		val |= ADF_DH895XCC_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
+	}
+}
+
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+	void __iomem *addr;
+
+	addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
+
+	/* Enable bundle and misc interrupts */
+	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
+		   accel_dev->pf.vf_info ? 0 :
+			GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0));
+	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
+		   ADF_DH895XCC_SMIA1_MASK);
+}
+
+static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &dh895xcc_class;
+	hw_data->instance_id = dh895xcc_class.instances++;
+	hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_isr_resource_alloc;
+	hw_data->free_irq = adf_isr_resource_free;
+	hw_data->enable_error_correction = adf_enable_error_correction;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sram_bar_id = get_sram_bar_id;
+	hw_data->get_sku = get_sku;
+	hw_data->fw_name = ADF_DH895XCC_FW;
+	hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
+	hw_data->init_admin_comms = adf_init_admin_comms;
+	hw_data->exit_admin_comms = adf_exit_admin_comms;
+	hw_data->disable_iov = adf_disable_sriov;
+	hw_data->send_admin_init = adf_send_admin_init;
+	hw_data->init_arb = adf_init_arb;
+	hw_data->exit_arb = adf_exit_arb;
+	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+	hw_data->enable_ints = adf_enable_ints;
+	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+	hw_data->reset_device = adf_reset_sbr;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+}
+
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+}
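
Note on the mask plumbing above: get_accel_mask() and get_ae_mask() treat the FUSECTL word (read from PCI config space in adf_drv.c below) as a disable mask, so the word is inverted and then narrowed to the accelerator and accel-engine bit fields; get_num_accels()/get_num_aes() simply count the surviving bits. A minimal standalone sketch of that arithmetic, with an invented fuse value (the shift/mask constants mirror adf_dh895xcc_hw_data.h):

/* Standalone sketch (plain C) of the DH895xCC fuse-to-mask derivation.
 * The fuse value is invented for illustration; a real probe reads it from
 * the device's FUSECTL config-space register. */
#include <stdio.h>
#include <stdint.h>

#define ACCELERATORS_REG_OFFSET	13
#define ACCELERATORS_MASK	0x3F
#define ACCELENGINES_MASK	0xFFF

static unsigned int count_bits(uint32_t v)
{
	unsigned int n = 0;

	for (; v; v &= v - 1)	/* clear the lowest set bit each pass */
		n++;
	return n;
}

int main(void)
{
	uint32_t fuse = 0x00022002;	/* hypothetical FUSECTL value */
	uint32_t accel_mask = (~fuse >> ACCELERATORS_REG_OFFSET) & ACCELERATORS_MASK;
	uint32_t ae_mask = ~fuse & ACCELENGINES_MASK;

	/* a set fuse bit disables the unit, hence the inversion */
	printf("accel_mask = 0x%02x -> %u accelerators\n",
	       accel_mask, count_bits(accel_mask));
	printf("ae_mask    = 0x%03x -> %u accel engines\n",
	       ae_mask, count_bits(ae_mask));
	return 0;
}
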
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
new file mode 100644
index 0000000..092f735
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -0,0 +1,89 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895x_HW_DATA_H_
+#define ADF_DH895x_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_DH895XCC_SRAM_BAR 0
+#define ADF_DH895XCC_PMISC_BAR 1
+#define ADF_DH895XCC_ETR_BAR 2
+#define ADF_DH895XCC_RX_RINGS_OFFSET 8
+#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
+#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
+#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
+#define ADF_DH895XCC_FUSECTL_SKU_2 0x1
+#define ADF_DH895XCC_FUSECTL_SKU_3 0x2
+#define ADF_DH895XCC_FUSECTL_SKU_4 0x3
+#define ADF_DH895XCC_MAX_ACCELERATORS 6
+#define ADF_DH895XCC_MAX_ACCELENGINES 12
+#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
+#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
+#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
+#define ADF_DH895XCC_ETR_MAX_BANKS 32
+#define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF
+#define ADF_DH895XCC_SMIA1_MASK 0x1
+/* Error detection and correction */
+#define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
+#define ADF_DH895XCC_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
+#define ADF_DH895XCC_ENABLE_AE_ECC_ERR BIT(28)
+#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
+#define ADF_DH895XCC_UERRSSMSH(i) (i * 0x4000 + 0x18)
+#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
+#define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
+
+#define ADF_DH895XCC_PF2VF_OFFSET(i)	(0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_DH895XCC_VINTMSK_OFFSET(i)	(0x3A000 + 0x200 + ((i) * 0x04))
+/* FW names */
+#define ADF_DH895XCC_FW "qat_895xcc.bin"
+#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin"
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+#endif
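
The two per-VF macros near the bottom describe a simple layout: one 32-bit PF2VF message register and one VF interrupt-mask register per virtual function, each array starting inside the PMISC region (0x3A000) and stepping by 4 bytes. A throwaway check that just evaluates them:

/* Evaluate the per-VF CSR offsets defined above (plain C, no kernel needed). */
#include <stdio.h>

#define PF2VF_OFFSET(i)		(0x3A000 + 0x280 + ((i) * 0x04))
#define VINTMSK_OFFSET(i)	(0x3A000 + 0x200 + ((i) * 0x04))

int main(void)
{
	for (int vf = 0; vf < 4; vf++)
		printf("VF%d: PF2VF at 0x%05X, VINTMSK at 0x%05X\n",
		       vf, PF2VF_OFFSET(vf), VINTMSK_OFFSET(vf));
	return 0;
}
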
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
new file mode 100644
index 0000000..3a9708e
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -0,0 +1,337 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_dh895xcc_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_DH895XCC_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+	.sriov_configure = adf_sriov_configure,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_DH895XCC_PCI_DEVICE_ID:
+			adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	unsigned long bar_mask;
+	int ret;
+
+	switch (ent->device) {
+	case ADF_DH895XCC_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+		/* If the accelerator is connected to a node with no memory
+		 * there is no point in using the accelerator since the remote
+		 * memory transaction will be very slow. */
+		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+		return -EINVAL;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table.
+	 * This should be called before adf_cleanup_accel is called */
+	if (adf_devmgr_add_dev(accel_dev, NULL)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_dh895xcc(accel_dev->hw_device);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+	pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+			      &hw_data->fuses);
+
+	/* Get Accelerator and Accelerator Engine masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+	/* If the device has no acceleration engines then ignore it. */
+	if (!hw_data->accel_mask || !hw_data->ae_mask ||
+	    ((~hw_data->ae_mask) & 0x01)) {
+		dev_err(&pdev->dev, "No acceleration units found");
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	pcie_set_readrq(pdev, 1024);
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Read accelerator capabilities mask */
+	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
+			      &hw_data->accel_capabilities_mask);
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+
+	if (adf_enable_aer(accel_dev, &adf_driver)) {
+		dev_err(&pdev->dev, "Failed to enable aer\n");
+		ret = -EFAULT;
+		goto out_err_free_reg;
+	}
+
+	if (pci_save_state(pdev)) {
+		dev_err(&pdev->dev, "Failed to save pci state\n");
+		ret = -ENOMEM;
+		goto out_err_free_reg;
+	}
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	adf_disable_aer(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_DH895XCC_FW);
+MODULE_FIRMWARE(ADF_DH895XCC_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
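
One idiom in adf_probe() worth calling out: the BAR loop asks the PCI core for a bitmask of memory BARs (pci_select_bars()) and walks the set bits with for_each_set_bit(), packing each hit into the next pci_bars[] slot; the bound of ADF_PCI_MAX_BARS * 2 presumably leaves room for 64-bit BARs, which occupy BAR register pairs and so set only every other bit in the mask. A plain-C sketch of that walk, with an invented mask (on real hardware it comes from pci_select_bars()):

/* Sketch of the BAR-walk idiom from adf_probe(); plain C stand-ins for
 * pci_select_bars()/for_each_set_bit(), and the mask value is invented. */
#include <stdio.h>

#define MAX_BARS 3

int main(void)
{
	unsigned long bar_mask = 0x15;	/* e.g. three 64-bit MEM BARs: bits 0, 2, 4 */
	int slots[MAX_BARS];
	int i = 0;

	for (int bar_nr = 0; bar_nr < MAX_BARS * 2 && i < MAX_BARS; bar_nr++) {
		if (!(bar_mask & (1UL << bar_nr)))
			continue;
		slots[i++] = bar_nr;	/* next free pci_bars[] entry <- BAR bar_nr */
	}

	for (int n = 0; n < i; n++)
		printf("pci_bars[%d] maps PCI BAR %d\n", n, slots[n]);
	return 0;
}

The error path in the same function is the usual goto ladder: each out_err_* label undoes only what had succeeded before the failing step, in reverse order of acquisition.
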
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile
new file mode 100644
index 0000000..5c3ccf8
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
+qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
new file mode 100644
index 0000000..a3b4dd8
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -0,0 +1,150 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_dh895xccvf_hw_data.h"
+
+static struct adf_hw_device_class dh895xcciov_class = {
+	.name = ADF_DH895XCCVF_DEVICE_NAME,
+	.type = DEV_DH895XCCVF,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return ADF_DH895XCCIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return ADF_DH895XCCIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCCIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCCIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCCIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCCIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	return DEV_SKU_VF;
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_DH895XCCIOV_PF2VF_OFFSET;
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_DH895XCCIOV_VINTMSK_OFFSET;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &dh895xcciov_class;
+	hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+	hw_data->free_irq = adf_vf_isr_resource_free;
+	hw_data->enable_error_correction = adf_vf_void_noop;
+	hw_data->init_admin_comms = adf_vf_int_noop;
+	hw_data->exit_admin_comms = adf_vf_void_noop;
+	hw_data->send_admin_init = adf_vf2pf_init;
+	hw_data->init_arb = adf_vf_int_noop;
+	hw_data->exit_arb = adf_vf_void_noop;
+	hw_data->disable_iov = adf_vf2pf_shutdown;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->enable_ints = adf_vf_void_noop;
+	hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+	hw_data->dev_class->instances++;
+	adf_devmgr_update_class_index(hw_data);
+}
+
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+	adf_devmgr_update_class_index(hw_data);
+}
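
Compared with the PF table in adf_dh895xcc_hw_data.c, the VF wires every PF-only stage (admin comms, arbiter, error correction, interrupt enabling) to adf_vf_int_noop()/adf_vf_void_noop(), so the shared bring-up code in qat_common can call each hook unconditionally instead of branching on is_vf. A small sketch of that pattern (plain C; the names are stand-ins, not the real qat_common API):

/* Sketch of the no-op callback pattern used by the VF hw_data. */
#include <stdio.h>

struct hw_ops {
	int  (*init_admin_comms)(void);
	void (*enable_error_correction)(void);
};

static int pf_init_admin_comms(void)	{ puts("PF: admin comms up"); return 0; }
static void pf_enable_ecc(void)		{ puts("PF: ECC enabled"); }

static int vf_int_noop(void)		{ return 0; }	/* nothing to do on a VF */
static void vf_void_noop(void)		{ }

static int common_dev_init(const struct hw_ops *ops)
{
	int ret = ops->init_admin_comms();	/* no NULL checks, no is_vf branch */

	if (ret)
		return ret;
	ops->enable_error_correction();
	return 0;
}

int main(void)
{
	const struct hw_ops pf = { pf_init_admin_comms, pf_enable_ecc };
	const struct hw_ops vf = { vf_int_noop, vf_void_noop };

	return common_dev_init(&pf) | common_dev_init(&vf);
}
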
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
new file mode 100644
index 0000000..6ddc19b
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
@@ -0,0 +1,64 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895XVF_HW_DATA_H_
+#define ADF_DH895XVF_HW_DATA_H_
+
+#define ADF_DH895XCCIOV_PMISC_BAR 1
+#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1
+#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1
+#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1
+#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1
+#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8
+#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCCIOV_ETR_BAR 0
+#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
+#define ADF_DH895XCCIOV_PF2VF_OFFSET	0x200
+#define ADF_DH895XCCIOV_VINTMSK_OFFSET	0x208
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
new file mode 100644
index 0000000..3da0f95
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -0,0 +1,305 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_dh895xccvf_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_DH895XCCIOV_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_DH895XCCVF_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	struct adf_accel_dev *pf;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+			adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+	adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_dev *pf;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	unsigned long bar_mask;
+	int ret;
+
+	switch (ent->device) {
+	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	accel_dev->is_vf = true;
+	pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table */
+	if (adf_devmgr_add_dev(accel_dev, pf)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
+
+	/* Get Accelerator and Accelerator Engine masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+	/* Completion for VF2PF request/response message exchange */
+	init_completion(&accel_dev->vf.iov_msg_completion);
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+	adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
new file mode 100644
index 0000000..19a7f89
--- /dev/null
+++ b/drivers/crypto/qce/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
+qcrypto-objs := core.o \
+		common.o \
+		dma.o \
+		sha.o \
+		ablkcipher.o
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
new file mode 100644
index 0000000..ea4d96b
--- /dev/null
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/internal/skcipher.h>
+
+#include "cipher.h"
+
+static LIST_HEAD(ablkcipher_algs);
+
+static void qce_ablkcipher_done(void *data)
+{
+	struct crypto_async_request *async_req = data;
+	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+	struct qce_device *qce = tmpl->qce;
+	enum dma_data_direction dir_src, dir_dst;
+	u32 status;
+	int error;
+	bool diff_dst;
+
+	diff_dst = (req->src != req->dst) ? true : false;
+	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+	error = qce_dma_terminate_all(&qce->dma);
+	if (error)
+		dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
+			error);
+
+	if (diff_dst)
+		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+
+	sg_free_table(&rctx->dst_tbl);
+
+	error = qce_check_status(qce, &status);
+	if (error < 0)
+		dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
+
+	qce->async_req_done(tmpl->qce, error);
+}
+
+static int
+qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+	struct qce_device *qce = tmpl->qce;
+	enum dma_data_direction dir_src, dir_dst;
+	struct scatterlist *sg;
+	bool diff_dst;
+	gfp_t gfp;
+	int ret;
+
+	rctx->iv = req->info;
+	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	rctx->cryptlen = req->nbytes;
+
+	diff_dst = (req->src != req->dst) ? true : false;
+	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (diff_dst)
+		rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+	else
+		rctx->dst_nents = rctx->src_nents;
+	if (rctx->src_nents < 0) {
+		dev_err(qce->dev, "Invalid numbers of src SG.\n");
+		return rctx->src_nents;
+	}
+	if (rctx->dst_nents < 0) {
+		dev_err(qce->dev, "Invalid numbers of dst SG.\n");
+		return rctx->dst_nents;
+	}
+
+	rctx->dst_nents += 1;
+
+	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+						GFP_KERNEL : GFP_ATOMIC;
+
+	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
+	if (ret)
+		return ret;
+
+	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+
+	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto error_free;
+	}
+
+	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto error_free;
+	}
+
+	sg_mark_end(sg);
+	rctx->dst_sg = rctx->dst_tbl.sgl;
+
+	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+	if (ret < 0)
+		goto error_free;
+
+	if (diff_dst) {
+		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+		if (ret < 0)
+			goto error_unmap_dst;
+		rctx->src_sg = req->src;
+	} else {
+		rctx->src_sg = rctx->dst_sg;
+	}
+
+	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
+			       rctx->dst_sg, rctx->dst_nents,
+			       qce_ablkcipher_done, async_req);
+	if (ret)
+		goto error_unmap_src;
+
+	qce_dma_issue_pending(&qce->dma);
+
+	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
+	if (ret)
+		goto error_terminate;
+
+	return 0;
+
+error_terminate:
+	qce_dma_terminate_all(&qce->dma);
+error_unmap_src:
+	if (diff_dst)
+		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+error_unmap_dst:
+	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+error_free:
+	sg_free_table(&rctx->dst_tbl);
+	return ret;
+}
+
+static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+				 unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
+	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
+	int ret;
+
+	if (!key || !keylen)
+		return -EINVAL;
+
+	if (IS_AES(flags)) {
+		switch (keylen) {
+		case AES_KEYSIZE_128:
+		case AES_KEYSIZE_256:
+			break;
+		default:
+			goto fallback;
+		}
+	} else if (IS_DES(flags)) {
+		u32 tmp[DES_EXPKEY_WORDS];
+
+		ret = des_ekey(tmp, key);
+		if (!ret && crypto_ablkcipher_get_flags(ablk) &
+		    CRYPTO_TFM_REQ_WEAK_KEY)
+			goto weakkey;
+	}
+
+	ctx->enc_keylen = keylen;
+	memcpy(ctx->enc_key, key, keylen);
+	return 0;
+fallback:
+	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	if (!ret)
+		ctx->enc_keylen = keylen;
+	return ret;
+weakkey:
+	crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
+	return -EINVAL;
+}
+
+static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
+{
+	struct crypto_tfm *tfm =
+			crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+	int ret;
+
+	rctx->flags = tmpl->alg_flags;
+	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+
+	if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
+	    ctx->enc_keylen != AES_KEYSIZE_256) {
+		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+		skcipher_request_set_tfm(subreq, ctx->fallback);
+		skcipher_request_set_callback(subreq, req->base.flags,
+					      NULL, NULL);
+		skcipher_request_set_crypt(subreq, req->src, req->dst,
+					   req->nbytes, req->info);
+		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
+				crypto_skcipher_decrypt(subreq);
+		skcipher_request_zero(subreq);
+		return ret;
+	}
+
+	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	return qce_ablkcipher_crypt(req, 1);
+}
+
+static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	return qce_ablkcipher_crypt(req, 0);
+}
+
+static int qce_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	memset(ctx, 0, sizeof(*ctx));
+	tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
+
+	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
+					      CRYPTO_ALG_ASYNC |
+					      CRYPTO_ALG_NEED_FALLBACK);
+	return PTR_ERR_OR_ZERO(ctx->fallback);
+}
+
+static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_skcipher(ctx->fallback);
+}
+
+struct qce_ablkcipher_def {
+	unsigned long flags;
+	const char *name;
+	const char *drv_name;
+	unsigned int blocksize;
+	unsigned int ivsize;
+	unsigned int min_keysize;
+	unsigned int max_keysize;
+};
+
+static const struct qce_ablkcipher_def ablkcipher_def[] = {
+	{
+		.flags		= QCE_ALG_AES | QCE_MODE_ECB,
+		.name		= "ecb(aes)",
+		.drv_name	= "ecb-aes-qce",
+		.blocksize	= AES_BLOCK_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_AES | QCE_MODE_CBC,
+		.name		= "cbc(aes)",
+		.drv_name	= "cbc-aes-qce",
+		.blocksize	= AES_BLOCK_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
+		.name		= "ctr(aes)",
+		.drv_name	= "ctr-aes-qce",
+		.blocksize	= AES_BLOCK_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
+		.name		= "xts(aes)",
+		.drv_name	= "xts-aes-qce",
+		.blocksize	= AES_BLOCK_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_DES | QCE_MODE_ECB,
+		.name		= "ecb(des)",
+		.drv_name	= "ecb-des-qce",
+		.blocksize	= DES_BLOCK_SIZE,
+		.ivsize		= 0,
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_DES | QCE_MODE_CBC,
+		.name		= "cbc(des)",
+		.drv_name	= "cbc-des-qce",
+		.blocksize	= DES_BLOCK_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.min_keysize	= DES_KEY_SIZE,
+		.max_keysize	= DES_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_3DES | QCE_MODE_ECB,
+		.name		= "ecb(des3_ede)",
+		.drv_name	= "ecb-3des-qce",
+		.blocksize	= DES3_EDE_BLOCK_SIZE,
+		.ivsize		= 0,
+		.min_keysize	= DES3_EDE_KEY_SIZE,
+		.max_keysize	= DES3_EDE_KEY_SIZE,
+	},
+	{
+		.flags		= QCE_ALG_3DES | QCE_MODE_CBC,
+		.name		= "cbc(des3_ede)",
+		.drv_name	= "cbc-3des-qce",
+		.blocksize	= DES3_EDE_BLOCK_SIZE,
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.min_keysize	= DES3_EDE_KEY_SIZE,
+		.max_keysize	= DES3_EDE_KEY_SIZE,
+	},
+};
+
+static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
+				       struct qce_device *qce)
+{
+	struct qce_alg_template *tmpl;
+	struct crypto_alg *alg;
+	int ret;
+
+	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+	if (!tmpl)
+		return -ENOMEM;
+
+	alg = &tmpl->alg.crypto;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 def->drv_name);
+
+	alg->cra_blocksize = def->blocksize;
+	alg->cra_ablkcipher.ivsize = def->ivsize;
+	alg->cra_ablkcipher.min_keysize = def->min_keysize;
+	alg->cra_ablkcipher.max_keysize = def->max_keysize;
+	alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
+	alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
+	alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
+
+	alg->cra_priority = 300;
+	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+			 CRYPTO_ALG_NEED_FALLBACK;
+	alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
+	alg->cra_alignmask = 0;
+	alg->cra_type = &crypto_ablkcipher_type;
+	alg->cra_module = THIS_MODULE;
+	alg->cra_init = qce_ablkcipher_init;
+	alg->cra_exit = qce_ablkcipher_exit;
+	INIT_LIST_HEAD(&alg->cra_list);
+
+	INIT_LIST_HEAD(&tmpl->entry);
+	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
+	tmpl->alg_flags = def->flags;
+	tmpl->qce = qce;
+
+	ret = crypto_register_alg(alg);
+	if (ret) {
+		kfree(tmpl);
+		dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
+		return ret;
+	}
+
+	list_add_tail(&tmpl->entry, &ablkcipher_algs);
+	dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
+	return 0;
+}
+
+static void qce_ablkcipher_unregister(struct qce_device *qce)
+{
+	struct qce_alg_template *tmpl, *n;
+
+	list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
+		crypto_unregister_alg(&tmpl->alg.crypto);
+		list_del(&tmpl->entry);
+		kfree(tmpl);
+	}
+}
+
+static int qce_ablkcipher_register(struct qce_device *qce)
+{
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
+		ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+err:
+	qce_ablkcipher_unregister(qce);
+	return ret;
+}
+
+const struct qce_algo_ops ablkcipher_ops = {
+	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	.register_algs = qce_ablkcipher_register,
+	.unregister_algs = qce_ablkcipher_unregister,
+	.async_req_handle = qce_ablkcipher_async_req_handle,
+};
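
For context on how the algorithms registered above get used: a consumer never touches qce directly, it just asks the crypto API for, say, "cbc(aes)", and the core picks the highest-priority provider (cbc-aes-qce registers at priority 300, so it may or may not win depending on what else is built in). A minimal in-kernel consumer sketch, not specific to qce, with error handling trimmed for brevity; buffers are kmalloc'ed so a DMA-backed driver can map them:

/* Hypothetical demo module: one-block cbc(aes) encryption through the
 * generic skcipher API, served by whichever provider wins by priority. */
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init skcipher_demo_init(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[16] = { 0 }, iv[16] = { 0 };
	u8 *buf = NULL;
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(16, GFP_KERNEL);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!buf || !req) {
		ret = -ENOMEM;
		goto out;
	}

	crypto_skcipher_setkey(tfm, key, sizeof(key));
	sg_init_one(&sg, buf, 16);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 16, iv);

	/* async submit + wait; on qce the completion path is qce_ablkcipher_done() */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	pr_info("cbc(aes) encrypt returned %d via %s\n", ret,
		crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));
out:
	skcipher_request_free(req);
	kfree(buf);
	crypto_free_skcipher(tfm);
	return ret;
}
module_init(skcipher_demo_init);

static void __exit skcipher_demo_exit(void) { }
module_exit(skcipher_demo_exit);
MODULE_LICENSE("GPL");
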
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
new file mode 100644
index 0000000..2b0278b
--- /dev/null
+++ b/drivers/crypto/qce/cipher.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CIPHER_H_
+#define _CIPHER_H_
+
+#include "common.h"
+#include "core.h"
+
+#define QCE_MAX_KEY_SIZE	64
+
+struct qce_cipher_ctx {
+	u8 enc_key[QCE_MAX_KEY_SIZE];
+	unsigned int enc_keylen;
+	struct crypto_skcipher *fallback;
+};
+
+/**
+ * struct qce_cipher_reqctx - holds private cipher objects per request
+ * @flags: operation flags
+ * @iv: pointer to the IV
+ * @ivsize: IV size
+ * @src_nents: source entries
+ * @dst_nents: destination entries
+ * @result_sg: scatterlist used for result buffer
+ * @dst_tbl: destination sg table
+ * @dst_sg: destination sg pointer table beginning
+ * @src_tbl: source sg table
+ * @src_sg: source sg pointer table beginning
+ * @cryptlen: crypto length
+ */
+struct qce_cipher_reqctx {
+	unsigned long flags;
+	u8 *iv;
+	unsigned int ivsize;
+	int src_nents;
+	int dst_nents;
+	struct scatterlist result_sg;
+	struct sg_table dst_tbl;
+	struct scatterlist *dst_sg;
+	struct sg_table src_tbl;
+	struct scatterlist *src_sg;
+	unsigned int cryptlen;
+};
+
+static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	return container_of(alg, struct qce_alg_template, alg.crypto);
+}
+
+extern const struct qce_algo_ops ablkcipher_ops;
+
+#endif /* _CIPHER_H_ */
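
to_cipher_tmpl() above is the standard container_of() trick: the crypto core only ever hands the driver a struct crypto_alg pointer, and because that struct is embedded inside struct qce_alg_template, subtracting the member's offset recovers the template (and with it alg_flags and the qce device). A standalone demo of the idiom with stand-in types:

/* Plain-C demo of the container_of() idiom; the struct names here are
 * stand-ins, not the real crypto types. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct crypto_alg_stub { char name[32]; };	/* stands in for struct crypto_alg */

struct alg_template {
	unsigned long alg_flags;
	struct crypto_alg_stub alg;		/* embedded, like tmpl->alg.crypto */
};

int main(void)
{
	struct alg_template tmpl = { .alg_flags = 0x42, .alg = { "ecb(aes)" } };
	struct crypto_alg_stub *alg = &tmpl.alg;	/* what the core hands back */
	struct alg_template *back = container_of(alg, struct alg_template, alg);

	printf("recovered flags 0x%lx for %s\n", back->alg_flags, alg->name);
	return 0;
}
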
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
new file mode 100644
index 0000000..1fb5fde
--- /dev/null
+++ b/drivers/crypto/qce/common.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+
+#include "cipher.h"
+#include "common.h"
+#include "core.h"
+#include "regs-v5.h"
+#include "sha.h"
+
+#define QCE_SECTOR_SIZE		512
+
+static inline u32 qce_read(struct qce_device *qce, u32 offset)
+{
+	return readl(qce->base + offset);
+}
+
+static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
+{
+	writel(val, qce->base + offset);
+}
+
+static inline void qce_write_array(struct qce_device *qce, u32 offset,
+				   const u32 *val, unsigned int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		qce_write(qce, offset + i * sizeof(u32), val[i]);
+}
+
+static inline void
+qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		qce_write(qce, offset + i * sizeof(u32), 0);
+}
+
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+	u32 cfg = 0;
+
+	if (IS_AES(flags)) {
+		if (aes_key_size == AES_KEYSIZE_128)
+			cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+		else if (aes_key_size == AES_KEYSIZE_256)
+			cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+	}
+
+	if (IS_AES(flags))
+		cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+	else if (IS_DES(flags) || IS_3DES(flags))
+		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+	if (IS_DES(flags))
+		cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+	if (IS_3DES(flags))
+		cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+	switch (flags & QCE_MODE_MASK) {
+	case QCE_MODE_ECB:
+		cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_CBC:
+		cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_CTR:
+		cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_XTS:
+		cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_CCM:
+		cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+		cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+		break;
+	default:
+		return ~0;
+	}
+
+	return cfg;
+}
+
+static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
+{
+	u32 cfg = 0;
+
+	if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
+		cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
+	else
+		cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;
+
+	if (IS_CCM(flags) || IS_CMAC(flags)) {
+		if (key_size == AES_KEYSIZE_128)
+			cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
+		else if (key_size == AES_KEYSIZE_256)
+			cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
+	}
+
+	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
+		cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
+	else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
+		cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
+	else if (IS_CMAC(flags))
+		cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
+
+	if (IS_SHA1(flags) || IS_SHA256(flags))
+		cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
+	else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
+		 IS_CBC(flags) || IS_CTR(flags))
+		cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
+	else if (IS_AES(flags) && IS_CCM(flags))
+		cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
+	else if (IS_AES(flags) && IS_CMAC(flags))
+		cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;
+
+	if (IS_SHA(flags) || IS_SHA_HMAC(flags))
+		cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
+
+	if (IS_CCM(flags))
+		cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;
+
+	if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
+	    IS_CMAC(flags))
+		cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);
+
+	return cfg;
+}
+
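+/* assemble CONFIG: request size in beats, interrupt masks and pipe pair */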
+static u32 qce_config_reg(struct qce_device *qce, int little)
+{
+	u32 beats = (qce->burst_size >> 3) - 1;
+	u32 pipe_pair = qce->pipe_pair_id;
+	u32 config;
+
+	config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+	config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+		  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+	config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+	config &= ~HIGH_SPD_EN_N_SHIFT;
+
+	if (little)
+		config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
+
+	return config;
+}
+
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+	__be32 *d = dst;
+	const u8 *s = src;
+	unsigned int n;
+
+	n = len / sizeof(u32);
+	for (; n > 0; n--) {
+		*d = cpu_to_be32p((const __u32 *) s);
+		s += sizeof(__u32);
+		d++;
+	}
+}
+
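+/*
+ * Reverse the IV bytes and right-align them in a zeroed 16-byte buffer
+ * before converting to big-endian words.
+ */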
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+	u8 swap[QCE_AES_IV_LENGTH];
+	u32 i, j;
+
+	if (ivsize > QCE_AES_IV_LENGTH)
+		return;
+
+	memset(swap, 0, QCE_AES_IV_LENGTH);
+
+	for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+	     i < QCE_AES_IV_LENGTH; i++, j--)
+		swap[i] = src[j];
+
+	qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
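+/* program the second half of the XTS key and the data unit size */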
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+		       unsigned int enckeylen, unsigned int cryptlen)
+{
+	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+	unsigned int xtsdusize;
+
+	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+			       enckeylen / 2);
+	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+	/* xts du size 512B */
+	xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+	qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
+
+static void qce_setup_config(struct qce_device *qce)
+{
+	u32 config;
+
+	/* get the config for big endian mode */
+	config = qce_config_reg(qce, 0);
+
+	/* clear status */
+	qce_write(qce, REG_STATUS, 0);
+	qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+	qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
+}
+
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
+				u32 totallen, u32 offset)
+{
+	struct ahash_request *req = ahash_request_cast(async_req);
+	struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+	struct qce_device *qce = tmpl->qce;
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
+	__be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
+	__be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
+	u32 auth_cfg = 0, config;
+	unsigned int iv_words;
+
+	/* if not the last block, the size must be a multiple of blocksize */
+	if (!rctx->last_blk && req->nbytes % blocksize)
+		return -EINVAL;
+
+	qce_setup_config(qce);
+
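+	/* CMAC does not use the hash IV/byte count state, so clear it out */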
+	if (IS_CMAC(rctx->flags)) {
+		qce_write(qce, REG_AUTH_SEG_CFG, 0);
+		qce_write(qce, REG_ENCR_SEG_CFG, 0);
+		qce_write(qce, REG_ENCR_SEG_SIZE, 0);
+		qce_clear_array(qce, REG_AUTH_IV0, 16);
+		qce_clear_array(qce, REG_AUTH_KEY0, 16);
+		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+
+		auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
+	}
+
+	if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
+		u32 authkey_words = rctx->authklen / sizeof(u32);
+
+		qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
+		qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
+				authkey_words);
+	}
+
+	if (IS_CMAC(rctx->flags))
+		goto go_proc;
+
+	if (rctx->first_blk)
+		memcpy(auth, rctx->digest, digestsize);
+	else
+		qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);
+
+	iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
+	qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);
+
+	if (rctx->first_blk)
+		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+	else
+		qce_write_array(qce, REG_AUTH_BYTECNT0,
+				(u32 *)rctx->byte_count, 2);
+
+	auth_cfg = qce_auth_cfg(rctx->flags, 0);
+
+	if (rctx->last_blk)
+		auth_cfg |= BIT(AUTH_LAST_SHIFT);
+	else
+		auth_cfg &= ~BIT(AUTH_LAST_SHIFT);
+
+	if (rctx->first_blk)
+		auth_cfg |= BIT(AUTH_FIRST_SHIFT);
+	else
+		auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);
+
+go_proc:
+	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
+	qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
+	qce_write(qce, REG_AUTH_SEG_START, 0);
+	qce_write(qce, REG_ENCR_SEG_CFG, 0);
+	qce_write(qce, REG_SEG_SIZE, req->nbytes);
+
+	/* get the config for little endian mode */
+	config = qce_config_reg(qce, 1);
+	qce_write(qce, REG_CONFIG, config);
+
+	qce_crypto_go(qce);
+
+	return 0;
+}
+
+static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
+				     u32 totallen, u32 offset)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+	struct qce_device *qce = tmpl->qce;
+	__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
+	__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
+	unsigned int enckey_words, enciv_words;
+	unsigned int keylen;
+	u32 encr_cfg = 0, auth_cfg = 0, config;
+	unsigned int ivsize = rctx->ivsize;
+	unsigned long flags = rctx->flags;
+
+	qce_setup_config(qce);
+
+	if (IS_XTS(flags))
+		keylen = ctx->enc_keylen / 2;
+	else
+		keylen = ctx->enc_keylen;
+
+	qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
+	enckey_words = keylen / sizeof(u32);
+
+	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
+
+	encr_cfg = qce_encr_cfg(flags, keylen);
+
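+	/* per-algorithm IV/key word counts; AES-XTS also programs the tweak key */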
+	if (IS_DES(flags)) {
+		enciv_words = 2;
+		enckey_words = 2;
+	} else if (IS_3DES(flags)) {
+		enciv_words = 2;
+		enckey_words = 6;
+	} else if (IS_AES(flags)) {
+		if (IS_XTS(flags))
+			qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
+				   rctx->cryptlen);
+		enciv_words = 4;
+	} else {
+		return -EINVAL;
+	}
+
+	qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);
+
+	if (!IS_ECB(flags)) {
+		if (IS_XTS(flags))
+			qce_xts_swapiv(enciv, rctx->iv, ivsize);
+		else
+			qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);
+
+		qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
+	}
+
+	if (IS_ENCRYPT(flags))
+		encr_cfg |= BIT(ENCODE_SHIFT);
+
+	qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
+	qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
+	qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
+
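+	/* program all-ones counter masks for CTR mode */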
+	if (IS_CTR(flags)) {
+		qce_write(qce, REG_CNTR_MASK, ~0);
+		qce_write(qce, REG_CNTR_MASK0, ~0);
+		qce_write(qce, REG_CNTR_MASK1, ~0);
+		qce_write(qce, REG_CNTR_MASK2, ~0);
+	}
+
+	qce_write(qce, REG_SEG_SIZE, totallen);
+
+	/* get the config for little endian mode */
+	config = qce_config_reg(qce, 1);
+	qce_write(qce, REG_CONFIG, config);
+
+	qce_crypto_go(qce);
+
+	return 0;
+}
+
+int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
+	      u32 offset)
+{
+	switch (type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		return qce_setup_regs_ablkcipher(async_req, totallen, offset);
+	case CRYPTO_ALG_TYPE_AHASH:
+		return qce_setup_regs_ahash(async_req, totallen, offset);
+	default:
+		return -EINVAL;
+	}
+}
+
+#define STATUS_ERRORS	\
+		(BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
+
+int qce_check_status(struct qce_device *qce, u32 *status)
+{
+	int ret = 0;
+
+	*status = qce_read(qce, REG_STATUS);
+
+	/*
+	 * Don't use the result dump status; the operation may not be
+	 * complete. Instead, use the status we just read from the device.
+	 * If result_status from the result dump is ever needed, it would
+	 * have to be byte swapped, since we set the device to little endian.
+	 */
+	if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
+		ret = -ENXIO;
+
+	return ret;
+}
+
+void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
+{
+	u32 val;
+
+	val = qce_read(qce, REG_VERSION);
+	*major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
+	*minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
+	*step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
+}
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
new file mode 100644
index 0000000..a4addd4
--- /dev/null
+++ b/drivers/crypto/qce/common.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _COMMON_H_
+#define _COMMON_H_
+
+#include <linux/crypto.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+
+/* key size in bytes */
+#define QCE_SHA_HMAC_KEY_SIZE		64
+#define QCE_MAX_CIPHER_KEY_SIZE		AES_KEYSIZE_256
+
+/* IV length in bytes */
+#define QCE_AES_IV_LENGTH		AES_BLOCK_SIZE
+/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define QCE_MAX_IV_SIZE			AES_BLOCK_SIZE
+
+/* maximum nonce bytes */
+#define QCE_MAX_NONCE			16
+#define QCE_MAX_NONCE_WORDS		(QCE_MAX_NONCE / sizeof(u32))
+
+/* burst size alignment requirement */
+#define QCE_MAX_ALIGN_SIZE		64
+
+/* cipher algorithms */
+#define QCE_ALG_DES			BIT(0)
+#define QCE_ALG_3DES			BIT(1)
+#define QCE_ALG_AES			BIT(2)
+
+/* hash and hmac algorithms */
+#define QCE_HASH_SHA1			BIT(3)
+#define QCE_HASH_SHA256			BIT(4)
+#define QCE_HASH_SHA1_HMAC		BIT(5)
+#define QCE_HASH_SHA256_HMAC		BIT(6)
+#define QCE_HASH_AES_CMAC		BIT(7)
+
+/* cipher modes */
+#define QCE_MODE_CBC			BIT(8)
+#define QCE_MODE_ECB			BIT(9)
+#define QCE_MODE_CTR			BIT(10)
+#define QCE_MODE_XTS			BIT(11)
+#define QCE_MODE_CCM			BIT(12)
+#define QCE_MODE_MASK			GENMASK(12, 8)
+
+/* cipher encryption/decryption operations */
+#define QCE_ENCRYPT			BIT(13)
+#define QCE_DECRYPT			BIT(14)
+
+#define IS_DES(flags)			(flags & QCE_ALG_DES)
+#define IS_3DES(flags)			(flags & QCE_ALG_3DES)
+#define IS_AES(flags)			(flags & QCE_ALG_AES)
+
+#define IS_SHA1(flags)			(flags & QCE_HASH_SHA1)
+#define IS_SHA256(flags)		(flags & QCE_HASH_SHA256)
+#define IS_SHA1_HMAC(flags)		(flags & QCE_HASH_SHA1_HMAC)
+#define IS_SHA256_HMAC(flags)		(flags & QCE_HASH_SHA256_HMAC)
+#define IS_CMAC(flags)			(flags & QCE_HASH_AES_CMAC)
+#define IS_SHA(flags)			(IS_SHA1(flags) || IS_SHA256(flags))
+#define IS_SHA_HMAC(flags)		\
+		(IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
+
+#define IS_CBC(mode)			(mode & QCE_MODE_CBC)
+#define IS_ECB(mode)			(mode & QCE_MODE_ECB)
+#define IS_CTR(mode)			(mode & QCE_MODE_CTR)
+#define IS_XTS(mode)			(mode & QCE_MODE_XTS)
+#define IS_CCM(mode)			(mode & QCE_MODE_CCM)
+
+#define IS_ENCRYPT(dir)			(dir & QCE_ENCRYPT)
+#define IS_DECRYPT(dir)			(dir & QCE_DECRYPT)
+
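+/**
+ * struct qce_alg_template - algorithm template registered with the core
+ * @entry: list entry into the per-type list of registered algorithms
+ * @crypto_alg_type: the CRYPTO_ALG_TYPE_* of this algorithm
+ * @alg_flags: QCE_* flags describing algorithm, mode and direction
+ * @std_iv: standard initial vector used by the hash algorithms
+ * @alg: crypto API algorithm definition
+ * @qce: back pointer to the qce device
+ */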
+struct qce_alg_template {
+	struct list_head entry;
+	u32 crypto_alg_type;
+	unsigned long alg_flags;
+	const u32 *std_iv;
+	union {
+		struct crypto_alg crypto;
+		struct ahash_alg ahash;
+	} alg;
+	struct qce_device *qce;
+};
+
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
+int qce_check_status(struct qce_device *qce, u32 *status);
+void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
+int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
+	      u32 offset);
+
+#endif /* _COMMON_H_ */
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
new file mode 100644
index 0000000..1c3b36b
--- /dev/null
+++ b/drivers/crypto/qce/core.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+
+#include "core.h"
+#include "cipher.h"
+#include "sha.h"
+
+#define QCE_MAJOR_VERSION5	0x05
+#define QCE_QUEUE_LENGTH	1
+
+static const struct qce_algo_ops *qce_ops[] = {
+	&ablkcipher_ops,
+	&ahash_ops,
+};
+
+static void qce_unregister_algs(struct qce_device *qce)
+{
+	const struct qce_algo_ops *ops;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+		ops = qce_ops[i];
+		ops->unregister_algs(qce);
+	}
+}
+
+static int qce_register_algs(struct qce_device *qce)
+{
+	const struct qce_algo_ops *ops;
+	int i, ret = -ENODEV;
+
+	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+		ops = qce_ops[i];
+		ret = ops->register_algs(qce);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static int qce_handle_request(struct crypto_async_request *async_req)
+{
+	int ret = -EINVAL, i;
+	const struct qce_algo_ops *ops;
+	u32 type = crypto_tfm_alg_type(async_req->tfm);
+
+	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+		ops = qce_ops[i];
+		if (type != ops->type)
+			continue;
+		ret = ops->async_req_handle(async_req);
+		break;
+	}
+
+	return ret;
+}
+
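+/*
+ * Enqueue a new request, if given, and when the engine is idle dequeue
+ * the next request and start processing it.
+ */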
+static int qce_handle_queue(struct qce_device *qce,
+			    struct crypto_async_request *req)
+{
+	struct crypto_async_request *async_req, *backlog;
+	unsigned long flags;
+	int ret = 0, err;
+
+	spin_lock_irqsave(&qce->lock, flags);
+
+	if (req)
+		ret = crypto_enqueue_request(&qce->queue, req);
+
+	/* busy, do not dequeue request */
+	if (qce->req) {
+		spin_unlock_irqrestore(&qce->lock, flags);
+		return ret;
+	}
+
+	backlog = crypto_get_backlog(&qce->queue);
+	async_req = crypto_dequeue_request(&qce->queue);
+	if (async_req)
+		qce->req = async_req;
+
+	spin_unlock_irqrestore(&qce->lock, flags);
+
+	if (!async_req)
+		return ret;
+
+	if (backlog) {
+		spin_lock_bh(&qce->lock);
+		backlog->complete(backlog, -EINPROGRESS);
+		spin_unlock_bh(&qce->lock);
+	}
+
+	err = qce_handle_request(async_req);
+	if (err) {
+		qce->result = err;
+		tasklet_schedule(&qce->done_tasklet);
+	}
+
+	return ret;
+}
+
+static void qce_tasklet_req_done(unsigned long data)
+{
+	struct qce_device *qce = (struct qce_device *)data;
+	struct crypto_async_request *req;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qce->lock, flags);
+	req = qce->req;
+	qce->req = NULL;
+	spin_unlock_irqrestore(&qce->lock, flags);
+
+	if (req)
+		req->complete(req, qce->result);
+
+	qce_handle_queue(qce, NULL);
+}
+
+static int qce_async_request_enqueue(struct qce_device *qce,
+				     struct crypto_async_request *req)
+{
+	return qce_handle_queue(qce, req);
+}
+
+static void qce_async_request_done(struct qce_device *qce, int ret)
+{
+	qce->result = ret;
+	tasklet_schedule(&qce->done_tasklet);
+}
+
+static int qce_check_version(struct qce_device *qce)
+{
+	u32 major, minor, step;
+
+	qce_get_version(qce, &major, &minor, &step);
+
+	/*
+	 * The driver does not support v5 with minor 0 because that revision
+	 * has special alignment requirements.
+	 */
+	if (major != QCE_MAJOR_VERSION5 || minor == 0)
+		return -ENODEV;
+
+	qce->burst_size = QCE_BAM_BURST_SIZE;
+	qce->pipe_pair_id = 1;
+
+	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
+		major, minor, step);
+
+	return 0;
+}
+
+static int qce_crypto_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct qce_device *qce;
+	struct resource *res;
+	int ret;
+
+	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
+	if (!qce)
+		return -ENOMEM;
+
+	qce->dev = dev;
+	platform_set_drvdata(pdev, qce);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	qce->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(qce->base))
+		return PTR_ERR(qce->base);
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (ret < 0)
+		return ret;
+
+	qce->core = devm_clk_get(qce->dev, "core");
+	if (IS_ERR(qce->core))
+		return PTR_ERR(qce->core);
+
+	qce->iface = devm_clk_get(qce->dev, "iface");
+	if (IS_ERR(qce->iface))
+		return PTR_ERR(qce->iface);
+
+	qce->bus = devm_clk_get(qce->dev, "bus");
+	if (IS_ERR(qce->bus))
+		return PTR_ERR(qce->bus);
+
+	ret = clk_prepare_enable(qce->core);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(qce->iface);
+	if (ret)
+		goto err_clks_core;
+
+	ret = clk_prepare_enable(qce->bus);
+	if (ret)
+		goto err_clks_iface;
+
+	ret = qce_dma_request(qce->dev, &qce->dma);
+	if (ret)
+		goto err_clks;
+
+	ret = qce_check_version(qce);
+	if (ret)
+		goto err_clks;
+
+	spin_lock_init(&qce->lock);
+	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
+		     (unsigned long)qce);
+	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
+
+	qce->async_req_enqueue = qce_async_request_enqueue;
+	qce->async_req_done = qce_async_request_done;
+
+	ret = qce_register_algs(qce);
+	if (ret)
+		goto err_dma;
+
+	return 0;
+
+err_dma:
+	qce_dma_release(&qce->dma);
+err_clks:
+	clk_disable_unprepare(qce->bus);
+err_clks_iface:
+	clk_disable_unprepare(qce->iface);
+err_clks_core:
+	clk_disable_unprepare(qce->core);
+	return ret;
+}
+
+static int qce_crypto_remove(struct platform_device *pdev)
+{
+	struct qce_device *qce = platform_get_drvdata(pdev);
+
+	tasklet_kill(&qce->done_tasklet);
+	qce_unregister_algs(qce);
+	qce_dma_release(&qce->dma);
+	clk_disable_unprepare(qce->bus);
+	clk_disable_unprepare(qce->iface);
+	clk_disable_unprepare(qce->core);
+	return 0;
+}
+
+static const struct of_device_id qce_crypto_of_match[] = {
+	{ .compatible = "qcom,crypto-v5.1", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
+
+static struct platform_driver qce_crypto_driver = {
+	.probe = qce_crypto_probe,
+	.remove = qce_crypto_remove,
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = qce_crypto_of_match,
+	},
+};
+module_platform_driver(qce_crypto_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm crypto engine driver");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_AUTHOR("The Linux Foundation");
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
new file mode 100644
index 0000000..549965d
--- /dev/null
+++ b/drivers/crypto/qce/core.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include "dma.h"
+
+/**
+ * struct qce_device - crypto engine device structure
+ * @queue: crypto request queue
+ * @lock: the lock protects queue and req
+ * @done_tasklet: done tasklet object
+ * @req: current active request
+ * @result: result of current transform
+ * @base: virtual IO base
+ * @dev: pointer to device structure
+ * @core: core device clock
+ * @iface: interface clock
+ * @bus: bus clock
+ * @dma: pointer to dma data
+ * @burst_size: the crypto burst size
+ * @pipe_pair_id: which pipe pair id the device is using
+ * @async_req_enqueue: invoked by every algorithm to enqueue a request
+ * @async_req_done: invoked by every algorithm to finish its request
+ */
+struct qce_device {
+	struct crypto_queue queue;
+	spinlock_t lock;
+	struct tasklet_struct done_tasklet;
+	struct crypto_async_request *req;
+	int result;
+	void __iomem *base;
+	struct device *dev;
+	struct clk *core, *iface, *bus;
+	struct qce_dma_data dma;
+	int burst_size;
+	unsigned int pipe_pair_id;
+	int (*async_req_enqueue)(struct qce_device *qce,
+				 struct crypto_async_request *req);
+	void (*async_req_done)(struct qce_device *qce, int ret);
+};
+
+/**
+ * struct qce_algo_ops - algorithm operations per crypto type
+ * @type: should be CRYPTO_ALG_TYPE_XXX
+ * @register_algs: invoked by core to register the algorithms
+ * @unregister_algs: invoked by core to unregister the algorithms
+ * @async_req_handle: invoked by core to handle enqueued request
+ */
+struct qce_algo_ops {
+	u32 type;
+	int (*register_algs)(struct qce_device *qce);
+	void (*unregister_algs)(struct qce_device *qce);
+	int (*async_req_handle)(struct crypto_async_request *async_req);
+};
+
+#endif /* _CORE_H_ */
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
new file mode 100644
index 0000000..4797e79
--- /dev/null
+++ b/drivers/crypto/qce/dma.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <crypto/scatterwalk.h>
+
+#include "dma.h"
+
+int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
+{
+	int ret;
+
+	dma->txchan = dma_request_slave_channel_reason(dev, "tx");
+	if (IS_ERR(dma->txchan))
+		return PTR_ERR(dma->txchan);
+
+	dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
+	if (IS_ERR(dma->rxchan)) {
+		ret = PTR_ERR(dma->rxchan);
+		goto error_rx;
+	}
+
+	dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
+				  GFP_KERNEL);
+	if (!dma->result_buf) {
+		ret = -ENOMEM;
+		goto error_nomem;
+	}
+
+	dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
+
+	return 0;
+error_nomem:
+	dma_release_channel(dma->rxchan);
+error_rx:
+	dma_release_channel(dma->txchan);
+	return ret;
+}
+
+void qce_dma_release(struct qce_dma_data *dma)
+{
+	dma_release_channel(dma->txchan);
+	dma_release_channel(dma->rxchan);
+	kfree(dma->result_buf);
+}
+
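+/*
+ * Append the entries of new_sgl into the first unused slots of sgt and
+ * return the last entry filled in.
+ */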
+struct scatterlist *
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
+{
+	struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
+
+	while (sg) {
+		if (!sg_page(sg))
+			break;
+		sg = sg_next(sg);
+	}
+
+	if (!sg)
+		return ERR_PTR(-EINVAL);
+
+	while (new_sgl && sg) {
+		sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
+			    new_sgl->offset);
+		sg_last = sg;
+		sg = sg_next(sg);
+		new_sgl = sg_next(new_sgl);
+	}
+
+	return sg_last;
+}
+
+static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
+			   int nents, unsigned long flags,
+			   enum dma_transfer_direction dir,
+			   dma_async_tx_callback cb, void *cb_param)
+{
+	struct dma_async_tx_descriptor *desc;
+	dma_cookie_t cookie;
+
+	if (!sg || !nents)
+		return -EINVAL;
+
+	desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
+	if (!desc)
+		return -EINVAL;
+
+	desc->callback = cb;
+	desc->callback_param = cb_param;
+	cookie = dmaengine_submit(desc);
+
+	return dma_submit_error(cookie);
+}
+
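+/*
+ * Prepare the rx (memory to device) and tx (device to memory) descriptors;
+ * the completion callback is attached to the tx transfer.
+ */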
+int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
+		     int rx_nents, struct scatterlist *tx_sg, int tx_nents,
+		     dma_async_tx_callback cb, void *cb_param)
+{
+	struct dma_chan *rxchan = dma->rxchan;
+	struct dma_chan *txchan = dma->txchan;
+	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+	int ret;
+
+	ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
+			     NULL, NULL);
+	if (ret)
+		return ret;
+
+	return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
+			       cb, cb_param);
+}
+
+void qce_dma_issue_pending(struct qce_dma_data *dma)
+{
+	dma_async_issue_pending(dma->rxchan);
+	dma_async_issue_pending(dma->txchan);
+}
+
+int qce_dma_terminate_all(struct qce_dma_data *dma)
+{
+	int ret;
+
+	ret = dmaengine_terminate_all(dma->rxchan);
+	return ret ?: dmaengine_terminate_all(dma->txchan);
+}
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
new file mode 100644
index 0000000..130235d
--- /dev/null
+++ b/drivers/crypto/qce/dma.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DMA_H_
+#define _DMA_H_
+
+#include <linux/dmaengine.h>
+
+/* maximum data transfer block size between BAM and CE */
+#define QCE_BAM_BURST_SIZE		64
+
+#define QCE_AUTHIV_REGS_CNT		16
+#define QCE_AUTH_BYTECOUNT_REGS_CNT	4
+#define QCE_CNTRIV_REGS_CNT		4
+
+struct qce_result_dump {
+	u32 auth_iv[QCE_AUTHIV_REGS_CNT];
+	u32 auth_byte_count[QCE_AUTH_BYTECOUNT_REGS_CNT];
+	u32 encr_cntr_iv[QCE_CNTRIV_REGS_CNT];
+	u32 status;
+	u32 status2;
+};
+
+#define QCE_IGNORE_BUF_SZ	(2 * QCE_BAM_BURST_SIZE)
+#define QCE_RESULT_BUF_SZ	\
+		ALIGN(sizeof(struct qce_result_dump), QCE_BAM_BURST_SIZE)
+
+struct qce_dma_data {
+	struct dma_chan *txchan;
+	struct dma_chan *rxchan;
+	struct qce_result_dump *result_buf;
+	void *ignore_buf;
+};
+
+int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
+void qce_dma_release(struct qce_dma_data *dma);
+int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
+		     int in_ents, struct scatterlist *sg_out, int out_ents,
+		     dma_async_tx_callback cb, void *cb_param);
+void qce_dma_issue_pending(struct qce_dma_data *dma);
+int qce_dma_terminate_all(struct qce_dma_data *dma);
+struct scatterlist *
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
+
+#endif /* _DMA_H_ */
diff --git a/drivers/crypto/qce/regs-v5.h b/drivers/crypto/qce/regs-v5.h
new file mode 100644
index 0000000..f0e19e3
--- /dev/null
+++ b/drivers/crypto/qce/regs-v5.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _REGS_V5_H_
+#define _REGS_V5_H_
+
+#include <linux/bitops.h>
+
+#define REG_VERSION			0x000
+#define REG_STATUS			0x100
+#define REG_STATUS2			0x104
+#define REG_ENGINES_AVAIL		0x108
+#define REG_FIFO_SIZES			0x10c
+#define REG_SEG_SIZE			0x110
+#define REG_GOPROC			0x120
+#define REG_ENCR_SEG_CFG		0x200
+#define REG_ENCR_SEG_SIZE		0x204
+#define REG_ENCR_SEG_START		0x208
+#define REG_CNTR0_IV0			0x20c
+#define REG_CNTR1_IV1			0x210
+#define REG_CNTR2_IV2			0x214
+#define REG_CNTR3_IV3			0x218
+#define REG_CNTR_MASK			0x21C
+#define REG_ENCR_CCM_INT_CNTR0		0x220
+#define REG_ENCR_CCM_INT_CNTR1		0x224
+#define REG_ENCR_CCM_INT_CNTR2		0x228
+#define REG_ENCR_CCM_INT_CNTR3		0x22c
+#define REG_ENCR_XTS_DU_SIZE		0x230
+#define REG_CNTR_MASK2			0x234
+#define REG_CNTR_MASK1			0x238
+#define REG_CNTR_MASK0			0x23c
+#define REG_AUTH_SEG_CFG		0x300
+#define REG_AUTH_SEG_SIZE		0x304
+#define REG_AUTH_SEG_START		0x308
+#define REG_AUTH_IV0			0x310
+#define REG_AUTH_IV1			0x314
+#define REG_AUTH_IV2			0x318
+#define REG_AUTH_IV3			0x31c
+#define REG_AUTH_IV4			0x320
+#define REG_AUTH_IV5			0x324
+#define REG_AUTH_IV6			0x328
+#define REG_AUTH_IV7			0x32c
+#define REG_AUTH_IV8			0x330
+#define REG_AUTH_IV9			0x334
+#define REG_AUTH_IV10			0x338
+#define REG_AUTH_IV11			0x33c
+#define REG_AUTH_IV12			0x340
+#define REG_AUTH_IV13			0x344
+#define REG_AUTH_IV14			0x348
+#define REG_AUTH_IV15			0x34c
+#define REG_AUTH_INFO_NONCE0		0x350
+#define REG_AUTH_INFO_NONCE1		0x354
+#define REG_AUTH_INFO_NONCE2		0x358
+#define REG_AUTH_INFO_NONCE3		0x35c
+#define REG_AUTH_BYTECNT0		0x390
+#define REG_AUTH_BYTECNT1		0x394
+#define REG_AUTH_BYTECNT2		0x398
+#define REG_AUTH_BYTECNT3		0x39c
+#define REG_AUTH_EXP_MAC0		0x3a0
+#define REG_AUTH_EXP_MAC1		0x3a4
+#define REG_AUTH_EXP_MAC2		0x3a8
+#define REG_AUTH_EXP_MAC3		0x3ac
+#define REG_AUTH_EXP_MAC4		0x3b0
+#define REG_AUTH_EXP_MAC5		0x3b4
+#define REG_AUTH_EXP_MAC6		0x3b8
+#define REG_AUTH_EXP_MAC7		0x3bc
+#define REG_CONFIG			0x400
+#define REG_GOPROC_QC_KEY		0x1000
+#define REG_GOPROC_OEM_KEY		0x2000
+#define REG_ENCR_KEY0			0x3000
+#define REG_ENCR_KEY1			0x3004
+#define REG_ENCR_KEY2			0x3008
+#define REG_ENCR_KEY3			0x300c
+#define REG_ENCR_KEY4			0x3010
+#define REG_ENCR_KEY5			0x3014
+#define REG_ENCR_KEY6			0x3018
+#define REG_ENCR_KEY7			0x301c
+#define REG_ENCR_XTS_KEY0		0x3020
+#define REG_ENCR_XTS_KEY1		0x3024
+#define REG_ENCR_XTS_KEY2		0x3028
+#define REG_ENCR_XTS_KEY3		0x302c
+#define REG_ENCR_XTS_KEY4		0x3030
+#define REG_ENCR_XTS_KEY5		0x3034
+#define REG_ENCR_XTS_KEY6		0x3038
+#define REG_ENCR_XTS_KEY7		0x303c
+#define REG_AUTH_KEY0			0x3040
+#define REG_AUTH_KEY1			0x3044
+#define REG_AUTH_KEY2			0x3048
+#define REG_AUTH_KEY3			0x304c
+#define REG_AUTH_KEY4			0x3050
+#define REG_AUTH_KEY5			0x3054
+#define REG_AUTH_KEY6			0x3058
+#define REG_AUTH_KEY7			0x305c
+#define REG_AUTH_KEY8			0x3060
+#define REG_AUTH_KEY9			0x3064
+#define REG_AUTH_KEY10			0x3068
+#define REG_AUTH_KEY11			0x306c
+#define REG_AUTH_KEY12			0x3070
+#define REG_AUTH_KEY13			0x3074
+#define REG_AUTH_KEY14			0x3078
+#define REG_AUTH_KEY15			0x307c
+
+/* Register bits - REG_VERSION */
+#define CORE_STEP_REV_SHIFT		0
+#define CORE_STEP_REV_MASK		GENMASK(15, 0)
+#define CORE_MINOR_REV_SHIFT		16
+#define CORE_MINOR_REV_MASK		GENMASK(23, 16)
+#define CORE_MAJOR_REV_SHIFT		24
+#define CORE_MAJOR_REV_MASK		GENMASK(31, 24)
+
+/* Register bits - REG_STATUS */
+#define MAC_FAILED_SHIFT		31
+#define DOUT_SIZE_AVAIL_SHIFT		26
+#define DOUT_SIZE_AVAIL_MASK		GENMASK(30, 26)
+#define DIN_SIZE_AVAIL_SHIFT		21
+#define DIN_SIZE_AVAIL_MASK		GENMASK(25, 21)
+#define HSD_ERR_SHIFT			20
+#define ACCESS_VIOL_SHIFT		19
+#define PIPE_ACTIVE_ERR_SHIFT		18
+#define CFG_CHNG_ERR_SHIFT		17
+#define DOUT_ERR_SHIFT			16
+#define DIN_ERR_SHIFT			15
+#define AXI_ERR_SHIFT			14
+#define CRYPTO_STATE_SHIFT		10
+#define CRYPTO_STATE_MASK		GENMASK(13, 10)
+#define ENCR_BUSY_SHIFT			9
+#define AUTH_BUSY_SHIFT			8
+#define DOUT_INTR_SHIFT			7
+#define DIN_INTR_SHIFT			6
+#define OP_DONE_INTR_SHIFT		5
+#define ERR_INTR_SHIFT			4
+#define DOUT_RDY_SHIFT			3
+#define DIN_RDY_SHIFT			2
+#define OPERATION_DONE_SHIFT		1
+#define SW_ERR_SHIFT			0
+
+/* Register bits - REG_STATUS2 */
+#define AXI_EXTRA_SHIFT			1
+#define LOCKED_SHIFT			2
+
+/* Register bits - REG_CONFIG */
+#define REQ_SIZE_SHIFT			17
+#define REQ_SIZE_MASK			GENMASK(20, 17)
+#define REQ_SIZE_ENUM_1_BEAT		0
+#define REQ_SIZE_ENUM_2_BEAT		1
+#define REQ_SIZE_ENUM_3_BEAT		2
+#define REQ_SIZE_ENUM_4_BEAT		3
+#define REQ_SIZE_ENUM_5_BEAT		4
+#define REQ_SIZE_ENUM_6_BEAT		5
+#define REQ_SIZE_ENUM_7_BEAT		6
+#define REQ_SIZE_ENUM_8_BEAT		7
+#define REQ_SIZE_ENUM_9_BEAT		8
+#define REQ_SIZE_ENUM_10_BEAT		9
+#define REQ_SIZE_ENUM_11_BEAT		10
+#define REQ_SIZE_ENUM_12_BEAT		11
+#define REQ_SIZE_ENUM_13_BEAT		12
+#define REQ_SIZE_ENUM_14_BEAT		13
+#define REQ_SIZE_ENUM_15_BEAT		14
+#define REQ_SIZE_ENUM_16_BEAT		15
+
+#define MAX_QUEUED_REQ_SHIFT		14
+#define MAX_QUEUED_REQ_MASK		GENMASK(24, 16)
+#define ENUM_1_QUEUED_REQS		0
+#define ENUM_2_QUEUED_REQS		1
+#define ENUM_3_QUEUED_REQS		2
+
+#define IRQ_ENABLES_SHIFT		10
+#define IRQ_ENABLES_MASK		GENMASK(13, 10)
+
+#define LITTLE_ENDIAN_MODE_SHIFT	9
+#define PIPE_SET_SELECT_SHIFT		5
+#define PIPE_SET_SELECT_MASK		GENMASK(8, 5)
+
+#define HIGH_SPD_EN_N_SHIFT		4
+#define MASK_DOUT_INTR_SHIFT		3
+#define MASK_DIN_INTR_SHIFT		2
+#define MASK_OP_DONE_INTR_SHIFT		1
+#define MASK_ERR_INTR_SHIFT		0
+
+/* Register bits - REG_AUTH_SEG_CFG */
+#define COMP_EXP_MAC_SHIFT		24
+#define COMP_EXP_MAC_DISABLED		0
+#define COMP_EXP_MAC_ENABLED		1
+
+#define F9_DIRECTION_SHIFT		23
+#define F9_DIRECTION_UPLINK		0
+#define F9_DIRECTION_DOWNLINK		1
+
+#define AUTH_NONCE_NUM_WORDS_SHIFT	20
+#define AUTH_NONCE_NUM_WORDS_MASK	GENMASK(22, 20)
+
+#define USE_PIPE_KEY_AUTH_SHIFT		19
+#define USE_HW_KEY_AUTH_SHIFT		18
+#define AUTH_FIRST_SHIFT		17
+#define AUTH_LAST_SHIFT			16
+
+#define AUTH_POS_SHIFT			14
+#define AUTH_POS_MASK			GENMASK(15, 14)
+#define AUTH_POS_BEFORE			0
+#define AUTH_POS_AFTER			1
+
+#define AUTH_SIZE_SHIFT			9
+#define AUTH_SIZE_MASK			GENMASK(13, 9)
+#define AUTH_SIZE_SHA1			0
+#define AUTH_SIZE_SHA256		1
+#define AUTH_SIZE_ENUM_1_BYTES		0
+#define AUTH_SIZE_ENUM_2_BYTES		1
+#define AUTH_SIZE_ENUM_3_BYTES		2
+#define AUTH_SIZE_ENUM_4_BYTES		3
+#define AUTH_SIZE_ENUM_5_BYTES		4
+#define AUTH_SIZE_ENUM_6_BYTES		5
+#define AUTH_SIZE_ENUM_7_BYTES		6
+#define AUTH_SIZE_ENUM_8_BYTES		7
+#define AUTH_SIZE_ENUM_9_BYTES		8
+#define AUTH_SIZE_ENUM_10_BYTES		9
+#define AUTH_SIZE_ENUM_11_BYTES		10
+#define AUTH_SIZE_ENUM_12_BYTES		11
+#define AUTH_SIZE_ENUM_13_BYTES		12
+#define AUTH_SIZE_ENUM_14_BYTES		13
+#define AUTH_SIZE_ENUM_15_BYTES		14
+#define AUTH_SIZE_ENUM_16_BYTES		15
+
+#define AUTH_MODE_SHIFT			6
+#define AUTH_MODE_MASK			GENMASK(8, 6)
+#define AUTH_MODE_HASH			0
+#define AUTH_MODE_HMAC			1
+#define AUTH_MODE_CCM			0
+#define AUTH_MODE_CMAC			1
+
+#define AUTH_KEY_SIZE_SHIFT		3
+#define AUTH_KEY_SIZE_MASK		GENMASK(5, 3)
+#define AUTH_KEY_SZ_AES128		0
+#define AUTH_KEY_SZ_AES256		2
+
+#define AUTH_ALG_SHIFT			0
+#define AUTH_ALG_MASK			GENMASK(2, 0)
+#define AUTH_ALG_NONE			0
+#define AUTH_ALG_SHA			1
+#define AUTH_ALG_AES			2
+#define AUTH_ALG_KASUMI			3
+#define AUTH_ALG_SNOW3G			4
+#define AUTH_ALG_ZUC			5
+
+/* Register bits - REG_ENCR_XTS_DU_SIZE */
+#define ENCR_XTS_DU_SIZE_SHIFT		0
+#define ENCR_XTS_DU_SIZE_MASK		GENMASK(19, 0)
+
+/* Register bits - REG_ENCR_SEG_CFG */
+#define F8_KEYSTREAM_ENABLE_SHIFT	17
+#define F8_KEYSTREAM_DISABLED		0
+#define F8_KEYSTREAM_ENABLED		1
+
+#define F8_DIRECTION_SHIFT		16
+#define F8_DIRECTION_UPLINK		0
+#define F8_DIRECTION_DOWNLINK		1
+
+#define USE_PIPE_KEY_ENCR_SHIFT		15
+#define USE_PIPE_KEY_ENCR_ENABLED	1
+#define USE_KEY_REGISTERS		0
+
+#define USE_HW_KEY_ENCR_SHIFT		14
+#define USE_KEY_REG			0
+#define USE_HW_KEY			1
+
+#define LAST_CCM_SHIFT			13
+#define LAST_CCM_XFR			1
+#define INTERM_CCM_XFR			0
+
+#define CNTR_ALG_SHIFT			11
+#define CNTR_ALG_MASK			GENMASK(12, 11)
+#define CNTR_ALG_NIST			0
+
+#define ENCODE_SHIFT			10
+
+#define ENCR_MODE_SHIFT			6
+#define ENCR_MODE_MASK			GENMASK(9, 6)
+#define ENCR_MODE_ECB			0
+#define ENCR_MODE_CBC			1
+#define ENCR_MODE_CTR			2
+#define ENCR_MODE_XTS			3
+#define ENCR_MODE_CCM			4
+
+#define ENCR_KEY_SZ_SHIFT		3
+#define ENCR_KEY_SZ_MASK		GENMASK(5, 3)
+#define ENCR_KEY_SZ_DES			0
+#define ENCR_KEY_SZ_3DES		1
+#define ENCR_KEY_SZ_AES128		0
+#define ENCR_KEY_SZ_AES256		2
+
+#define ENCR_ALG_SHIFT			0
+#define ENCR_ALG_MASK			GENMASK(2, 0)
+#define ENCR_ALG_NONE			0
+#define ENCR_ALG_DES			1
+#define ENCR_ALG_AES			2
+#define ENCR_ALG_KASUMI			4
+#define ENCR_ALG_SNOW_3G		5
+#define ENCR_ALG_ZUC			6
+
+/* Register bits - REG_GOPROC */
+#define GO_SHIFT			0
+#define CLR_CNTXT_SHIFT			1
+#define RESULTS_DUMP_SHIFT		2
+
+/* Register bits - REG_ENGINES_AVAIL */
+#define ENCR_AES_SEL_SHIFT		0
+#define DES_SEL_SHIFT			1
+#define ENCR_SNOW3G_SEL_SHIFT		2
+#define ENCR_KASUMI_SEL_SHIFT		3
+#define SHA_SEL_SHIFT			4
+#define SHA512_SEL_SHIFT		5
+#define AUTH_AES_SEL_SHIFT		6
+#define AUTH_SNOW3G_SEL_SHIFT		7
+#define AUTH_KASUMI_SEL_SHIFT		8
+#define BAM_PIPE_SETS_SHIFT		9
+#define BAM_PIPE_SETS_MASK		GENMASK(12, 9)
+#define AXI_WR_BEATS_SHIFT		13
+#define AXI_WR_BEATS_MASK		GENMASK(18, 13)
+#define AXI_RD_BEATS_SHIFT		19
+#define AXI_RD_BEATS_MASK		GENMASK(24, 19)
+#define ENCR_ZUC_SEL_SHIFT		26
+#define AUTH_ZUC_SEL_SHIFT		27
+#define ZUC_ENABLE_SHIFT		28
+
+#endif /* _REGS_V5_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
new file mode 100644
index 0000000..d8a5db1
--- /dev/null
+++ b/drivers/crypto/qce/sha.c
@@ -0,0 +1,566 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <crypto/internal/hash.h>
+
+#include "common.h"
+#include "core.h"
+#include "sha.h"
+
+/* crypto hw padding constant for first operation */
+#define SHA_PADDING		64
+#define SHA_PADDING_MASK	(SHA_PADDING - 1)
+
+static LIST_HEAD(ahash_algs);
+
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
+
+static void qce_ahash_done(void *data)
+{
+	struct crypto_async_request *async_req = data;
+	struct ahash_request *req = ahash_request_cast(async_req);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+	struct qce_device *qce = tmpl->qce;
+	struct qce_result_dump *result = qce->dma.result_buf;
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	int error;
+	u32 status;
+
+	error = qce_dma_terminate_all(&qce->dma);
+	if (error)
+		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);
+
+	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
+
+	memcpy(rctx->digest, result->auth_iv, digestsize);
+	if (req->result)
+		memcpy(req->result, result->auth_iv, digestsize);
+
+	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
+	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);
+
+	error = qce_check_status(qce, &status);
+	if (error < 0)
+		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);
+
+	req->src = rctx->src_orig;
+	req->nbytes = rctx->nbytes_orig;
+	rctx->last_blk = false;
+	rctx->first_blk = false;
+
+	qce->async_req_done(tmpl->qce, error);
+}
+
+static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
+{
+	struct ahash_request *req = ahash_request_cast(async_req);
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+	struct qce_device *qce = tmpl->qce;
+	unsigned long flags = rctx->flags;
+	int ret;
+
+	if (IS_SHA_HMAC(flags)) {
+		rctx->authkey = ctx->authkey;
+		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
+	} else if (IS_CMAC(flags)) {
+		rctx->authkey = ctx->authkey;
+		rctx->authklen = AES_KEYSIZE_128;
+	}
+
+	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (rctx->src_nents < 0) {
+		dev_err(qce->dev, "Invalid numbers of src SG.\n");
+		return rctx->src_nents;
+	}
+
+	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+	if (ret < 0)
+		return ret;
+
+	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+
+	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
+	if (ret < 0)
+		goto error_unmap_src;
+
+	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
+			       &rctx->result_sg, 1, qce_ahash_done, async_req);
+	if (ret)
+		goto error_unmap_dst;
+
+	qce_dma_issue_pending(&qce->dma);
+
+	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+	if (ret)
+		goto error_terminate;
+
+	return 0;
+
+error_terminate:
+	qce_dma_terminate_all(&qce->dma);
+error_unmap_dst:
+	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
+error_unmap_src:
+	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+	return ret;
+}
+
+static int qce_ahash_init(struct ahash_request *req)
+{
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+	const u32 *std_iv = tmpl->std_iv;
+
+	memset(rctx, 0, sizeof(*rctx));
+	rctx->first_blk = true;
+	rctx->last_blk = false;
+	rctx->flags = tmpl->alg_flags;
+	memcpy(rctx->digest, std_iv, sizeof(rctx->digest));
+
+	return 0;
+}
+
+static int qce_ahash_export(struct ahash_request *req, void *out)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	unsigned long flags = rctx->flags;
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	unsigned int blocksize =
+			crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+
+	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
+		struct sha1_state *out_state = out;
+
+		out_state->count = rctx->count;
+		qce_cpu_to_be32p_array((__be32 *)out_state->state,
+				       rctx->digest, digestsize);
+		memcpy(out_state->buffer, rctx->buf, blocksize);
+	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
+		struct sha256_state *out_state = out;
+
+		out_state->count = rctx->count;
+		qce_cpu_to_be32p_array((__be32 *)out_state->state,
+				       rctx->digest, digestsize);
+		memcpy(out_state->buf, rctx->buf, blocksize);
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qce_import_common(struct ahash_request *req, u64 in_count,
+			     const u32 *state, const u8 *buffer, bool hmac)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	unsigned int blocksize;
+	u64 count = in_count;
+
+	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+	rctx->count = in_count;
+	memcpy(rctx->buf, buffer, blocksize);
+
+	if (in_count <= blocksize) {
+		rctx->first_blk = 1;
+	} else {
+		rctx->first_blk = 0;
+		/*
+		 * For HMAC, the hardware adds padding when the first block
+		 * flag is set. Therefore the byte_count must be incremented
+		 * by 64 after the first block operation.
+		 */
+		if (hmac)
+			count += SHA_PADDING;
+	}
+
+	rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
+	rctx->byte_count[1] = (__force __be32)(count >> 32);
+	qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
+			       digestsize);
+	rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+
+	return 0;
+}
+
+static int qce_ahash_import(struct ahash_request *req, const void *in)
+{
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	unsigned long flags = rctx->flags;
+	bool hmac = IS_SHA_HMAC(flags);
+	int ret = -EINVAL;
+
+	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
+		const struct sha1_state *state = in;
+
+		ret = qce_import_common(req, state->count, state->state,
+					state->buffer, hmac);
+	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
+		const struct sha256_state *state = in;
+
+		ret = qce_import_common(req, state->count, state->state,
+					state->buf, hmac);
+	}
+
+	return ret;
+}
+
+static int qce_ahash_update(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+	struct qce_device *qce = tmpl->qce;
+	struct scatterlist *sg_last, *sg;
+	unsigned int total, len;
+	unsigned int hash_later;
+	unsigned int nbytes;
+	unsigned int blocksize;
+
+	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+	rctx->count += req->nbytes;
+
+	/* check for buffer from previous updates and append it */
+	total = req->nbytes + rctx->buflen;
+
+	if (total <= blocksize) {
+		scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
+					 0, req->nbytes, 0);
+		rctx->buflen += req->nbytes;
+		return 0;
+	}
+
+	/* save the original req structure fields */
+	rctx->src_orig = req->src;
+	rctx->nbytes_orig = req->nbytes;
+
+	/*
+	 * If there is data left over from a previous update, copy it into
+	 * tmpbuf so it can be combined with the current request bytes.
+	 */
+	if (rctx->buflen)
+		memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
+
+	/* calculate how many bytes will be hashed later */
+	hash_later = total % blocksize;
+	if (hash_later) {
+		unsigned int src_offset = req->nbytes - hash_later;
+		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
+					 hash_later, 0);
+	}
+
+	/* here nbytes is multiple of blocksize */
+	nbytes = total - hash_later;
+
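+	/* find the last sg entry needed to cover nbytes and end the list there */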
+	len = rctx->buflen;
+	sg = sg_last = req->src;
+
+	while (len < nbytes && sg) {
+		if (len + sg_dma_len(sg) > nbytes)
+			break;
+		len += sg_dma_len(sg);
+		sg_last = sg;
+		sg = sg_next(sg);
+	}
+
+	if (!sg_last)
+		return -EINVAL;
+
+	sg_mark_end(sg_last);
+
+	if (rctx->buflen) {
+		sg_init_table(rctx->sg, 2);
+		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
+		sg_chain(rctx->sg, 2, req->src);
+		req->src = rctx->sg;
+	}
+
+	req->nbytes = nbytes;
+	rctx->buflen = hash_later;
+
+	return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ahash_final(struct ahash_request *req)
+{
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+	struct qce_device *qce = tmpl->qce;
+
+	if (!rctx->buflen)
+		return 0;
+
+	rctx->last_blk = true;
+
+	rctx->src_orig = req->src;
+	rctx->nbytes_orig = req->nbytes;
+
+	memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
+	sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);
+
+	req->src = rctx->sg;
+	req->nbytes = rctx->buflen;
+
+	return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ahash_digest(struct ahash_request *req)
+{
+	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+	struct qce_device *qce = tmpl->qce;
+	int ret;
+
+	ret = qce_ahash_init(req);
+	if (ret)
+		return ret;
+
+	rctx->src_orig = req->src;
+	rctx->nbytes_orig = req->nbytes;
+	rctx->first_blk = true;
+	rctx->last_blk = true;
+
+	return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+				 unsigned int keylen)
+{
+	unsigned int digestsize = crypto_ahash_digestsize(tfm);
+	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+	struct crypto_wait wait;
+	struct ahash_request *req;
+	struct scatterlist sg;
+	unsigned int blocksize;
+	struct crypto_ahash *ahash_tfm;
+	u8 *buf;
+	int ret;
+	const char *alg_name;
+
+	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+	memset(ctx->authkey, 0, sizeof(ctx->authkey));
+
+	if (keylen <= blocksize) {
+		memcpy(ctx->authkey, key, keylen);
+		return 0;
+	}
+
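+	/* longer keys are first hashed down to digest size, as HMAC requires */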
+	if (digestsize == SHA1_DIGEST_SIZE)
+		alg_name = "sha1-qce";
+	else if (digestsize == SHA256_DIGEST_SIZE)
+		alg_name = "sha256-qce";
+	else
+		return -EINVAL;
+
+	ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
+	if (IS_ERR(ahash_tfm))
+		return PTR_ERR(ahash_tfm);
+
+	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
+	if (!req) {
+		ret = -ENOMEM;
+		goto err_free_ahash;
+	}
+
+	crypto_init_wait(&wait);
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   crypto_req_done, &wait);
+	crypto_ahash_clear_flags(ahash_tfm, ~0);
+
+	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err_free_req;
+	}
+
+	memcpy(buf, key, keylen);
+	sg_init_one(&sg, buf, keylen);
+	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
+
+	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+	if (ret)
+		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+	kfree(buf);
+err_free_req:
+	ahash_request_free(req);
+err_free_ahash:
+	crypto_free_ahash(ahash_tfm);
+	return ret;
+}
+
+static int qce_ahash_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
+	memset(ctx, 0, sizeof(*ctx));
+	return 0;
+}
+
+struct qce_ahash_def {
+	unsigned long flags;
+	const char *name;
+	const char *drv_name;
+	unsigned int digestsize;
+	unsigned int blocksize;
+	unsigned int statesize;
+	const u32 *std_iv;
+};
+
+static const struct qce_ahash_def ahash_def[] = {
+	{
+		.flags		= QCE_HASH_SHA1,
+		.name		= "sha1",
+		.drv_name	= "sha1-qce",
+		.digestsize	= SHA1_DIGEST_SIZE,
+		.blocksize	= SHA1_BLOCK_SIZE,
+		.statesize	= sizeof(struct sha1_state),
+		.std_iv		= std_iv_sha1,
+	},
+	{
+		.flags		= QCE_HASH_SHA256,
+		.name		= "sha256",
+		.drv_name	= "sha256-qce",
+		.digestsize	= SHA256_DIGEST_SIZE,
+		.blocksize	= SHA256_BLOCK_SIZE,
+		.statesize	= sizeof(struct sha256_state),
+		.std_iv		= std_iv_sha256,
+	},
+	{
+		.flags		= QCE_HASH_SHA1_HMAC,
+		.name		= "hmac(sha1)",
+		.drv_name	= "hmac-sha1-qce",
+		.digestsize	= SHA1_DIGEST_SIZE,
+		.blocksize	= SHA1_BLOCK_SIZE,
+		.statesize	= sizeof(struct sha1_state),
+		.std_iv		= std_iv_sha1,
+	},
+	{
+		.flags		= QCE_HASH_SHA256_HMAC,
+		.name		= "hmac(sha256)",
+		.drv_name	= "hmac-sha256-qce",
+		.digestsize	= SHA256_DIGEST_SIZE,
+		.blocksize	= SHA256_BLOCK_SIZE,
+		.statesize	= sizeof(struct sha256_state),
+		.std_iv		= std_iv_sha256,
+	},
+};
+
+static int qce_ahash_register_one(const struct qce_ahash_def *def,
+				  struct qce_device *qce)
+{
+	struct qce_alg_template *tmpl;
+	struct ahash_alg *alg;
+	struct crypto_alg *base;
+	int ret;
+
+	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+	if (!tmpl)
+		return -ENOMEM;
+
+	tmpl->std_iv = def->std_iv;
+
+	alg = &tmpl->alg.ahash;
+	alg->init = qce_ahash_init;
+	alg->update = qce_ahash_update;
+	alg->final = qce_ahash_final;
+	alg->digest = qce_ahash_digest;
+	alg->export = qce_ahash_export;
+	alg->import = qce_ahash_import;
+	if (IS_SHA_HMAC(def->flags))
+		alg->setkey = qce_ahash_hmac_setkey;
+	alg->halg.digestsize = def->digestsize;
+	alg->halg.statesize = def->statesize;
+
+	base = &alg->halg.base;
+	base->cra_blocksize = def->blocksize;
+	base->cra_priority = 300;
+	base->cra_flags = CRYPTO_ALG_ASYNC;
+	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
+	base->cra_alignmask = 0;
+	base->cra_module = THIS_MODULE;
+	base->cra_init = qce_ahash_cra_init;
+	INIT_LIST_HEAD(&base->cra_list);
+
+	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 def->drv_name);
+
+	INIT_LIST_HEAD(&tmpl->entry);
+	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
+	tmpl->alg_flags = def->flags;
+	tmpl->qce = qce;
+
+	ret = crypto_register_ahash(alg);
+	if (ret) {
+		kfree(tmpl);
+		dev_err(qce->dev, "%s registration failed\n", base->cra_name);
+		return ret;
+	}
+
+	list_add_tail(&tmpl->entry, &ahash_algs);
+	dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
+	return 0;
+}
+
+static void qce_ahash_unregister(struct qce_device *qce)
+{
+	struct qce_alg_template *tmpl, *n;
+
+	list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
+		crypto_unregister_ahash(&tmpl->alg.ahash);
+		list_del(&tmpl->entry);
+		kfree(tmpl);
+	}
+}
+
+static int qce_ahash_register(struct qce_device *qce)
+{
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
+		ret = qce_ahash_register_one(&ahash_def[i], qce);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+err:
+	qce_ahash_unregister(qce);
+	return ret;
+}
+
+const struct qce_algo_ops ahash_ops = {
+	.type = CRYPTO_ALG_TYPE_AHASH,
+	.register_algs = qce_ahash_register,
+	.unregister_algs = qce_ahash_unregister,
+	.async_req_handle = qce_ahash_async_req_handle,
+};
diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h
new file mode 100644
index 0000000..236bb5e
--- /dev/null
+++ b/drivers/crypto/qce/sha.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SHA_H_
+#define _SHA_H_
+
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+
+#include "common.h"
+#include "core.h"
+
+#define QCE_SHA_MAX_BLOCKSIZE		SHA256_BLOCK_SIZE
+#define QCE_SHA_MAX_DIGESTSIZE		SHA256_DIGEST_SIZE
+
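+/**
+ * struct qce_sha_ctx - transform context for sha/hmac transforms
+ * @authkey: authentication key, zero padded to the block size
+ */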
+struct qce_sha_ctx {
+	u8 authkey[QCE_SHA_MAX_BLOCKSIZE];
+};
+
+/**
+ * struct qce_sha_reqctx - holds private ahash objects per request
+ * @buf: used during update, import and export
+ * @tmpbuf: buffer for internal use
+ * @digest: calculated digest buffer
+ * @buflen: length of the buffer
+ * @flags: operation flags
+ * @src_orig: original request sg list
+ * @nbytes_orig: original request number of bytes
+ * @src_nents: source number of entries
+ * @byte_count: byte count
+ * @count: total byte count, saved and restored during import and export
+ * @first_blk: is it the first block
+ * @last_blk: is it the last block
+ * @sg: used to chain sg lists
+ * @authkey: pointer to auth key in sha ctx
+ * @authklen: auth key length
+ * @result_sg: scatterlist used for result buffer
+ */
+struct qce_sha_reqctx {
+	u8 buf[QCE_SHA_MAX_BLOCKSIZE];
+	u8 tmpbuf[QCE_SHA_MAX_BLOCKSIZE];
+	u8 digest[QCE_SHA_MAX_DIGESTSIZE];
+	unsigned int buflen;
+	unsigned long flags;
+	struct scatterlist *src_orig;
+	unsigned int nbytes_orig;
+	int src_nents;
+	__be32 byte_count[2];
+	u64 count;
+	bool first_blk;
+	bool last_blk;
+	struct scatterlist sg[2];
+	u8 *authkey;
+	unsigned int authklen;
+	struct scatterlist result_sg;
+};
+
+static inline struct qce_alg_template *to_ahash_tmpl(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
+					     struct ahash_alg, halg);
+
+	return container_of(alg, struct qce_alg_template, alg.ahash);
+}
+
+extern const struct qce_algo_ops ahash_ops;
+
+#endif /* _SHA_H_ */
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
new file mode 100644
index 0000000..e54249c
--- /dev/null
+++ b/drivers/crypto/qcom-rng.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-18 Linaro Limited
+//
+// Based on msm-rng.c and downstream driver
+
+#include <crypto/internal/rng.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT		0x0000
+#define PRNG_STATUS		0x0004
+#define PRNG_LFSR_CFG		0x0100
+#define PRNG_CONFIG		0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK	0x0000ffff
+#define PRNG_LFSR_CFG_CLOCKS	0x0000dddd
+#define PRNG_CONFIG_HW_ENABLE	BIT(1)
+#define PRNG_STATUS_DATA_AVAIL	BIT(0)
+
+#define WORD_SZ			4
+
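+/* per-device state; a single global instance is shared by all transforms */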
+struct qcom_rng {
+	struct mutex lock;
+	void __iomem *base;
+	struct clk *clk;
+	unsigned int skip_init;
+};
+
+struct qcom_rng_ctx {
+	struct qcom_rng *rng;
+};
+
+static struct qcom_rng *qcom_rng_dev;
+
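+/* copy up to max bytes from the PRNG data FIFO, returning the number copied */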
+static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
+{
+	unsigned int currsize = 0;
+	u32 val;
+
+	/* read random data from hardware */
+	do {
+		val = readl_relaxed(rng->base + PRNG_STATUS);
+		if (!(val & PRNG_STATUS_DATA_AVAIL))
+			break;
+
+		val = readl_relaxed(rng->base + PRNG_DATA_OUT);
+		if (!val)
+			break;
+
+		if ((max - currsize) >= WORD_SZ) {
+			memcpy(data, &val, WORD_SZ);
+			data += WORD_SZ;
+			currsize += WORD_SZ;
+		} else {
+			/* copy only remaining bytes */
+			memcpy(data, &val, max - currsize);
+			break;
+		}
+	} while (currsize < max);
+
+	return currsize;
+}
+
+static int qcom_rng_generate(struct crypto_rng *tfm,
+			     const u8 *src, unsigned int slen,
+			     u8 *dstn, unsigned int dlen)
+{
+	struct qcom_rng_ctx *ctx = crypto_rng_ctx(tfm);
+	struct qcom_rng *rng = ctx->rng;
+	int ret;
+
+	ret = clk_prepare_enable(rng->clk);
+	if (ret)
+		return ret;
+
+	mutex_lock(&rng->lock);
+
+	ret = qcom_rng_read(rng, dstn, dlen);
+
+	mutex_unlock(&rng->lock);
+	clk_disable_unprepare(rng->clk);
+
+	return 0;
+}
+
+static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed,
+			 unsigned int slen)
+{
+	return 0;
+}
+
+static int qcom_rng_enable(struct qcom_rng *rng)
+{
+	u32 val;
+	int ret;
+
+	ret = clk_prepare_enable(rng->clk);
+	if (ret)
+		return ret;
+
+	/* Enable PRNG only if it is not already enabled */
+	val = readl_relaxed(rng->base + PRNG_CONFIG);
+	if (val & PRNG_CONFIG_HW_ENABLE)
+		goto already_enabled;
+
+	val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
+	val &= ~PRNG_LFSR_CFG_MASK;
+	val |= PRNG_LFSR_CFG_CLOCKS;
+	writel(val, rng->base + PRNG_LFSR_CFG);
+
+	val = readl_relaxed(rng->base + PRNG_CONFIG);
+	val |= PRNG_CONFIG_HW_ENABLE;
+	writel(val, rng->base + PRNG_CONFIG);
+
+already_enabled:
+	clk_disable_unprepare(rng->clk);
+
+	return 0;
+}
+
+static int qcom_rng_init(struct crypto_tfm *tfm)
+{
+	struct qcom_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->rng = qcom_rng_dev;
+
+	if (!ctx->rng->skip_init)
+		return qcom_rng_enable(ctx->rng);
+
+	return 0;
+}
+
+static struct rng_alg qcom_rng_alg = {
+	.generate	= qcom_rng_generate,
+	.seed		= qcom_rng_seed,
+	.seedsize	= 0,
+	.base		= {
+		.cra_name		= "stdrng",
+		.cra_driver_name	= "qcom-rng",
+		.cra_flags		= CRYPTO_ALG_TYPE_RNG,
+		.cra_priority		= 300,
+		.cra_ctxsize		= sizeof(struct qcom_rng_ctx),
+		.cra_module		= THIS_MODULE,
+		.cra_init		= qcom_rng_init,
+	}
+};
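+
+/*
+ * Kernel consumers reach this driver through the generic crypto RNG API
+ * by asking for the "stdrng" algorithm name registered above (which
+ * implementation actually serves the request depends on priority). An
+ * illustrative caller, not part of this driver, could look like:
+ *
+ *	struct crypto_rng *rng;
+ *	u8 buf[32];
+ *	int err;
+ *
+ *	rng = crypto_alloc_rng("stdrng", 0, 0);
+ *	if (IS_ERR(rng))
+ *		return PTR_ERR(rng);
+ *	err = crypto_rng_get_bytes(rng, buf, sizeof(buf));
+ *	crypto_free_rng(rng);
+ */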
+
+static int qcom_rng_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct qcom_rng *rng;
+	int ret;
+
+	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+	if (!rng)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, rng);
+	mutex_init(&rng->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rng->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rng->base))
+		return PTR_ERR(rng->base);
+
+	/* ACPI systems have clk already on, so skip clk_get */
+	if (!has_acpi_companion(&pdev->dev)) {
+		rng->clk = devm_clk_get(&pdev->dev, "core");
+		if (IS_ERR(rng->clk))
+			return PTR_ERR(rng->clk);
+	}
+
+	rng->skip_init = (unsigned long)device_get_match_data(&pdev->dev);
+
+	qcom_rng_dev = rng;
+	ret = crypto_register_rng(&qcom_rng_alg);
+	if (ret) {
+		dev_err(&pdev->dev, "Register crypto rng failed: %d\n", ret);
+		qcom_rng_dev = NULL;
+	}
+
+	return ret;
+}
+
+static int qcom_rng_remove(struct platform_device *pdev)
+{
+	crypto_unregister_rng(&qcom_rng_alg);
+
+	qcom_rng_dev = NULL;
+
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id qcom_rng_acpi_match[] = {
+	{ .id = "QCOM8160", .driver_data = 1 },
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
+#endif
+
+static const struct of_device_id qcom_rng_of_match[] = {
+	{ .compatible = "qcom,prng", .data = (void *)0},
+	{ .compatible = "qcom,prng-ee", .data = (void *)1},
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_rng_of_match);
+
+static struct platform_driver qcom_rng_driver = {
+	.probe = qcom_rng_probe,
+	.remove =  qcom_rng_remove,
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = of_match_ptr(qcom_rng_of_match),
+		.acpi_match_table = ACPI_PTR(qcom_rng_acpi_match),
+	}
+};
+module_platform_driver(qcom_rng_driver);
+
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_DESCRIPTION("Qualcomm random number generator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile
new file mode 100644
index 0000000..30f9129
--- /dev/null
+++ b/drivers/crypto/rockchip/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
+rk_crypto-objs := rk3288_crypto.o \
+		  rk3288_crypto_ablkcipher.o \
+		  rk3288_crypto_ahash.o
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
new file mode 100644
index 0000000..c9d622a
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -0,0 +1,448 @@
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
+ */
+
+#include "rk3288_crypto.h"
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/reset.h>
+
+static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
+{
+	int err;
+
+	err = clk_prepare_enable(dev->sclk);
+	if (err) {
+		dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
+			__func__, __LINE__);
+		goto err_return;
+	}
+	err = clk_prepare_enable(dev->aclk);
+	if (err) {
+		dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
+			__func__, __LINE__);
+		goto err_aclk;
+	}
+	err = clk_prepare_enable(dev->hclk);
+	if (err) {
+		dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
+			__func__, __LINE__);
+		goto err_hclk;
+	}
+	err = clk_prepare_enable(dev->dmaclk);
+	if (err) {
+		dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
+			__func__, __LINE__);
+		goto err_dmaclk;
+	}
+	return err;
+err_dmaclk:
+	clk_disable_unprepare(dev->hclk);
+err_hclk:
+	clk_disable_unprepare(dev->aclk);
+err_aclk:
+	clk_disable_unprepare(dev->sclk);
+err_return:
+	return err;
+}
+
+static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
+{
+	clk_disable_unprepare(dev->dmaclk);
+	clk_disable_unprepare(dev->hclk);
+	clk_disable_unprepare(dev->aclk);
+	clk_disable_unprepare(dev->sclk);
+}
+
+static int check_alignment(struct scatterlist *sg_src,
+			   struct scatterlist *sg_dst,
+			   int align_mask)
+{
+	int in, out, align;
+
+	in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
+	     IS_ALIGNED((uint32_t)sg_src->length, align_mask);
+	if (!sg_dst)
+		return in;
+	out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
+	      IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
+	align = in && out;
+
+	return (align && (sg_src->length == sg_dst->length));
+}
+
+static int rk_load_data(struct rk_crypto_info *dev,
+			struct scatterlist *sg_src,
+			struct scatterlist *sg_dst)
+{
+	unsigned int count;
+
+	if (dev->aligned)
+		dev->aligned = check_alignment(sg_src, sg_dst, dev->align_size);
+	if (dev->aligned) {
+		count = min(dev->left_bytes, sg_src->length);
+		dev->left_bytes -= count;
+
+		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
+			dev_err(dev->dev, "[%s:%d] dma_map_sg(src)  error\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		dev->addr_in = sg_dma_address(sg_src);
+
+		if (sg_dst) {
+			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
+				dev_err(dev->dev,
+					"[%s:%d] dma_map_sg(dst)  error\n",
+					__func__, __LINE__);
+				dma_unmap_sg(dev->dev, sg_src, 1,
+					     DMA_TO_DEVICE);
+				return -EINVAL;
+			}
+			dev->addr_out = sg_dma_address(sg_dst);
+		}
+	} else {
+		count = (dev->left_bytes > PAGE_SIZE) ?
+			PAGE_SIZE : dev->left_bytes;
+
+		if (!sg_pcopy_to_buffer(dev->first, dev->nents,
+					dev->addr_vir, count,
+					dev->total - dev->left_bytes)) {
+			dev_err(dev->dev, "[%s:%d] pcopy err\n",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+		dev->left_bytes -= count;
+		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
+		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
+			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp)  error\n",
+				__func__, __LINE__);
+			return -ENOMEM;
+		}
+		dev->addr_in = sg_dma_address(&dev->sg_tmp);
+
+		if (sg_dst) {
+			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
+					DMA_FROM_DEVICE)) {
+				dev_err(dev->dev,
+					"[%s:%d] dma_map_sg(sg_tmp)  error\n",
+					__func__, __LINE__);
+				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
+					     DMA_TO_DEVICE);
+				return -ENOMEM;
+			}
+			dev->addr_out = sg_dma_address(&dev->sg_tmp);
+		}
+	}
+	dev->count = count;
+	return 0;
+}
+
+static void rk_unload_data(struct rk_crypto_info *dev)
+{
+	struct scatterlist *sg_in, *sg_out;
+
+	sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
+	dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);
+
+	if (dev->sg_dst) {
+		sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
+		dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
+	}
+}
+
+static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
+{
+	struct rk_crypto_info *dev  = platform_get_drvdata(dev_id);
+	u32 interrupt_status;
+
+	spin_lock(&dev->lock);
+	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
+	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
+
+	if (interrupt_status &
+	    (RK_CRYPTO_HRDMA_ERR_INT | RK_CRYPTO_BCDMA_ERR_INT)) {
+		dev_warn(dev->dev, "DMA Error\n");
+		dev->err = -EFAULT;
+	}
+	tasklet_schedule(&dev->done_task);
+
+	spin_unlock(&dev->lock);
+	return IRQ_HANDLED;
+}
+
+static int rk_crypto_enqueue(struct rk_crypto_info *dev,
+			      struct crypto_async_request *async_req)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ret = crypto_enqueue_request(&dev->queue, async_req);
+	if (dev->busy) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return ret;
+	}
+	dev->busy = true;
+	spin_unlock_irqrestore(&dev->lock, flags);
+	tasklet_schedule(&dev->queue_task);
+
+	return ret;
+}
+
+static void rk_crypto_queue_task_cb(unsigned long data)
+{
+	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+	struct crypto_async_request *async_req, *backlog;
+	unsigned long flags;
+	int err = 0;
+
+	dev->err = 0;
+	spin_lock_irqsave(&dev->lock, flags);
+	backlog   = crypto_get_backlog(&dev->queue);
+	async_req = crypto_dequeue_request(&dev->queue);
+
+	if (!async_req) {
+		dev->busy = false;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (backlog) {
+		backlog->complete(backlog, -EINPROGRESS);
+		backlog = NULL;
+	}
+
+	dev->async_req = async_req;
+	err = dev->start(dev);
+	if (err)
+		dev->complete(dev->async_req, err);
+}
+
+static void rk_crypto_done_task_cb(unsigned long data)
+{
+	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+
+	if (dev->err) {
+		dev->complete(dev->async_req, dev->err);
+		return;
+	}
+
+	dev->err = dev->update(dev);
+	if (dev->err)
+		dev->complete(dev->async_req, dev->err);
+}
+
+static struct rk_crypto_tmp *rk_cipher_algs[] = {
+	&rk_ecb_aes_alg,
+	&rk_cbc_aes_alg,
+	&rk_ecb_des_alg,
+	&rk_cbc_des_alg,
+	&rk_ecb_des3_ede_alg,
+	&rk_cbc_des3_ede_alg,
+	&rk_ahash_sha1,
+	&rk_ahash_sha256,
+	&rk_ahash_md5,
+};
+
+static int rk_crypto_register(struct rk_crypto_info *crypto_info)
+{
+	unsigned int i, k;
+	int err = 0;
+
+	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+		rk_cipher_algs[i]->dev = crypto_info;
+		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+			err = crypto_register_alg(
+					&rk_cipher_algs[i]->alg.crypto);
+		else
+			err = crypto_register_ahash(
+					&rk_cipher_algs[i]->alg.hash);
+		if (err)
+			goto err_cipher_algs;
+	}
+	return 0;
+
+err_cipher_algs:
+	for (k = 0; k < i; k++) {
+		if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
+			crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
+		else
+			crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
+	}
+	return err;
+}
+
+static void rk_crypto_unregister(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+			crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
+		else
+			crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
+	}
+}
+
+static void rk_crypto_action(void *data)
+{
+	struct rk_crypto_info *crypto_info = data;
+
+	reset_control_assert(crypto_info->rst);
+}
+
+static const struct of_device_id crypto_of_id_table[] = {
+	{ .compatible = "rockchip,rk3288-crypto" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, crypto_of_id_table);
+
+static int rk_crypto_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct rk_crypto_info *crypto_info;
+	int err = 0;
+
+	crypto_info = devm_kzalloc(&pdev->dev,
+				   sizeof(*crypto_info), GFP_KERNEL);
+	if (!crypto_info) {
+		err = -ENOMEM;
+		goto err_crypto;
+	}
+
+	crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
+	if (IS_ERR(crypto_info->rst)) {
+		err = PTR_ERR(crypto_info->rst);
+		goto err_crypto;
+	}
+
+	reset_control_assert(crypto_info->rst);
+	usleep_range(10, 20);
+	reset_control_deassert(crypto_info->rst);
+
+	err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
+	if (err)
+		goto err_crypto;
+
+	spin_lock_init(&crypto_info->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	crypto_info->reg = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(crypto_info->reg)) {
+		err = PTR_ERR(crypto_info->reg);
+		goto err_crypto;
+	}
+
+	crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
+	if (IS_ERR(crypto_info->aclk)) {
+		err = PTR_ERR(crypto_info->aclk);
+		goto err_crypto;
+	}
+
+	crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
+	if (IS_ERR(crypto_info->hclk)) {
+		err = PTR_ERR(crypto_info->hclk);
+		goto err_crypto;
+	}
+
+	crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
+	if (IS_ERR(crypto_info->sclk)) {
+		err = PTR_ERR(crypto_info->sclk);
+		goto err_crypto;
+	}
+
+	crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
+	if (IS_ERR(crypto_info->dmaclk)) {
+		err = PTR_ERR(crypto_info->dmaclk);
+		goto err_crypto;
+	}
+
+	crypto_info->irq = platform_get_irq(pdev, 0);
+	if (crypto_info->irq < 0) {
+		dev_warn(&pdev->dev,
+			 "control interrupt is not available\n");
+		err = crypto_info->irq;
+		goto err_crypto;
+	}
+
+	err = devm_request_irq(&pdev->dev, crypto_info->irq,
+			       rk_crypto_irq_handle, IRQF_SHARED,
+			       "rk-crypto", pdev);
+
+	if (err) {
+		dev_err(&pdev->dev, "irq request failed\n");
+		goto err_crypto;
+	}
+
+	crypto_info->dev = &pdev->dev;
+	platform_set_drvdata(pdev, crypto_info);
+
+	tasklet_init(&crypto_info->queue_task,
+		     rk_crypto_queue_task_cb, (unsigned long)crypto_info);
+	tasklet_init(&crypto_info->done_task,
+		     rk_crypto_done_task_cb, (unsigned long)crypto_info);
+	crypto_init_queue(&crypto_info->queue, 50);
+
+	crypto_info->enable_clk = rk_crypto_enable_clk;
+	crypto_info->disable_clk = rk_crypto_disable_clk;
+	crypto_info->load_data = rk_load_data;
+	crypto_info->unload_data = rk_unload_data;
+	crypto_info->enqueue = rk_crypto_enqueue;
+	crypto_info->busy = false;
+
+	err = rk_crypto_register(crypto_info);
+	if (err) {
+		dev_err(dev, "failed to register crypto algorithms\n");
+		goto err_register_alg;
+	}
+
+	dev_info(dev, "Crypto Accelerator successfully registered\n");
+	return 0;
+
+err_register_alg:
+	tasklet_kill(&crypto_info->queue_task);
+	tasklet_kill(&crypto_info->done_task);
+err_crypto:
+	return err;
+}
+
+static int rk_crypto_remove(struct platform_device *pdev)
+{
+	struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
+
+	rk_crypto_unregister();
+	tasklet_kill(&crypto_tmp->done_task);
+	tasklet_kill(&crypto_tmp->queue_task);
+	return 0;
+}
+
+static struct platform_driver crypto_driver = {
+	.probe		= rk_crypto_probe,
+	.remove		= rk_crypto_remove,
+	.driver		= {
+		.name	= "rk3288-crypto",
+		.of_match_table	= crypto_of_id_table,
+	},
+};
+
+module_platform_driver(crypto_driver);
+
+MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
+MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
new file mode 100644
index 0000000..d5fb401
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __RK3288_CRYPTO_H__
+#define __RK3288_CRYPTO_H__
+
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/algapi.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <crypto/internal/hash.h>
+
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+
+#define _SBF(v, f)			((v) << (f))
+
+/* Crypto control registers */
+#define RK_CRYPTO_INTSTS		0x0000
+#define RK_CRYPTO_PKA_DONE_INT		BIT(5)
+#define RK_CRYPTO_HASH_DONE_INT		BIT(4)
+#define RK_CRYPTO_HRDMA_ERR_INT		BIT(3)
+#define RK_CRYPTO_HRDMA_DONE_INT	BIT(2)
+#define RK_CRYPTO_BCDMA_ERR_INT		BIT(1)
+#define RK_CRYPTO_BCDMA_DONE_INT	BIT(0)
+
+#define RK_CRYPTO_INTENA		0x0004
+#define RK_CRYPTO_PKA_DONE_ENA		BIT(5)
+#define RK_CRYPTO_HASH_DONE_ENA		BIT(4)
+#define RK_CRYPTO_HRDMA_ERR_ENA		BIT(3)
+#define RK_CRYPTO_HRDMA_DONE_ENA	BIT(2)
+#define RK_CRYPTO_BCDMA_ERR_ENA		BIT(1)
+#define RK_CRYPTO_BCDMA_DONE_ENA	BIT(0)
+
+#define RK_CRYPTO_CTRL			0x0008
+#define RK_CRYPTO_WRITE_MASK		_SBF(0xFFFF, 16)
+#define RK_CRYPTO_TRNG_FLUSH		BIT(9)
+#define RK_CRYPTO_TRNG_START		BIT(8)
+#define RK_CRYPTO_PKA_FLUSH		BIT(7)
+#define RK_CRYPTO_HASH_FLUSH		BIT(6)
+#define RK_CRYPTO_BLOCK_FLUSH		BIT(5)
+#define RK_CRYPTO_PKA_START		BIT(4)
+#define RK_CRYPTO_HASH_START		BIT(3)
+#define RK_CRYPTO_BLOCK_START		BIT(2)
+#define RK_CRYPTO_TDES_START		BIT(1)
+#define RK_CRYPTO_AES_START		BIT(0)
+
+#define RK_CRYPTO_CONF			0x000c
+/* HASH Receive DMA Address Mode:   fix | increment */
+#define RK_CRYPTO_HR_ADDR_MODE		BIT(8)
+/* Block Transmit DMA Address Mode: fix | increment */
+#define RK_CRYPTO_BT_ADDR_MODE		BIT(7)
+/* Block Receive DMA Address Mode:  fix | increment */
+#define RK_CRYPTO_BR_ADDR_MODE		BIT(6)
+#define RK_CRYPTO_BYTESWAP_HRFIFO	BIT(5)
+#define RK_CRYPTO_BYTESWAP_BTFIFO	BIT(4)
+#define RK_CRYPTO_BYTESWAP_BRFIFO	BIT(3)
+/* AES = 0 OR DES = 1 */
+#define RK_CRYPTO_DESSEL				BIT(2)
+#define RK_CYYPTO_HASHINSEL_INDEPENDENT_SOURCE		_SBF(0x00, 0)
+#define RK_CYYPTO_HASHINSEL_BLOCK_CIPHER_INPUT		_SBF(0x01, 0)
+#define RK_CYYPTO_HASHINSEL_BLOCK_CIPHER_OUTPUT		_SBF(0x02, 0)
+
+/* Block Receiving DMA Start Address Register */
+#define RK_CRYPTO_BRDMAS		0x0010
+/* Block Transmitting DMA Start Address Register */
+#define RK_CRYPTO_BTDMAS		0x0014
+/* Block Receiving DMA Length Register */
+#define RK_CRYPTO_BRDMAL		0x0018
+/* Hash Receiving DMA Start Address Register */
+#define RK_CRYPTO_HRDMAS		0x001c
+/* Hash Receiving DMA Length Register */
+#define RK_CRYPTO_HRDMAL		0x0020
+
+/* AES registers */
+#define RK_CRYPTO_AES_CTRL			  0x0080
+#define RK_CRYPTO_AES_BYTESWAP_CNT	BIT(11)
+#define RK_CRYPTO_AES_BYTESWAP_KEY	BIT(10)
+#define RK_CRYPTO_AES_BYTESWAP_IV	BIT(9)
+#define RK_CRYPTO_AES_BYTESWAP_DO	BIT(8)
+#define RK_CRYPTO_AES_BYTESWAP_DI	BIT(7)
+#define RK_CRYPTO_AES_KEY_CHANGE	BIT(6)
+#define RK_CRYPTO_AES_ECB_MODE		_SBF(0x00, 4)
+#define RK_CRYPTO_AES_CBC_MODE		_SBF(0x01, 4)
+#define RK_CRYPTO_AES_CTR_MODE		_SBF(0x02, 4)
+#define RK_CRYPTO_AES_128BIT_key	_SBF(0x00, 2)
+#define RK_CRYPTO_AES_192BIT_key	_SBF(0x01, 2)
+#define RK_CRYPTO_AES_256BIT_key	_SBF(0x02, 2)
+/* Slave = 0 / fifo = 1 */
+#define RK_CRYPTO_AES_FIFO_MODE		BIT(1)
+/* Encryption = 0 , Decryption = 1 */
+#define RK_CRYPTO_AES_DEC		BIT(0)
+
+#define RK_CRYPTO_AES_STS		0x0084
+#define RK_CRYPTO_AES_DONE		BIT(0)
+
+/* AES Input Data 0-3 Register */
+#define RK_CRYPTO_AES_DIN_0		0x0088
+#define RK_CRYPTO_AES_DIN_1		0x008c
+#define RK_CRYPTO_AES_DIN_2		0x0090
+#define RK_CRYPTO_AES_DIN_3		0x0094
+
+/* AES output Data 0-3 Register */
+#define RK_CRYPTO_AES_DOUT_0		0x0098
+#define RK_CRYPTO_AES_DOUT_1		0x009c
+#define RK_CRYPTO_AES_DOUT_2		0x00a0
+#define RK_CRYPTO_AES_DOUT_3		0x00a4
+
+/* AES IV Data 0-3 Register */
+#define RK_CRYPTO_AES_IV_0		0x00a8
+#define RK_CRYPTO_AES_IV_1		0x00ac
+#define RK_CRYPTO_AES_IV_2		0x00b0
+#define RK_CRYPTO_AES_IV_3		0x00b4
+
+/* AES Key Data 0-3 Register */
+#define RK_CRYPTO_AES_KEY_0		0x00b8
+#define RK_CRYPTO_AES_KEY_1		0x00bc
+#define RK_CRYPTO_AES_KEY_2		0x00c0
+#define RK_CRYPTO_AES_KEY_3		0x00c4
+#define RK_CRYPTO_AES_KEY_4		0x00c8
+#define RK_CRYPTO_AES_KEY_5		0x00cc
+#define RK_CRYPTO_AES_KEY_6		0x00d0
+#define RK_CRYPTO_AES_KEY_7		0x00d4
+
+/* des/tdes */
+#define RK_CRYPTO_TDES_CTRL		0x0100
+#define RK_CRYPTO_TDES_BYTESWAP_KEY	BIT(8)
+#define RK_CRYPTO_TDES_BYTESWAP_IV	BIT(7)
+#define RK_CRYPTO_TDES_BYTESWAP_DO	BIT(6)
+#define RK_CRYPTO_TDES_BYTESWAP_DI	BIT(5)
+/* 0: ECB, 1: CBC */
+#define RK_CRYPTO_TDES_CHAINMODE_CBC	BIT(4)
+/* TDES Key Mode, 0 : EDE, 1 : EEE */
+#define RK_CRYPTO_TDES_EEE		BIT(3)
+/* 0: DES, 1:TDES */
+#define RK_CRYPTO_TDES_SELECT		BIT(2)
+/* 0: Slave, 1:Fifo */
+#define RK_CRYPTO_TDES_FIFO_MODE	BIT(1)
+/* Encryption = 0 , Decryption = 1 */
+#define RK_CRYPTO_TDES_DEC		BIT(0)
+
+#define RK_CRYPTO_TDES_STS		0x0104
+#define RK_CRYPTO_TDES_DONE		BIT(0)
+
+#define RK_CRYPTO_TDES_DIN_0		0x0108
+#define RK_CRYPTO_TDES_DIN_1		0x010c
+#define RK_CRYPTO_TDES_DOUT_0		0x0110
+#define RK_CRYPTO_TDES_DOUT_1		0x0114
+#define RK_CRYPTO_TDES_IV_0		0x0118
+#define RK_CRYPTO_TDES_IV_1		0x011c
+#define RK_CRYPTO_TDES_KEY1_0		0x0120
+#define RK_CRYPTO_TDES_KEY1_1		0x0124
+#define RK_CRYPTO_TDES_KEY2_0		0x0128
+#define RK_CRYPTO_TDES_KEY2_1		0x012c
+#define RK_CRYPTO_TDES_KEY3_0		0x0130
+#define RK_CRYPTO_TDES_KEY3_1		0x0134
+
+/* HASH */
+#define RK_CRYPTO_HASH_CTRL		0x0180
+#define RK_CRYPTO_HASH_SWAP_DO		BIT(3)
+#define RK_CRYPTO_HASH_SWAP_DI		BIT(2)
+#define RK_CRYPTO_HASH_SHA1		_SBF(0x00, 0)
+#define RK_CRYPTO_HASH_MD5		_SBF(0x01, 0)
+#define RK_CRYPTO_HASH_SHA256		_SBF(0x02, 0)
+#define RK_CRYPTO_HASH_PRNG		_SBF(0x03, 0)
+
+#define RK_CRYPTO_HASH_STS		0x0184
+#define RK_CRYPTO_HASH_DONE		BIT(0)
+
+#define RK_CRYPTO_HASH_MSG_LEN		0x0188
+#define RK_CRYPTO_HASH_DOUT_0		0x018c
+#define RK_CRYPTO_HASH_DOUT_1		0x0190
+#define RK_CRYPTO_HASH_DOUT_2		0x0194
+#define RK_CRYPTO_HASH_DOUT_3		0x0198
+#define RK_CRYPTO_HASH_DOUT_4		0x019c
+#define RK_CRYPTO_HASH_DOUT_5		0x01a0
+#define RK_CRYPTO_HASH_DOUT_6		0x01a4
+#define RK_CRYPTO_HASH_DOUT_7		0x01a8
+
+#define CRYPTO_READ(dev, offset)		  \
+		readl_relaxed(((dev)->reg + (offset)))
+#define CRYPTO_WRITE(dev, offset, val)	  \
+		writel_relaxed((val), ((dev)->reg + (offset)))
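+
+/*
+ * All register access goes through these relaxed MMIO helpers. For
+ * example, the interrupt handler in rk3288_crypto.c acknowledges the
+ * pending interrupts with:
+ *
+ *	u32 sts = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
+ *	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, sts);
+ */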
+
+struct rk_crypto_info {
+	struct device			*dev;
+	struct clk			*aclk;
+	struct clk			*hclk;
+	struct clk			*sclk;
+	struct clk			*dmaclk;
+	struct reset_control		*rst;
+	void __iomem			*reg;
+	int				irq;
+	struct crypto_queue		queue;
+	struct tasklet_struct		queue_task;
+	struct tasklet_struct		done_task;
+	struct crypto_async_request	*async_req;
+	int				err;
+	/* device lock */
+	spinlock_t			lock;
+
+	/* the public variable */
+	struct scatterlist		*sg_src;
+	struct scatterlist		*sg_dst;
+	struct scatterlist		sg_tmp;
+	struct scatterlist		*first;
+	unsigned int			left_bytes;
+	void				*addr_vir;
+	int				aligned;
+	int				align_size;
+	size_t				nents;
+	unsigned int			total;
+	unsigned int			count;
+	dma_addr_t			addr_in;
+	dma_addr_t			addr_out;
+	bool				busy;
+	int (*start)(struct rk_crypto_info *dev);
+	int (*update)(struct rk_crypto_info *dev);
+	void (*complete)(struct crypto_async_request *base, int err);
+	int (*enable_clk)(struct rk_crypto_info *dev);
+	void (*disable_clk)(struct rk_crypto_info *dev);
+	int (*load_data)(struct rk_crypto_info *dev,
+			 struct scatterlist *sg_src,
+			 struct scatterlist *sg_dst);
+	void (*unload_data)(struct rk_crypto_info *dev);
+	int (*enqueue)(struct rk_crypto_info *dev,
+		       struct crypto_async_request *async_req);
+};
+
+/* the private variable of hash */
+struct rk_ahash_ctx {
+	struct rk_crypto_info		*dev;
+	/* for fallback */
+	struct crypto_ahash		*fallback_tfm;
+};
+
+/* the private variable of hash for fallback */
+struct rk_ahash_rctx {
+	struct ahash_request		fallback_req;
+	u32				mode;
+};
+
+/* the private variable of cipher */
+struct rk_cipher_ctx {
+	struct rk_crypto_info		*dev;
+	unsigned int			keylen;
+	u32				mode;
+};
+
+enum alg_type {
+	ALG_TYPE_HASH,
+	ALG_TYPE_CIPHER,
+};
+
+struct rk_crypto_tmp {
+	struct rk_crypto_info		*dev;
+	union {
+		struct crypto_alg	crypto;
+		struct ahash_alg	hash;
+	} alg;
+	enum alg_type			type;
+};
+
+extern struct rk_crypto_tmp rk_ecb_aes_alg;
+extern struct rk_crypto_tmp rk_cbc_aes_alg;
+extern struct rk_crypto_tmp rk_ecb_des_alg;
+extern struct rk_crypto_tmp rk_cbc_des_alg;
+extern struct rk_crypto_tmp rk_ecb_des3_ede_alg;
+extern struct rk_crypto_tmp rk_cbc_des3_ede_alg;
+
+extern struct rk_crypto_tmp rk_ahash_sha1;
+extern struct rk_crypto_tmp rk_ahash_sha256;
+extern struct rk_crypto_tmp rk_ahash_md5;
+
+#endif
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
new file mode 100644
index 0000000..639c15c
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -0,0 +1,508 @@
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
+ */
+#include "rk3288_crypto.h"
+
+#define RK_CRYPTO_DEC			BIT(0)
+
+static void rk_crypto_complete(struct crypto_async_request *base, int err)
+{
+	if (base->complete)
+		base->complete(base, err);
+}
+
+static int rk_handle_req(struct rk_crypto_info *dev,
+			 struct ablkcipher_request *req)
+{
+	if (!IS_ALIGNED(req->nbytes, dev->align_size))
+		return -EINVAL;
+
+	return dev->enqueue(dev, &req->base);
+}
+
+static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
+			 const u8 *key, unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->keylen = keylen;
+	memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
+	return 0;
+}
+
+static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
+			  const u8 *key, unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+
+	if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if (keylen == DES_KEY_SIZE) {
+		if (!des_ekey(tmp, key) &&
+		    (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+			return -EINVAL;
+		}
+	}
+
+	ctx->keylen = keylen;
+	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+	return 0;
+}
+
+static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	ctx->mode = RK_CRYPTO_AES_ECB_MODE;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	ctx->mode = RK_CRYPTO_AES_CBC_MODE;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	ctx->mode = 0;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	ctx->mode = RK_CRYPTO_DEC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	ctx->mode = RK_CRYPTO_TDES_SELECT;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+	return rk_handle_req(dev, req);
+}
+
+static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct rk_crypto_info *dev = ctx->dev;
+
+	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+		    RK_CRYPTO_DEC;
+	return rk_handle_req(dev, req);
+}
+
+static void rk_ablk_hw_init(struct rk_crypto_info *dev)
+{
+	struct ablkcipher_request *req =
+		ablkcipher_request_cast(dev->async_req);
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	u32 ivsize, block, conf_reg = 0;
+
+	block = crypto_tfm_alg_blocksize(tfm);
+	ivsize = crypto_ablkcipher_ivsize(cipher);
+
+	if (block == DES_BLOCK_SIZE) {
+		ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+			     RK_CRYPTO_TDES_BYTESWAP_KEY |
+			     RK_CRYPTO_TDES_BYTESWAP_IV;
+		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
+		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->info, ivsize);
+		conf_reg = RK_CRYPTO_DESSEL;
+	} else {
+		ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
+			     RK_CRYPTO_AES_KEY_CHANGE |
+			     RK_CRYPTO_AES_BYTESWAP_KEY |
+			     RK_CRYPTO_AES_BYTESWAP_IV;
+		if (ctx->keylen == AES_KEYSIZE_192)
+			ctx->mode |= RK_CRYPTO_AES_192BIT_key;
+		else if (ctx->keylen == AES_KEYSIZE_256)
+			ctx->mode |= RK_CRYPTO_AES_256BIT_key;
+		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
+		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->info, ivsize);
+	}
+	conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
+		    RK_CRYPTO_BYTESWAP_BRFIFO;
+	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
+	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
+		     RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
+}
+
+static void crypto_dma_start(struct rk_crypto_info *dev)
+{
+	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
+	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
+	CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
+	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
+		     _SBF(RK_CRYPTO_BLOCK_START, 16));
+}
+
+static int rk_set_data_start(struct rk_crypto_info *dev)
+{
+	int err;
+
+	err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
+	if (!err)
+		crypto_dma_start(dev);
+	return err;
+}
+
+static int rk_ablk_start(struct rk_crypto_info *dev)
+{
+	struct ablkcipher_request *req =
+		ablkcipher_request_cast(dev->async_req);
+	unsigned long flags;
+	int err = 0;
+
+	dev->left_bytes = req->nbytes;
+	dev->total = req->nbytes;
+	dev->sg_src = req->src;
+	dev->first = req->src;
+	dev->nents = sg_nents(req->src);
+	dev->sg_dst = req->dst;
+	dev->aligned = 1;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	rk_ablk_hw_init(dev);
+	err = rk_set_data_start(dev);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return err;
+}
+
+static void rk_iv_copyback(struct rk_crypto_info *dev)
+{
+	struct ablkcipher_request *req =
+		ablkcipher_request_cast(dev->async_req);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	u32 ivsize = crypto_ablkcipher_ivsize(tfm);
+
+	if (ivsize == DES_BLOCK_SIZE)
+		memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
+			      ivsize);
+	else if (ivsize == AES_BLOCK_SIZE)
+		memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
+}
+
+/* return:
+ *	true	an error occurred
+ *	false	no error, continue
+ */
+static int rk_ablk_rx(struct rk_crypto_info *dev)
+{
+	int err = 0;
+	struct ablkcipher_request *req =
+		ablkcipher_request_cast(dev->async_req);
+
+	dev->unload_data(dev);
+	if (!dev->aligned) {
+		if (!sg_pcopy_from_buffer(req->dst, dev->nents,
+					  dev->addr_vir, dev->count,
+					  dev->total - dev->left_bytes -
+					  dev->count)) {
+			err = -EINVAL;
+			goto out_rx;
+		}
+	}
+	if (dev->left_bytes) {
+		if (dev->aligned) {
+			if (sg_is_last(dev->sg_src)) {
+				dev_err(dev->dev, "[%s:%d] Lack of data\n",
+					__func__, __LINE__);
+				err = -ENOMEM;
+				goto out_rx;
+			}
+			dev->sg_src = sg_next(dev->sg_src);
+			dev->sg_dst = sg_next(dev->sg_dst);
+		}
+		err = rk_set_data_start(dev);
+	} else {
+		rk_iv_copyback(dev);
+		/* the whole calculation is finished without any error */
+		dev->complete(dev->async_req, 0);
+		tasklet_schedule(&dev->queue_task);
+	}
+out_rx:
+	return err;
+}
+
+static int rk_ablk_cra_init(struct crypto_tfm *tfm)
+{
+	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct rk_crypto_tmp *algt;
+
+	algt = container_of(alg, struct rk_crypto_tmp, alg.crypto);
+
+	ctx->dev = algt->dev;
+	ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
+	ctx->dev->start = rk_ablk_start;
+	ctx->dev->update = rk_ablk_rx;
+	ctx->dev->complete = rk_crypto_complete;
+	ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);
+
+	return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
+}
+
+static void rk_ablk_cra_exit(struct crypto_tfm *tfm)
+{
+	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	free_page((unsigned long)ctx->dev->addr_vir);
+	ctx->dev->disable_clk(ctx->dev);
+}
+
+struct rk_crypto_tmp rk_ecb_aes_alg = {
+	.type = ALG_TYPE_CIPHER,
+	.alg.crypto = {
+		.cra_name		= "ecb(aes)",
+		.cra_driver_name	= "ecb-aes-rk",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
+		.cra_alignmask		= 0x0f,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= rk_ablk_cra_init,
+		.cra_exit		= rk_ablk_cra_exit,
+		.cra_u.ablkcipher	= {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= rk_aes_setkey,
+			.encrypt	= rk_aes_ecb_encrypt,
+			.decrypt	= rk_aes_ecb_decrypt,
+		}
+	}
+};
+
+struct rk_crypto_tmp rk_cbc_aes_alg = {
+	.type = ALG_TYPE_CIPHER,
+	.alg.crypto = {
+		.cra_name		= "cbc(aes)",
+		.cra_driver_name	= "cbc-aes-rk",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
+		.cra_alignmask		= 0x0f,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= rk_ablk_cra_init,
+		.cra_exit		= rk_ablk_cra_exit,
+		.cra_u.ablkcipher	= {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
+			.setkey		= rk_aes_setkey,
+			.encrypt	= rk_aes_cbc_encrypt,
+			.decrypt	= rk_aes_cbc_decrypt,
+		}
+	}
+};
+
+struct rk_crypto_tmp rk_ecb_des_alg = {
+	.type = ALG_TYPE_CIPHER,
+	.alg.crypto = {
+		.cra_name		= "ecb(des)",
+		.cra_driver_name	= "ecb-des-rk",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= DES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
+		.cra_alignmask		= 0x07,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= rk_ablk_cra_init,
+		.cra_exit		= rk_ablk_cra_exit,
+		.cra_u.ablkcipher	= {
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.setkey		= rk_tdes_setkey,
+			.encrypt	= rk_des_ecb_encrypt,
+			.decrypt	= rk_des_ecb_decrypt,
+		}
+	}
+};
+
+struct rk_crypto_tmp rk_cbc_des_alg = {
+	.type = ALG_TYPE_CIPHER,
+	.alg.crypto = {
+		.cra_name		= "cbc(des)",
+		.cra_driver_name	= "cbc-des-rk",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= DES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
+		.cra_alignmask		= 0x07,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= rk_ablk_cra_init,
+		.cra_exit		= rk_ablk_cra_exit,
+		.cra_u.ablkcipher	= {
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.ivsize		= DES_BLOCK_SIZE,
+			.setkey		= rk_tdes_setkey,
+			.encrypt	= rk_des_cbc_encrypt,
+			.decrypt	= rk_des_cbc_decrypt,
+		}
+	}
+};
+
+struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
+	.type = ALG_TYPE_CIPHER,
+	.alg.crypto = {
+		.cra_name		= "ecb(des3_ede)",
+		.cra_driver_name	= "ecb-des3-ede-rk",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= DES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
+		.cra_alignmask		= 0x07,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= rk_ablk_cra_init,
+		.cra_exit		= rk_ablk_cra_exit,
+		.cra_u.ablkcipher	= {
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.ivsize		= DES_BLOCK_SIZE,
+			.setkey		= rk_tdes_setkey,
+			.encrypt	= rk_des3_ede_ecb_encrypt,
+			.decrypt	= rk_des3_ede_ecb_decrypt,
+		}
+	}
+};
+
+struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
+	.type = ALG_TYPE_CIPHER,
+	.alg.crypto = {
+		.cra_name		= "cbc(des3_ede)",
+		.cra_driver_name	= "cbc-des3-ede-rk",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= DES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
+		.cra_alignmask		= 0x07,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= rk_ablk_cra_init,
+		.cra_exit		= rk_ablk_cra_exit,
+		.cra_u.ablkcipher	= {
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.ivsize		= DES_BLOCK_SIZE,
+			.setkey		= rk_tdes_setkey,
+			.encrypt	= rk_des3_ede_cbc_encrypt,
+			.decrypt	= rk_des3_ede_cbc_decrypt,
+		}
+	}
+};
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
new file mode 100644
index 0000000..821a506
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -0,0 +1,405 @@
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
+ */
+#include "rk3288_crypto.h"
+
+/*
+ * The hardware cannot hash a zero-length message, so return the
+ * well-known digest of the empty message instead.
+ */
+
+static int zero_message_process(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	int rk_digest_size = crypto_ahash_digestsize(tfm);
+
+	switch (rk_digest_size) {
+	case SHA1_DIGEST_SIZE:
+		memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
+		break;
+	case SHA256_DIGEST_SIZE:
+		memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
+		break;
+	case MD5_DIGEST_SIZE:
+		memcpy(req->result, md5_zero_message_hash, rk_digest_size);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
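+
+/*
+ * For reference, the precomputed empty-message digests copied above are
+ * the standard values defined by the respective algorithms:
+ *	MD5:     d41d8cd98f00b204e9800998ecf8427e
+ *	SHA-1:   da39a3ee5e6b4b0d3255bfef95601890afd80709
+ *	SHA-256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ */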
+
+static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
+{
+	if (base->complete)
+		base->complete(base, err);
+}
+
+static void rk_ahash_reg_init(struct rk_crypto_info *dev)
+{
+	struct ahash_request *req = ahash_request_cast(dev->async_req);
+	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+	int reg_status = 0;
+
+	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
+		     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
+	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
+
+	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
+	reg_status &= (~RK_CRYPTO_HASH_FLUSH);
+	reg_status |= _SBF(0xffff, 16);
+	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
+
+	memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);
+
+	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
+					    RK_CRYPTO_HRDMA_DONE_ENA);
+
+	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
+					    RK_CRYPTO_HRDMA_DONE_INT);
+
+	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
+					       RK_CRYPTO_HASH_SWAP_DO);
+
+	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
+					  RK_CRYPTO_BYTESWAP_BRFIFO |
+					  RK_CRYPTO_BYTESWAP_BTFIFO);
+
+	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
+}
+
+static int rk_ahash_init(struct ahash_request *req)
+{
+	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags &
+					CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int rk_ahash_update(struct ahash_request *req)
+{
+	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags &
+					CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
+
+	return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int rk_ahash_final(struct ahash_request *req)
+{
+	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags &
+					CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.result = req->result;
+
+	return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int rk_ahash_finup(struct ahash_request *req)
+{
+	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags &
+					CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
+	rctx->fallback_req.result = req->result;
+
+	return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int rk_ahash_import(struct ahash_request *req, const void *in)
+{
+	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags &
+					CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+static int rk_ahash_export(struct ahash_request *req, void *out)
+{
+	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags &
+					CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
+static int rk_ahash_digest(struct ahash_request *req)
+{
+	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct rk_crypto_info *dev = tctx->dev;
+
+	if (!req->nbytes)
+		return zero_message_process(req);
+
+	return dev->enqueue(dev, &req->base);
+}
+
+static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
+{
+	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
+	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
+	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
+					  (RK_CRYPTO_HASH_START << 16));
+}
+
+static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
+{
+	int err;
+
+	err = dev->load_data(dev, dev->sg_src, NULL);
+	if (!err)
+		crypto_ahash_dma_start(dev);
+	return err;
+}
+
+static int rk_ahash_start(struct rk_crypto_info *dev)
+{
+	struct ahash_request *req = ahash_request_cast(dev->async_req);
+	struct crypto_ahash *tfm;
+	struct rk_ahash_rctx *rctx;
+
+	dev->total = req->nbytes;
+	dev->left_bytes = req->nbytes;
+	dev->aligned = 0;
+	dev->align_size = 4;
+	dev->sg_dst = NULL;
+	dev->sg_src = req->src;
+	dev->first = req->src;
+	dev->nents = sg_nents(req->src);
+	rctx = ahash_request_ctx(req);
+	rctx->mode = 0;
+
+	tfm = crypto_ahash_reqtfm(req);
+	switch (crypto_ahash_digestsize(tfm)) {
+	case SHA1_DIGEST_SIZE:
+		rctx->mode = RK_CRYPTO_HASH_SHA1;
+		break;
+	case SHA256_DIGEST_SIZE:
+		rctx->mode = RK_CRYPTO_HASH_SHA256;
+		break;
+	case MD5_DIGEST_SIZE:
+		rctx->mode = RK_CRYPTO_HASH_MD5;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rk_ahash_reg_init(dev);
+	return rk_ahash_set_data_start(dev);
+}
+
+static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
+{
+	int err = 0;
+	struct ahash_request *req = ahash_request_cast(dev->async_req);
+	struct crypto_ahash *tfm;
+
+	dev->unload_data(dev);
+	if (dev->left_bytes) {
+		if (dev->aligned) {
+			if (sg_is_last(dev->sg_src)) {
+				dev_warn(dev->dev, "[%s:%d], Lack of data\n",
+					 __func__, __LINE__);
+				err = -ENOMEM;
+				goto out_rx;
+			}
+			dev->sg_src = sg_next(dev->sg_src);
+		}
+		err = rk_ahash_set_data_start(dev);
+	} else {
+		/*
+		 * It takes some time to process the data after the last
+		 * DMA transfer completes.
+		 *
+		 * The waiting time depends on the length of that last
+		 * chunk, so a fixed delay cannot be used here. Polling
+		 * every 10us avoids calling in here too frequently and
+		 * wasting cycles, while still responding quickly once
+		 * the hash engine is done.
+		 */
+		while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
+			udelay(10);
+
+		tfm = crypto_ahash_reqtfm(req);
+		memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
+			      crypto_ahash_digestsize(tfm));
+		dev->complete(dev->async_req, 0);
+		tasklet_schedule(&dev->queue_task);
+	}
+
+out_rx:
+	return err;
+}
+
+static int rk_cra_hash_init(struct crypto_tfm *tfm)
+{
+	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+	struct rk_crypto_tmp *algt;
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+
+	const char *alg_name = crypto_tfm_alg_name(tfm);
+
+	algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+
+	tctx->dev = algt->dev;
+	tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
+	if (!tctx->dev->addr_vir) {
+		dev_err(tctx->dev->dev, "failed to allocate a page for addr_vir\n");
+		return -ENOMEM;
+	}
+	tctx->dev->start = rk_ahash_start;
+	tctx->dev->update = rk_ahash_crypto_rx;
+	tctx->dev->complete = rk_ahash_crypto_complete;
+
+	/* for fallback */
+	tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
+					       CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(tctx->fallback_tfm)) {
+		dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
+		return PTR_ERR(tctx->fallback_tfm);
+	}
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct rk_ahash_rctx) +
+				 crypto_ahash_reqsize(tctx->fallback_tfm));
+
+	return tctx->dev->enable_clk(tctx->dev);
+}
+
+static void rk_cra_hash_exit(struct crypto_tfm *tfm)
+{
+	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+
+	free_page((unsigned long)tctx->dev->addr_vir);
+	return tctx->dev->disable_clk(tctx->dev);
+}
+
+struct rk_crypto_tmp rk_ahash_sha1 = {
+	.type = ALG_TYPE_HASH,
+	.alg.hash = {
+		.init = rk_ahash_init,
+		.update = rk_ahash_update,
+		.final = rk_ahash_final,
+		.finup = rk_ahash_finup,
+		.export = rk_ahash_export,
+		.import = rk_ahash_import,
+		.digest = rk_ahash_digest,
+		.halg = {
+			 .digestsize = SHA1_DIGEST_SIZE,
+			 .statesize = sizeof(struct sha1_state),
+			 .base = {
+				  .cra_name = "sha1",
+				  .cra_driver_name = "rk-sha1",
+				  .cra_priority = 300,
+				  .cra_flags = CRYPTO_ALG_ASYNC |
+					       CRYPTO_ALG_NEED_FALLBACK,
+				  .cra_blocksize = SHA1_BLOCK_SIZE,
+				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+				  .cra_alignmask = 3,
+				  .cra_init = rk_cra_hash_init,
+				  .cra_exit = rk_cra_hash_exit,
+				  .cra_module = THIS_MODULE,
+				  }
+			 }
+	}
+};
+
+struct rk_crypto_tmp rk_ahash_sha256 = {
+	.type = ALG_TYPE_HASH,
+	.alg.hash = {
+		.init = rk_ahash_init,
+		.update = rk_ahash_update,
+		.final = rk_ahash_final,
+		.finup = rk_ahash_finup,
+		.export = rk_ahash_export,
+		.import = rk_ahash_import,
+		.digest = rk_ahash_digest,
+		.halg = {
+			 .digestsize = SHA256_DIGEST_SIZE,
+			 .statesize = sizeof(struct sha256_state),
+			 .base = {
+				  .cra_name = "sha256",
+				  .cra_driver_name = "rk-sha256",
+				  .cra_priority = 300,
+				  .cra_flags = CRYPTO_ALG_ASYNC |
+					       CRYPTO_ALG_NEED_FALLBACK,
+				  .cra_blocksize = SHA256_BLOCK_SIZE,
+				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+				  .cra_alignmask = 3,
+				  .cra_init = rk_cra_hash_init,
+				  .cra_exit = rk_cra_hash_exit,
+				  .cra_module = THIS_MODULE,
+				  }
+			 }
+	}
+};
+
+struct rk_crypto_tmp rk_ahash_md5 = {
+	.type = ALG_TYPE_HASH,
+	.alg.hash = {
+		.init = rk_ahash_init,
+		.update = rk_ahash_update,
+		.final = rk_ahash_final,
+		.finup = rk_ahash_finup,
+		.export = rk_ahash_export,
+		.import = rk_ahash_import,
+		.digest = rk_ahash_digest,
+		.halg = {
+			 .digestsize = MD5_DIGEST_SIZE,
+			 .statesize = sizeof(struct md5_state),
+			 .base = {
+				  .cra_name = "md5",
+				  .cra_driver_name = "rk-md5",
+				  .cra_priority = 300,
+				  .cra_flags = CRYPTO_ALG_ASYNC |
+					       CRYPTO_ALG_NEED_FALLBACK,
+				  .cra_blocksize = SHA1_BLOCK_SIZE,
+				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+				  .cra_alignmask = 3,
+				  .cra_init = rk_cra_hash_init,
+				  .cra_exit = rk_cra_hash_exit,
+				  .cra_module = THIS_MODULE,
+				  }
+			}
+	}
+};
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
new file mode 100644
index 0000000..faa2820
--- /dev/null
+++ b/drivers/crypto/s5p-sss.c
@@ -0,0 +1,2329 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Cryptographic API.
+//
+// Support for Samsung S5PV210 and Exynos HW acceleration.
+//
+// Copyright (C) 2011 NetUP Inc. All rights reserved.
+// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
+//
+// Hash part based on omap-sham.c driver.
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include <crypto/ctr.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/internal/hash.h>
+
+#define _SBF(s, v)			((v) << (s))
+
+/* Feed control registers */
+#define SSS_REG_FCINTSTAT		0x0000
+#define SSS_FCINTSTAT_HPARTINT		BIT(7)
+#define SSS_FCINTSTAT_HDONEINT		BIT(5)
+#define SSS_FCINTSTAT_BRDMAINT		BIT(3)
+#define SSS_FCINTSTAT_BTDMAINT		BIT(2)
+#define SSS_FCINTSTAT_HRDMAINT		BIT(1)
+#define SSS_FCINTSTAT_PKDMAINT		BIT(0)
+
+#define SSS_REG_FCINTENSET		0x0004
+#define SSS_FCINTENSET_HPARTINTENSET	BIT(7)
+#define SSS_FCINTENSET_HDONEINTENSET	BIT(5)
+#define SSS_FCINTENSET_BRDMAINTENSET	BIT(3)
+#define SSS_FCINTENSET_BTDMAINTENSET	BIT(2)
+#define SSS_FCINTENSET_HRDMAINTENSET	BIT(1)
+#define SSS_FCINTENSET_PKDMAINTENSET	BIT(0)
+
+#define SSS_REG_FCINTENCLR		0x0008
+#define SSS_FCINTENCLR_HPARTINTENCLR	BIT(7)
+#define SSS_FCINTENCLR_HDONEINTENCLR	BIT(5)
+#define SSS_FCINTENCLR_BRDMAINTENCLR	BIT(3)
+#define SSS_FCINTENCLR_BTDMAINTENCLR	BIT(2)
+#define SSS_FCINTENCLR_HRDMAINTENCLR	BIT(1)
+#define SSS_FCINTENCLR_PKDMAINTENCLR	BIT(0)
+
+#define SSS_REG_FCINTPEND		0x000C
+#define SSS_FCINTPEND_HPARTINTP		BIT(7)
+#define SSS_FCINTPEND_HDONEINTP		BIT(5)
+#define SSS_FCINTPEND_BRDMAINTP		BIT(3)
+#define SSS_FCINTPEND_BTDMAINTP		BIT(2)
+#define SSS_FCINTPEND_HRDMAINTP		BIT(1)
+#define SSS_FCINTPEND_PKDMAINTP		BIT(0)
+
+#define SSS_REG_FCFIFOSTAT		0x0010
+#define SSS_FCFIFOSTAT_BRFIFOFUL	BIT(7)
+#define SSS_FCFIFOSTAT_BRFIFOEMP	BIT(6)
+#define SSS_FCFIFOSTAT_BTFIFOFUL	BIT(5)
+#define SSS_FCFIFOSTAT_BTFIFOEMP	BIT(4)
+#define SSS_FCFIFOSTAT_HRFIFOFUL	BIT(3)
+#define SSS_FCFIFOSTAT_HRFIFOEMP	BIT(2)
+#define SSS_FCFIFOSTAT_PKFIFOFUL	BIT(1)
+#define SSS_FCFIFOSTAT_PKFIFOEMP	BIT(0)
+
+#define SSS_REG_FCFIFOCTRL		0x0014
+#define SSS_FCFIFOCTRL_DESSEL		BIT(2)
+#define SSS_HASHIN_INDEPENDENT		_SBF(0, 0x00)
+#define SSS_HASHIN_CIPHER_INPUT		_SBF(0, 0x01)
+#define SSS_HASHIN_CIPHER_OUTPUT	_SBF(0, 0x02)
+#define SSS_HASHIN_MASK			_SBF(0, 0x03)
+
+#define SSS_REG_FCBRDMAS		0x0020
+#define SSS_REG_FCBRDMAL		0x0024
+#define SSS_REG_FCBRDMAC		0x0028
+#define SSS_FCBRDMAC_BYTESWAP		BIT(1)
+#define SSS_FCBRDMAC_FLUSH		BIT(0)
+
+#define SSS_REG_FCBTDMAS		0x0030
+#define SSS_REG_FCBTDMAL		0x0034
+#define SSS_REG_FCBTDMAC		0x0038
+#define SSS_FCBTDMAC_BYTESWAP		BIT(1)
+#define SSS_FCBTDMAC_FLUSH		BIT(0)
+
+#define SSS_REG_FCHRDMAS		0x0040
+#define SSS_REG_FCHRDMAL		0x0044
+#define SSS_REG_FCHRDMAC		0x0048
+#define SSS_FCHRDMAC_BYTESWAP		BIT(1)
+#define SSS_FCHRDMAC_FLUSH		BIT(0)
+
+#define SSS_REG_FCPKDMAS		0x0050
+#define SSS_REG_FCPKDMAL		0x0054
+#define SSS_REG_FCPKDMAC		0x0058
+#define SSS_FCPKDMAC_BYTESWAP		BIT(3)
+#define SSS_FCPKDMAC_DESCEND		BIT(2)
+#define SSS_FCPKDMAC_TRANSMIT		BIT(1)
+#define SSS_FCPKDMAC_FLUSH		BIT(0)
+
+#define SSS_REG_FCPKDMAO		0x005C
+
+/* AES registers */
+#define SSS_REG_AES_CONTROL		0x00
+#define SSS_AES_BYTESWAP_DI		BIT(11)
+#define SSS_AES_BYTESWAP_DO		BIT(10)
+#define SSS_AES_BYTESWAP_IV		BIT(9)
+#define SSS_AES_BYTESWAP_CNT		BIT(8)
+#define SSS_AES_BYTESWAP_KEY		BIT(7)
+#define SSS_AES_KEY_CHANGE_MODE		BIT(6)
+#define SSS_AES_KEY_SIZE_128		_SBF(4, 0x00)
+#define SSS_AES_KEY_SIZE_192		_SBF(4, 0x01)
+#define SSS_AES_KEY_SIZE_256		_SBF(4, 0x02)
+#define SSS_AES_FIFO_MODE		BIT(3)
+#define SSS_AES_CHAIN_MODE_ECB		_SBF(1, 0x00)
+#define SSS_AES_CHAIN_MODE_CBC		_SBF(1, 0x01)
+#define SSS_AES_CHAIN_MODE_CTR		_SBF(1, 0x02)
+#define SSS_AES_MODE_DECRYPT		BIT(0)
+
+#define SSS_REG_AES_STATUS		0x04
+#define SSS_AES_BUSY			BIT(2)
+#define SSS_AES_INPUT_READY		BIT(1)
+#define SSS_AES_OUTPUT_READY		BIT(0)
+
+#define SSS_REG_AES_IN_DATA(s)		(0x10 + (s << 2))
+#define SSS_REG_AES_OUT_DATA(s)		(0x20 + (s << 2))
+#define SSS_REG_AES_IV_DATA(s)		(0x30 + (s << 2))
+#define SSS_REG_AES_CNT_DATA(s)		(0x40 + (s << 2))
+#define SSS_REG_AES_KEY_DATA(s)		(0x80 + (s << 2))
+
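+/*
+ * Register accessors: SSS_READ()/SSS_WRITE() address the feed control block
+ * via dev->ioaddr, SSS_AES_WRITE() addresses the AES block via dev->aes_ioaddr.
+ */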
+#define SSS_REG(dev, reg)		((dev)->ioaddr + (SSS_REG_##reg))
+#define SSS_READ(dev, reg)		__raw_readl(SSS_REG(dev, reg))
+#define SSS_WRITE(dev, reg, val)	__raw_writel((val), SSS_REG(dev, reg))
+
+#define SSS_AES_REG(dev, reg)		((dev)->aes_ioaddr + SSS_REG_##reg)
+#define SSS_AES_WRITE(dev, reg, val)    __raw_writel((val), \
+						SSS_AES_REG(dev, reg))
+
+/* Driver AES mode flags (stored per request in s5p_aes_reqctx::mode) */
+#define FLAGS_AES_DECRYPT		BIT(0)
+#define FLAGS_AES_MODE_MASK		_SBF(1, 0x03)
+#define FLAGS_AES_CBC			_SBF(1, 0x01)
+#define FLAGS_AES_CTR			_SBF(1, 0x02)
+
+#define AES_KEY_LEN			16
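+/* The AES feed control path handles a single request at a time */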
+#define CRYPTO_QUEUE_LEN		1
+
+/* HASH registers */
+#define SSS_REG_HASH_CTRL		0x00
+
+#define SSS_HASH_USER_IV_EN		BIT(5)
+#define SSS_HASH_INIT_BIT		BIT(4)
+#define SSS_HASH_ENGINE_SHA1		_SBF(1, 0x00)
+#define SSS_HASH_ENGINE_MD5		_SBF(1, 0x01)
+#define SSS_HASH_ENGINE_SHA256		_SBF(1, 0x02)
+
+#define SSS_HASH_ENGINE_MASK		_SBF(1, 0x03)
+
+#define SSS_REG_HASH_CTRL_PAUSE		0x04
+
+#define SSS_HASH_PAUSE			BIT(0)
+
+#define SSS_REG_HASH_CTRL_FIFO		0x08
+
+#define SSS_HASH_FIFO_MODE_DMA		BIT(0)
+#define SSS_HASH_FIFO_MODE_CPU		0
+
+#define SSS_REG_HASH_CTRL_SWAP		0x0C
+
+#define SSS_HASH_BYTESWAP_DI		BIT(3)
+#define SSS_HASH_BYTESWAP_DO		BIT(2)
+#define SSS_HASH_BYTESWAP_IV		BIT(1)
+#define SSS_HASH_BYTESWAP_KEY		BIT(0)
+
+#define SSS_REG_HASH_STATUS		0x10
+
+#define SSS_HASH_STATUS_MSG_DONE	BIT(6)
+#define SSS_HASH_STATUS_PARTIAL_DONE	BIT(4)
+#define SSS_HASH_STATUS_BUFFER_READY	BIT(0)
+
+#define SSS_REG_HASH_MSG_SIZE_LOW	0x20
+#define SSS_REG_HASH_MSG_SIZE_HIGH	0x24
+
+#define SSS_REG_HASH_PRE_MSG_SIZE_LOW	0x28
+#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH	0x2C
+
+#define SSS_REG_HASH_IV(s)		(0xB0 + ((s) << 2))
+#define SSS_REG_HASH_OUT(s)		(0x100 + ((s) << 2))
+
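+/* MD5, SHA-1 and SHA-256 all use a 64-byte block size */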
+#define HASH_BLOCK_SIZE			64
+#define HASH_REG_SIZEOF			4
+#define HASH_MD5_MAX_REG		(MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
+#define HASH_SHA1_MAX_REG		(SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
+#define HASH_SHA256_MAX_REG		(SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
+
+/*
+ * HASH bit numbers, used by the driver, set in dev->hash_flags with
+ * set_bit()/clear_bit() and tested with test_bit(), to keep the HASH
+ * state BUSY or FREE, or to signal state from the irq_handler to the
+ * hash_tasklet. The SGS bits track memory allocated for scatterlists.
+ */
+#define HASH_FLAGS_BUSY		0
+#define HASH_FLAGS_FINAL	1
+#define HASH_FLAGS_DMA_ACTIVE	2
+#define HASH_FLAGS_OUTPUT_READY	3
+#define HASH_FLAGS_DMA_READY	4
+#define HASH_FLAGS_SGS_COPIED	5
+#define HASH_FLAGS_SGS_ALLOCED	6
+
+/* HASH HW constants */
+#define BUFLEN			HASH_BLOCK_SIZE
+
+#define SSS_HASH_DMA_LEN_ALIGN	8
+#define SSS_HASH_DMA_ALIGN_MASK	(SSS_HASH_DMA_LEN_ALIGN - 1)
+
+#define SSS_HASH_QUEUE_LENGTH	10
+
+/**
+ * struct samsung_aes_variant - platform specific SSS driver data
+ * @aes_offset: AES register offset from SSS module's base.
+ * @hash_offset: HASH register offset from SSS module's base.
+ *
+ * Specifies the platform-specific configuration of the SSS module.
+ * Note: a separate structure is used for driver-specific platform data to
+ * allow for future expansion.
+ */
+struct samsung_aes_variant {
+	unsigned int			aes_offset;
+	unsigned int			hash_offset;
+};
+
+struct s5p_aes_reqctx {
+	unsigned long			mode;
+};
+
+struct s5p_aes_ctx {
+	struct s5p_aes_dev		*dev;
+
+	uint8_t				aes_key[AES_MAX_KEY_SIZE];
+	uint8_t				nonce[CTR_RFC3686_NONCE_SIZE];
+	int				keylen;
+};
+
+/**
+ * struct s5p_aes_dev - Crypto device state container
+ * @dev:	Associated device
+ * @clk:	Clock for accessing hardware
+ * @ioaddr:	Mapped IO memory region
+ * @aes_ioaddr:	Per-variant address of the AES block IO memory
+ * @irq_fc:	Feed control interrupt line
+ * @req:	Crypto request currently handled by the device
+ * @ctx:	Configuration for currently handled crypto request
+ * @sg_src:	Scatter list with source data for currently handled block
+ *		in device.  This is DMA-mapped into device.
+ * @sg_dst:	Scatter list with destination data for currently handled block
+ *		in device. This is DMA-mapped into device.
+ * @sg_src_cpy:	In case of unaligned access, copied scatter list
+ *		with source data.
+ * @sg_dst_cpy:	In case of unaligned access, copied scatter list
+ *		with destination data.
+ * @tasklet:	New request scheduling job
+ * @queue:	Crypto queue
+ * @busy:	Indicates whether the device is currently handling a request,
+ *		and thus using some of the fields from this state, such as
+ *		req, ctx and sg_src/dst (and their copies).  This essentially
+ *		protects against concurrent access to these fields.
+ * @lock:	Lock for protecting both access to device hardware registers
+ *		and fields related to current request (including the busy field).
+ * @res:	Resources for hash.
+ * @io_hash_base: Per-variant address of the HASH block IO memory.
+ * @hash_lock:	Lock for protecting the hash_req, hash_queue and hash_flags
+ *		variables.
+ * @hash_flags:	Flags for current HASH op.
+ * @hash_queue:	Async hash queue.
+ * @hash_tasklet: New HASH request scheduling job.
+ * @xmit_buf:	Buffer for current HASH request transfer into SSS block.
+ * @hash_req:	Current request being sent to the SSS HASH block.
+ * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
+ * @hash_sg_cnt: Counter for hash_sg_iter.
+ *
+ * @use_hash:	true if HASH algs enabled
+ */
+struct s5p_aes_dev {
+	struct device			*dev;
+	struct clk			*clk;
+	void __iomem			*ioaddr;
+	void __iomem			*aes_ioaddr;
+	int				irq_fc;
+
+	struct ablkcipher_request	*req;
+	struct s5p_aes_ctx		*ctx;
+	struct scatterlist		*sg_src;
+	struct scatterlist		*sg_dst;
+
+	struct scatterlist		*sg_src_cpy;
+	struct scatterlist		*sg_dst_cpy;
+
+	struct tasklet_struct		tasklet;
+	struct crypto_queue		queue;
+	bool				busy;
+	spinlock_t			lock;
+
+	struct resource			*res;
+	void __iomem			*io_hash_base;
+
+	spinlock_t			hash_lock; /* protect hash_ vars */
+	unsigned long			hash_flags;
+	struct crypto_queue		hash_queue;
+	struct tasklet_struct		hash_tasklet;
+
+	u8				xmit_buf[BUFLEN];
+	struct ahash_request		*hash_req;
+	struct scatterlist		*hash_sg_iter;
+	unsigned int			hash_sg_cnt;
+
+	bool				use_hash;
+};
+
+/**
+ * struct s5p_hash_reqctx - HASH request context
+ * @dd:		Associated device
+ * @op_update:	Current request operation (OP_UPDATE or OP_FINAL)
+ * @digcnt:	Number of bytes processed by the HW (not counting buffer[] bytes)
+ * @digest:	Digest message or IV for partial result
+ * @nregs:	Number of HW registers for digest or IV read/write
+ * @engine:	Bits for selecting type of HASH in SSS block
+ * @sg:		sg for DMA transfer
+ * @sg_len:	Length of sg for DMA transfer
+ * @sgl[]:	sg for joining buffer and req->src scatterlist
+ * @skip:	Skip offset in req->src for current op
+ * @total:	Total number of bytes for current request
+ * @finup:	Keep state for finup or final.
+ * @error:	Keep track of error.
+ * @bufcnt:	Number of bytes held in buffer[]
+ * @buffer[]:	Leftover byte(s) from the end of req->src in an UPDATE op
+ */
+struct s5p_hash_reqctx {
+	struct s5p_aes_dev	*dd;
+	bool			op_update;
+
+	u64			digcnt;
+	u8			digest[SHA256_DIGEST_SIZE];
+
+	unsigned int		nregs; /* digest_size / sizeof(reg) */
+	u32			engine;
+
+	struct scatterlist	*sg;
+	unsigned int		sg_len;
+	struct scatterlist	sgl[2];
+	unsigned int		skip;
+	unsigned int		total;
+	bool			finup;
+	bool			error;
+
+	u32			bufcnt;
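+	/* buffer[] has BUFLEN bytes, reserved via crypto_ahash_set_reqsize() */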
+	u8			buffer[0];
+};
+
+/**
+ * struct s5p_hash_ctx - HASH transformation context
+ * @dd:		Associated device
+ * @flags:	Bits for the HASH algorithm.
+ * @fallback:	Software transformation for zero message or size < BUFLEN.
+ */
+struct s5p_hash_ctx {
+	struct s5p_aes_dev	*dd;
+	unsigned long		flags;
+	struct crypto_shash	*fallback;
+};
+
+static const struct samsung_aes_variant s5p_aes_data = {
+	.aes_offset	= 0x4000,
+	.hash_offset	= 0x6000,
+};
+
+static const struct samsung_aes_variant exynos_aes_data = {
+	.aes_offset	= 0x200,
+	.hash_offset	= 0x400,
+};
+
+static const struct of_device_id s5p_sss_dt_match[] = {
+	{
+		.compatible = "samsung,s5pv210-secss",
+		.data = &s5p_aes_data,
+	},
+	{
+		.compatible = "samsung,exynos4210-secss",
+		.data = &exynos_aes_data,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
+
+static inline const struct samsung_aes_variant *find_s5p_sss_version
+				   (const struct platform_device *pdev)
+{
+	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
+		const struct of_device_id *match;
+
+		match = of_match_node(s5p_sss_dt_match,
+					pdev->dev.of_node);
+		return (const struct samsung_aes_variant *)match->data;
+	}
+	return (const struct samsung_aes_variant *)
+			platform_get_device_id(pdev)->driver_data;
+}
+
+static struct s5p_aes_dev *s5p_dev;
+
+static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
+			       const struct scatterlist *sg)
+{
+	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
+	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
+}
+
+static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
+				const struct scatterlist *sg)
+{
+	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
+	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
+}
+
+static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
+{
+	int len;
+
+	if (!*sg)
+		return;
+
+	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+	free_pages((unsigned long)sg_virt(*sg), get_order(len));
+
+	kfree(*sg);
+	*sg = NULL;
+}
+
+static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
+			    unsigned int nbytes, int out)
+{
+	struct scatter_walk walk;
+
+	if (!nbytes)
+		return;
+
+	scatterwalk_start(&walk, sg);
+	scatterwalk_copychunks(buf, &walk, nbytes, out);
+	scatterwalk_done(&walk, out, 0);
+}
+
+static void s5p_sg_done(struct s5p_aes_dev *dev)
+{
+	if (dev->sg_dst_cpy) {
+		dev_dbg(dev->dev,
+			"Copying %d bytes of output data back to original place\n",
+			dev->req->nbytes);
+		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
+				dev->req->nbytes, 1);
+	}
+	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
+	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+}
+
+/* Calls the completion. Cannot be called with dev->lock held. */
+static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+{
+	dev->req->base.complete(&dev->req->base, err);
+}
+
+static void s5p_unset_outdata(struct s5p_aes_dev *dev)
+{
+	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
+}
+
+static void s5p_unset_indata(struct s5p_aes_dev *dev)
+{
+	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
+}
+
+static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
+			    struct scatterlist **dst)
+{
+	void *pages;
+	int len;
+
+	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
+	if (!*dst)
+		return -ENOMEM;
+
+	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
+	if (!pages) {
+		kfree(*dst);
+		*dst = NULL;
+		return -ENOMEM;
+	}
+
+	s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
+
+	sg_init_table(*dst, 1);
+	sg_set_buf(*dst, pages, len);
+
+	return 0;
+}
+
+static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
+{
+	int err;
+
+	if (!sg->length) {
+		err = -EINVAL;
+		goto exit;
+	}
+
+	err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
+	if (!err) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	dev->sg_dst = sg;
+	err = 0;
+
+exit:
+	return err;
+}
+
+static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
+{
+	int err;
+
+	if (!sg->length) {
+		err = -EINVAL;
+		goto exit;
+	}
+
+	err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
+	if (!err) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	dev->sg_src = sg;
+	err = 0;
+
+exit:
+	return err;
+}
+
+/*
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new transmitting (output) data is ready and its address+length
+ *     have to be written to device (by calling s5p_set_dma_outdata()).
+ */
+static int s5p_aes_tx(struct s5p_aes_dev *dev)
+{
+	int ret = 0;
+
+	s5p_unset_outdata(dev);
+
+	if (!sg_is_last(dev->sg_dst)) {
+		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
+		if (!ret)
+			ret = 1;
+	}
+
+	return ret;
+}
+
+/*
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new receiving (input) data is ready and its address+length
+ *     have to be written to device (by calling s5p_set_dma_indata()).
+ */
+static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
+{
+	int ret = 0;
+
+	s5p_unset_indata(dev);
+
+	if (!sg_is_last(dev->sg_src)) {
+		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
+		if (!ret)
+			ret = 1;
+	}
+
+	return ret;
+}
+
+static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
+{
+	return __raw_readl(dd->io_hash_base + offset);
+}
+
+static inline void s5p_hash_write(struct s5p_aes_dev *dd,
+				  u32 offset, u32 value)
+{
+	__raw_writel(value, dd->io_hash_base + offset);
+}
+
+/**
+ * s5p_set_dma_hashdata() - start DMA with sg
+ * @dev:	device
+ * @sg:		scatterlist ready for DMA transfer
+ */
+static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
+				 const struct scatterlist *sg)
+{
+	dev->hash_sg_cnt--;
+	SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
+	SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
+}
+
+/**
+ * s5p_hash_rx() - get next hash_sg_iter
+ * @dev:	device
+ *
+ * Return:
+ * 2	if there is no more data and it is UPDATE op
+ * 1	if new receiving (input) data is ready and can be written to device
+ * 0	if there is no more data and it is FINAL op
+ */
+static int s5p_hash_rx(struct s5p_aes_dev *dev)
+{
+	if (dev->hash_sg_cnt > 0) {
+		dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
+		return 1;
+	}
+
+	set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
+	if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
+		return 0;
+
+	return 2;
+}
+
+static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
+{
+	struct platform_device *pdev = dev_id;
+	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
+	int err_dma_tx = 0;
+	int err_dma_rx = 0;
+	int err_dma_hx = 0;
+	bool tx_end = false;
+	bool hx_end = false;
+	unsigned long flags;
+	uint32_t status;
+	u32 st_bits;
+	int err;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/*
+	 * Handle the rx or tx interrupt. If there is still data (the
+	 * scatterlist has not reached its end), map the next scatterlist
+	 * entry. If that mapping fails, s5p_aes_complete() should be called
+	 * with the error.
+	 *
+	 * If there is no more data in the tx scatterlist, call
+	 * s5p_aes_complete() and schedule a new tasklet run.
+	 *
+	 * Handle the hx (hash) interrupt: if there is still data, map the
+	 * next entry.
+	 */
+	status = SSS_READ(dev, FCINTSTAT);
+	if (status & SSS_FCINTSTAT_BRDMAINT)
+		err_dma_rx = s5p_aes_rx(dev);
+
+	if (status & SSS_FCINTSTAT_BTDMAINT) {
+		if (sg_is_last(dev->sg_dst))
+			tx_end = true;
+		err_dma_tx = s5p_aes_tx(dev);
+	}
+
+	if (status & SSS_FCINTSTAT_HRDMAINT)
+		err_dma_hx = s5p_hash_rx(dev);
+
+	st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
+				SSS_FCINTSTAT_HRDMAINT);
+	/* clear DMA bits */
+	SSS_WRITE(dev, FCINTPEND, st_bits);
+
+	/* clear HASH irq bits */
+	if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
+		/* cannot have both HPART and HDONE */
+		if (status & SSS_FCINTSTAT_HPARTINT)
+			st_bits = SSS_HASH_STATUS_PARTIAL_DONE;
+
+		if (status & SSS_FCINTSTAT_HDONEINT)
+			st_bits = SSS_HASH_STATUS_MSG_DONE;
+
+		set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
+		s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
+		hx_end = true;
+		/* when DONE or PART, do not handle HASH DMA */
+		err_dma_hx = 0;
+	}
+
+	if (err_dma_rx < 0) {
+		err = err_dma_rx;
+		goto error;
+	}
+	if (err_dma_tx < 0) {
+		err = err_dma_tx;
+		goto error;
+	}
+
+	if (tx_end) {
+		s5p_sg_done(dev);
+		if (err_dma_hx == 1)
+			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		s5p_aes_complete(dev, 0);
+		/* Device is still busy */
+		tasklet_schedule(&dev->tasklet);
+	} else {
+		/*
+		 * Writing length of DMA block (either receiving or
+		 * transmitting) will start the operation immediately, so this
+		 * should be done at the end (even after clearing pending
+		 * interrupts to not miss the interrupt).
+		 */
+		if (err_dma_tx == 1)
+			s5p_set_dma_outdata(dev, dev->sg_dst);
+		if (err_dma_rx == 1)
+			s5p_set_dma_indata(dev, dev->sg_src);
+		if (err_dma_hx == 1)
+			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+	}
+
+	goto hash_irq_end;
+
+error:
+	s5p_sg_done(dev);
+	dev->busy = false;
+	if (err_dma_hx == 1)
+		s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+	s5p_aes_complete(dev, err);
+
+hash_irq_end:
+	/*
+	 * Note about the else if below:
+	 *   when hash_sg_iter reaches its end and this is an UPDATE op,
+	 *   issue SSS_HASH_PAUSE and wait for the HPART irq
+	 */
+	if (hx_end)
+		tasklet_schedule(&dev->hash_tasklet);
+	else if (err_dma_hx == 2)
+		s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
+			       SSS_HASH_PAUSE);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * s5p_hash_read_msg() - read message or IV from HW
+ * @req:	AHASH request
+ */
+static void s5p_hash_read_msg(struct ahash_request *req)
+{
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+	struct s5p_aes_dev *dd = ctx->dd;
+	u32 *hash = (u32 *)ctx->digest;
+	unsigned int i;
+
+	for (i = 0; i < ctx->nregs; i++)
+		hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
+}
+
+/**
+ * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
+ * @dd:		device
+ * @ctx:	request context
+ */
+static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
+				  const struct s5p_hash_reqctx *ctx)
+{
+	const u32 *hash = (const u32 *)ctx->digest;
+	unsigned int i;
+
+	for (i = 0; i < ctx->nregs; i++)
+		s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
+}
+
+/**
+ * s5p_hash_write_iv() - write IV for next partial/finup op.
+ * @req:	AHASH request
+ */
+static void s5p_hash_write_iv(struct ahash_request *req)
+{
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+	s5p_hash_write_ctx_iv(ctx->dd, ctx);
+}
+
+/**
+ * s5p_hash_copy_result() - copy digest into req->result
+ * @req:	AHASH request
+ */
+static void s5p_hash_copy_result(struct ahash_request *req)
+{
+	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+	if (!req->result)
+		return;
+
+	memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
+}
+
+/**
+ * s5p_hash_dma_flush() - flush HASH DMA
+ * @dev:	secss device
+ */
+static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
+{
+	SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
+}
+
+/**
+ * s5p_hash_dma_enable() - enable DMA mode for HASH
+ * @dev:	secss device
+ *
+ * enable DMA mode for HASH
+ */
+static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
+{
+	s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
+}
+
+/**
+ * s5p_hash_irq_disable() - disable HASH irq signals
+ * @dev:	secss device
+ * @flags:	bitfield with the irqs to be disabled
+ */
+static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
+{
+	SSS_WRITE(dev, FCINTENCLR, flags);
+}
+
+/**
+ * s5p_hash_irq_enable() - enable HASH irq signals
+ * @dev:	secss device
+ * @flags:	bitfield with the irqs to be enabled
+ */
+static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
+{
+	SSS_WRITE(dev, FCINTENSET, flags);
+}
+
+/**
+ * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
+ * @dev:	secss device
+ * @hashflow:	HASH stream flow with/without crypto AES/DES
+ */
+static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
+{
+	unsigned long flags;
+	u32 flow;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	flow = SSS_READ(dev, FCFIFOCTRL);
+	flow &= ~SSS_HASHIN_MASK;
+	flow |= hashflow;
+	SSS_WRITE(dev, FCFIFOCTRL, flow);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/**
+ * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
+ * @dev:	secss device
+ * @hashflow:	HASH stream flow with/without AES/DES
+ *
+ * Flush the HASH DMA and enable DMA mode, set the HASH stream flow inside
+ * the SecSS HW, and enable the HASH irqs HRDMA, HDONE and HPART.
+ */
+static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
+{
+	s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
+			     SSS_FCINTENCLR_HDONEINTENCLR |
+			     SSS_FCINTENCLR_HPARTINTENCLR);
+	s5p_hash_dma_flush(dev);
+
+	s5p_hash_dma_enable(dev);
+	s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
+	s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
+			    SSS_FCINTENSET_HDONEINTENSET |
+			    SSS_FCINTENSET_HPARTINTENSET);
+}
+
+/**
+ * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
+ * @dd:		secss device
+ * @length:	length for request
+ * @final:	true if final op
+ *
+ * Prepare the SSS HASH block for processing bytes in DMA mode. If called
+ * after previous updates, fill in the IV words. For a final op, calculate
+ * and set the lengths for HASH so SecSS can finalize the hash. For a
+ * partial op, set the SSS HASH length to 2^63 so it is never reached, and
+ * set prelow and prehigh to zero.
+ *
+ * This function does not start DMA transfer.
+ */
+static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
+				bool final)
+{
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+	u32 prelow, prehigh, low, high;
+	u32 configflags, swapflags;
+	u64 tmplen;
+
+	configflags = ctx->engine | SSS_HASH_INIT_BIT;
+
+	if (likely(ctx->digcnt)) {
+		s5p_hash_write_ctx_iv(dd, ctx);
+		configflags |= SSS_HASH_USER_IV_EN;
+	}
+
+	if (final) {
+		/* number of bytes for last part */
+		low = length;
+		high = 0;
+		/* total number of bits prev hashed */
+		tmplen = ctx->digcnt * 8;
+		prelow = (u32)tmplen;
+		prehigh = (u32)(tmplen >> 32);
+	} else {
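+		/*
+		 * Partial op: report a zero pre-message size and a total
+		 * message size of 2^63 (BIT(31) in the high word) so the
+		 * engine never reaches the end of the message.
+		 */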
+		prelow = 0;
+		prehigh = 0;
+		low = 0;
+		high = BIT(31);
+	}
+
+	swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
+		    SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;
+
+	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
+	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
+	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
+	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);
+
+	s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
+	s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
+}
+
+/**
+ * s5p_hash_xmit_dma() - start DMA hash processing
+ * @dd:		secss device
+ * @length:	length for request
+ * @final:	true if final op
+ *
+ * Update digcnt here, as it is needed for finup/final op.
+ */
+static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
+			     bool final)
+{
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+	unsigned int cnt;
+
+	cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
+	if (!cnt) {
+		dev_err(dd->dev, "dma_map_sg error\n");
+		ctx->error = true;
+		return -EINVAL;
+	}
+
+	set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
+	dd->hash_sg_iter = ctx->sg;
+	dd->hash_sg_cnt = cnt;
+	s5p_hash_write_ctrl(dd, length, final);
+	ctx->digcnt += length;
+	ctx->total -= length;
+
+	/* catch last interrupt */
+	if (final)
+		set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);
+
+	s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */
+
+	return -EINPROGRESS;
+}
+
+/**
+ * s5p_hash_copy_sgs() - copy request's bytes into new buffer
+ * @ctx:	request context
+ * @sg:		source scatterlist request
+ * @new_len:	number of bytes to process from sg
+ *
+ * Allocate a new buffer and copy the HASH data into it. If xmit_buf was
+ * filled, copy it first, then append the data from sg. Prepare a single
+ * sgl[0] entry pointing at the allocated buffer.
+ *
+ * Set a bit in dd->hash_flags so the buffer can be freed after the irq
+ * finishes processing.
+ */
+static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
+			     struct scatterlist *sg, unsigned int new_len)
+{
+	unsigned int pages, len;
+	void *buf;
+
+	len = new_len + ctx->bufcnt;
+	pages = get_order(len);
+
+	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
+	if (!buf) {
+		dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
+		ctx->error = true;
+		return -ENOMEM;
+	}
+
+	if (ctx->bufcnt)
+		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
+
+	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
+				 new_len, 0);
+	sg_init_table(ctx->sgl, 1);
+	sg_set_buf(ctx->sgl, buf, len);
+	ctx->sg = ctx->sgl;
+	ctx->sg_len = 1;
+	ctx->bufcnt = 0;
+	ctx->skip = 0;
+	set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
+
+	return 0;
+}
+
+/**
+ * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
+ * @ctx:	request context
+ * @sg:		source scatterlist request
+ * @new_len:	number of bytes to process from sg
+ *
+ * Allocate a new scatterlist table and describe the HASH data with it. If
+ * xmit_buf was filled, add an entry for it first, then copy the page,
+ * length and offset of each source sg entry, adjusting the beginning
+ * and/or end for the skip offset and the hash_later value.
+ *
+ * The resulting sg table is assigned to ctx->sg. A flag is set so it can
+ * be freed after the irq finishes processing.
+ */
+static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
+				  struct scatterlist *sg, unsigned int new_len)
+{
+	unsigned int skip = ctx->skip, n = sg_nents(sg);
+	struct scatterlist *tmp;
+	unsigned int len;
+
+	if (ctx->bufcnt)
+		n++;
+
+	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
+	if (!ctx->sg) {
+		ctx->error = true;
+		return -ENOMEM;
+	}
+
+	sg_init_table(ctx->sg, n);
+
+	tmp = ctx->sg;
+
+	ctx->sg_len = 0;
+
+	if (ctx->bufcnt) {
+		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
+		tmp = sg_next(tmp);
+		ctx->sg_len++;
+	}
+
+	while (sg && skip >= sg->length) {
+		skip -= sg->length;
+		sg = sg_next(sg);
+	}
+
+	while (sg && new_len) {
+		len = sg->length - skip;
+		if (new_len < len)
+			len = new_len;
+
+		new_len -= len;
+		sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
+		skip = 0;
+		if (new_len <= 0)
+			sg_mark_end(tmp);
+
+		tmp = sg_next(tmp);
+		ctx->sg_len++;
+		sg = sg_next(sg);
+	}
+
+	set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
+
+	return 0;
+}
+
+/**
+ * s5p_hash_prepare_sgs() - prepare sg for processing
+ * @ctx:	request context
+ * @sg:		source scatterlist request
+ * @nbytes:	number of bytes to process from sg
+ * @final:	final flag
+ *
+ * Check two conditions: (1) the buffers in sg hold length-aligned data, and
+ * (2) the sg table has well-aligned elements (list_ok). If one of these
+ * checks fails, then either (1) allocate a new buffer with
+ * s5p_hash_copy_sgs(), copy the data into it and prepare the request in
+ * sgl, or (2) allocate a new sg table and prepare its elements.
+ *
+ * For digest or finup all conditions may already be met, in which case no
+ * fix-up is needed.
+ */
+static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
+				struct scatterlist *sg,
+				unsigned int new_len, bool final)
+{
+	unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
+	bool aligned = true, list_ok = true;
+	struct scatterlist *sg_tmp = sg;
+
+	if (!sg || !sg->length || !new_len)
+		return 0;
+
+	if (skip || !final)
+		list_ok = false;
+
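+	/*
+	 * Walk the sg entries covering new_len bytes past the skip offset,
+	 * counting them and checking that each used chunk is BUFLEN-aligned
+	 * and that the list can be passed on without trimming.
+	 */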
+	while (nbytes > 0 && sg_tmp) {
+		n++;
+		if (skip >= sg_tmp->length) {
+			skip -= sg_tmp->length;
+			if (!sg_tmp->length) {
+				aligned = false;
+				break;
+			}
+		} else {
+			if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
+				aligned = false;
+				break;
+			}
+
+			if (nbytes < sg_tmp->length - skip) {
+				list_ok = false;
+				break;
+			}
+
+			nbytes -= sg_tmp->length - skip;
+			skip = 0;
+		}
+
+		sg_tmp = sg_next(sg_tmp);
+	}
+
+	if (!aligned)
+		return s5p_hash_copy_sgs(ctx, sg, new_len);
+	else if (!list_ok)
+		return s5p_hash_copy_sg_lists(ctx, sg, new_len);
+
+	/*
+	 * We have aligned data from the previous operation and/or the current
+	 * one. Note: we get here only for (digest or finup) with aligned data.
+	 */
+	if (ctx->bufcnt) {
+		ctx->sg_len = n;
+		sg_init_table(ctx->sgl, 2);
+		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
+		sg_chain(ctx->sgl, 2, sg);
+		ctx->sg = ctx->sgl;
+		ctx->sg_len++;
+	} else {
+		ctx->sg = sg;
+		ctx->sg_len = n;
+	}
+
+	return 0;
+}
+
+/**
+ * s5p_hash_prepare_request() - prepare request for processing
+ * @req:	AHASH request
+ * @update:	true if UPDATE op
+ *
+ * Note 1: we can have the update flag _and_ the final flag at the same time.
+ * Note 2: we enter here when digcnt > BUFLEN (= HASH_BLOCK_SIZE), when
+ *	   req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN, or when
+ *	   this is a final op.
+ */
+static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
+{
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+	bool final = ctx->finup;
+	int xmit_len, hash_later, nbytes;
+	int ret;
+
+	if (update)
+		nbytes = req->nbytes;
+	else
+		nbytes = 0;
+
+	ctx->total = nbytes + ctx->bufcnt;
+	if (!ctx->total)
+		return 0;
+
+	if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
+		/* bytes left from previous request, so fill up to BUFLEN */
+		int len = BUFLEN - ctx->bufcnt % BUFLEN;
+
+		if (len > nbytes)
+			len = nbytes;
+
+		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
+					 0, len, 0);
+		ctx->bufcnt += len;
+		nbytes -= len;
+		ctx->skip = len;
+	} else {
+		ctx->skip = 0;
+	}
+
+	if (ctx->bufcnt)
+		memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
+
+	xmit_len = ctx->total;
+	if (final) {
+		hash_later = 0;
+	} else {
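+		/*
+		 * Hold back the trailing partial block (or one full block if
+		 * aligned) so that a later final() always has data to hash.
+		 */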
+		if (IS_ALIGNED(xmit_len, BUFLEN))
+			xmit_len -= BUFLEN;
+		else
+			xmit_len -= xmit_len & (BUFLEN - 1);
+
+		hash_later = ctx->total - xmit_len;
+		/* copy hash_later bytes from end of req->src */
+		/* previous bytes are in xmit_buf, so no overwrite */
+		scatterwalk_map_and_copy(ctx->buffer, req->src,
+					 req->nbytes - hash_later,
+					 hash_later, 0);
+	}
+
+	if (xmit_len > BUFLEN) {
+		ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
+					   final);
+		if (ret)
+			return ret;
+	} else {
+		/* have buffered data only */
+		if (unlikely(!ctx->bufcnt)) {
+			/* first update didn't fill up buffer */
+			scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
+						 0, xmit_len, 0);
+		}
+
+		sg_init_table(ctx->sgl, 1);
+		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
+
+		ctx->sg = ctx->sgl;
+		ctx->sg_len = 1;
+	}
+
+	ctx->bufcnt = hash_later;
+	if (!final)
+		ctx->total = xmit_len;
+
+	return 0;
+}
+
+/**
+ * s5p_hash_update_dma_stop() - unmap DMA
+ * @dd:		secss device
+ *
+ * Unmap scatterlist ctx->sg.
+ */
+static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
+{
+	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+
+	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
+	clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
+}
+
+/**
+ * s5p_hash_finish() - copy calculated digest to crypto layer
+ * @req:	AHASH request
+ */
+static void s5p_hash_finish(struct ahash_request *req)
+{
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+	struct s5p_aes_dev *dd = ctx->dd;
+
+	if (ctx->digcnt)
+		s5p_hash_copy_result(req);
+
+	dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
+}
+
+/**
+ * s5p_hash_finish_req() - finish request
+ * @req:	AHASH request
+ * @err:	error
+ */
+static void s5p_hash_finish_req(struct ahash_request *req, int err)
+{
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+	struct s5p_aes_dev *dd = ctx->dd;
+	unsigned long flags;
+
+	if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
+		free_pages((unsigned long)sg_virt(ctx->sg),
+			   get_order(ctx->sg->length));
+
+	if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
+		kfree(ctx->sg);
+
+	ctx->sg = NULL;
+	dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
+			    BIT(HASH_FLAGS_SGS_COPIED));
+
+	if (!err && !ctx->error) {
+		s5p_hash_read_msg(req);
+		if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
+			s5p_hash_finish(req);
+	} else {
+		ctx->error = true;
+	}
+
+	spin_lock_irqsave(&dd->hash_lock, flags);
+	dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
+			    BIT(HASH_FLAGS_DMA_READY) |
+			    BIT(HASH_FLAGS_OUTPUT_READY));
+	spin_unlock_irqrestore(&dd->hash_lock, flags);
+
+	if (req->base.complete)
+		req->base.complete(&req->base, err);
+}
+
+/**
+ * s5p_hash_handle_queue() - handle hash queue
+ * @dd:		device s5p_aes_dev
+ * @req:	AHASH request
+ *
+ * If req != NULL, enqueue it on dd->hash_queue. If HASH_FLAGS_BUSY is not
+ * set on the device, process the first request from dd->hash_queue.
+ *
+ * Returns: see s5p_hash_final below.
+ */
+static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
+				 struct ahash_request *req)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct s5p_hash_reqctx *ctx;
+	unsigned long flags;
+	int err = 0, ret = 0;
+
+retry:
+	spin_lock_irqsave(&dd->hash_lock, flags);
+	if (req)
+		ret = ahash_enqueue_request(&dd->hash_queue, req);
+
+	if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
+		spin_unlock_irqrestore(&dd->hash_lock, flags);
+		return ret;
+	}
+
+	backlog = crypto_get_backlog(&dd->hash_queue);
+	async_req = crypto_dequeue_request(&dd->hash_queue);
+	if (async_req)
+		set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
+
+	spin_unlock_irqrestore(&dd->hash_lock, flags);
+
+	if (!async_req)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	req = ahash_request_cast(async_req);
+	dd->hash_req = req;
+	ctx = ahash_request_ctx(req);
+
+	err = s5p_hash_prepare_request(req, ctx->op_update);
+	if (err || !ctx->total)
+		goto out;
+
+	dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
+		ctx->op_update, req->nbytes);
+
+	s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
+	if (ctx->digcnt)
+		s5p_hash_write_iv(req); /* restore hash IV */
+
+	if (ctx->op_update) { /* HASH_OP_UPDATE */
+		err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
+		if (err != -EINPROGRESS && ctx->finup && !ctx->error)
+			/* no final() after finup() */
+			err = s5p_hash_xmit_dma(dd, ctx->total, true);
+	} else { /* HASH_OP_FINAL */
+		err = s5p_hash_xmit_dma(dd, ctx->total, true);
+	}
+out:
+	if (err != -EINPROGRESS) {
+		/* hash_tasklet_cb will not finish it, so do it here */
+		s5p_hash_finish_req(req, err);
+		req = NULL;
+
+		/*
+		 * Execute next request immediately if there is anything
+		 * in queue.
+		 */
+		goto retry;
+	}
+
+	return ret;
+}
+
+/**
+ * s5p_hash_tasklet_cb() - hash tasklet
+ * @data:	ptr to s5p_aes_dev
+ */
+static void s5p_hash_tasklet_cb(unsigned long data)
+{
+	struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;
+
+	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
+		s5p_hash_handle_queue(dd, NULL);
+		return;
+	}
+
+	if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
+		if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
+				       &dd->hash_flags)) {
+			s5p_hash_update_dma_stop(dd);
+		}
+
+		if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
+				       &dd->hash_flags)) {
+			/* hash or semi-hash ready */
+			clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
+			goto finish;
+		}
+	}
+
+	return;
+
+finish:
+	/* finish current request */
+	s5p_hash_finish_req(dd->hash_req, 0);
+
+	/* If we are not busy, process next req */
+	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
+		s5p_hash_handle_queue(dd, NULL);
+}
+
+/**
+ * s5p_hash_enqueue() - enqueue request
+ * @req:	AHASH request
+ * @op:		operation UPDATE (true) or FINAL (false)
+ *
+ * Returns: see s5p_hash_final below.
+ */
+static int s5p_hash_enqueue(struct ahash_request *req, bool op)
+{
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->op_update = op;
+
+	return s5p_hash_handle_queue(tctx->dd, req);
+}
+
+/**
+ * s5p_hash_update() - process the hash input data
+ * @req:	AHASH request
+ *
+ * If the request fits in the buffer, copy it and return immediately;
+ * otherwise enqueue it with OP_UPDATE.
+ *
+ * Returns: see s5p_hash_final below.
+ */
+static int s5p_hash_update(struct ahash_request *req)
+{
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+	if (!req->nbytes)
+		return 0;
+
+	if (ctx->bufcnt + req->nbytes <= BUFLEN) {
+		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
+					 0, req->nbytes, 0);
+		ctx->bufcnt += req->nbytes;
+		return 0;
+	}
+
+	return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
+}
+
+/**
+ * s5p_hash_shash_digest() - calculate shash digest
+ * @tfm:	crypto transformation
+ * @flags:	tfm flags
+ * @data:	input data
+ * @len:	length of data
+ * @out:	output buffer
+ */
+static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
+				 const u8 *data, unsigned int len, u8 *out)
+{
+	SHASH_DESC_ON_STACK(shash, tfm);
+
+	shash->tfm = tfm;
+	shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	return crypto_shash_digest(shash, data, len, out);
+}
+
+/**
+ * s5p_hash_final_shash() - calculate shash digest
+ * @req:	AHASH request
+ */
+static int s5p_hash_final_shash(struct ahash_request *req)
+{
+	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+	return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
+				     ctx->buffer, ctx->bufcnt, req->result);
+}
+
+/**
+ * s5p_hash_final() - close up hash and calculate digest
+ * @req:	AHASH request
+ *
+ * Note: in final, req->src does not contain any data, yet req->nbytes can
+ * be non-zero.
+ *
+ * If no input data has been processed yet and the buffered hash data is
+ * less than BUFLEN (64), calculate the final hash immediately using the
+ * SW fallback algorithm.
+ *
+ * Otherwise enqueue the current AHASH request with the OP_FINAL operation
+ * and finalize the hash message in HW. Note that if digcnt != 0 then there
+ * was a previous update op, so there are always some buffered bytes in
+ * ctx->buffer, which means that ctx->bufcnt != 0.
+ *
+ * Returns:
+ * 0 if the request has been processed immediately,
+ * -EINPROGRESS if the operation has been queued for later execution or is
+ *		being processed by the HW,
+ * -EBUSY if the queue is full and the request should be resubmitted later,
+ * any other negative value denotes an error.
+ */
+static int s5p_hash_final(struct ahash_request *req)
+{
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+	ctx->finup = true;
+	if (ctx->error)
+		return -EINVAL; /* uncompleted hash is not needed */
+
+	if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
+		return s5p_hash_final_shash(req);
+
+	return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
+}
+
+/**
+ * s5p_hash_finup() - process last req->src and calculate digest
+ * @req:	AHASH request containing the last update data
+ *
+ * Return values: see s5p_hash_final above.
+ */
+static int s5p_hash_finup(struct ahash_request *req)
+{
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+	int err1, err2;
+
+	ctx->finup = true;
+
+	err1 = s5p_hash_update(req);
+	if (err1 == -EINPROGRESS || err1 == -EBUSY)
+		return err1;
+
+	/*
+	 * final() always has to be called to clean up resources, even if
+	 * update() failed (except when it returned -EINPROGRESS), or to
+	 * calculate the digest for a small size.
+	 */
+	err2 = s5p_hash_final(req);
+
+	return err1 ?: err2;
+}
+
+/**
+ * s5p_hash_init() - initialize AHASH request context
+ * @req:	AHASH request
+ *
+ * Init async hash request context.
+ */
+static int s5p_hash_init(struct ahash_request *req)
+{
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
+
+	ctx->dd = tctx->dd;
+	ctx->error = false;
+	ctx->finup = false;
+	ctx->bufcnt = 0;
+	ctx->digcnt = 0;
+	ctx->total = 0;
+	ctx->skip = 0;
+
+	dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
+		crypto_ahash_digestsize(tfm));
+
+	switch (crypto_ahash_digestsize(tfm)) {
+	case MD5_DIGEST_SIZE:
+		ctx->engine = SSS_HASH_ENGINE_MD5;
+		ctx->nregs = HASH_MD5_MAX_REG;
+		break;
+	case SHA1_DIGEST_SIZE:
+		ctx->engine = SSS_HASH_ENGINE_SHA1;
+		ctx->nregs = HASH_SHA1_MAX_REG;
+		break;
+	case SHA256_DIGEST_SIZE:
+		ctx->engine = SSS_HASH_ENGINE_SHA256;
+		ctx->nregs = HASH_SHA256_MAX_REG;
+		break;
+	default:
+		ctx->error = true;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * s5p_hash_digest - calculate digest from req->src
+ * @req:	AHASH request
+ *
+ * Return values: see s5p_hash_final above.
+ */
+static int s5p_hash_digest(struct ahash_request *req)
+{
+	return s5p_hash_init(req) ?: s5p_hash_finup(req);
+}
+
+/**
+ * s5p_hash_cra_init_alg - init crypto alg transformation
+ * @tfm:	crypto transformation
+ */
+static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
+{
+	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
+	const char *alg_name = crypto_tfm_alg_name(tfm);
+
+	tctx->dd = s5p_dev;
+	/* Allocate a fallback and abort if it failed. */
+	tctx->fallback = crypto_alloc_shash(alg_name, 0,
+					    CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(tctx->fallback)) {
+		pr_err("fallback alloc fails for '%s'\n", alg_name);
+		return PTR_ERR(tctx->fallback);
+	}
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct s5p_hash_reqctx) + BUFLEN);
+
+	return 0;
+}
+
+/**
+ * s5p_hash_cra_init - init crypto tfm
+ * @tfm:	crypto transformation
+ */
+static int s5p_hash_cra_init(struct crypto_tfm *tfm)
+{
+	return s5p_hash_cra_init_alg(tfm);
+}
+
+/**
+ * s5p_hash_cra_exit - exit crypto tfm
+ * @tfm:	crypto transformation
+ *
+ * free allocated fallback
+ */
+static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_shash(tctx->fallback);
+	tctx->fallback = NULL;
+}
+
+/**
+ * s5p_hash_export - export hash state
+ * @req:	AHASH request
+ * @out:	buffer for exported state
+ */
+static int s5p_hash_export(struct ahash_request *req, void *out)
+{
+	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+	memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
+
+	return 0;
+}
+
+/**
+ * s5p_hash_import - import hash state
+ * @req:	AHASH request
+ * @in:		buffer with state to be imported from
+ */
+static int s5p_hash_import(struct ahash_request *req, const void *in)
+{
+	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
+	const struct s5p_hash_reqctx *ctx_in = in;
+
+	memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
+	if (ctx_in->bufcnt > BUFLEN) {
+		ctx->error = true;
+		return -EINVAL;
+	}
+
+	ctx->dd = tctx->dd;
+	ctx->error = false;
+
+	return 0;
+}
+
+static struct ahash_alg algs_sha1_md5_sha256[] = {
+{
+	.init		= s5p_hash_init,
+	.update		= s5p_hash_update,
+	.final		= s5p_hash_final,
+	.finup		= s5p_hash_finup,
+	.digest		= s5p_hash_digest,
+	.export		= s5p_hash_export,
+	.import		= s5p_hash_import,
+	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
+	.halg.digestsize	= SHA1_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "sha1",
+		.cra_driver_name	= "exynos-sha1",
+		.cra_priority		= 100,
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+					  CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= HASH_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
+		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= s5p_hash_cra_init,
+		.cra_exit		= s5p_hash_cra_exit,
+	}
+},
+{
+	.init		= s5p_hash_init,
+	.update		= s5p_hash_update,
+	.final		= s5p_hash_final,
+	.finup		= s5p_hash_finup,
+	.digest		= s5p_hash_digest,
+	.export		= s5p_hash_export,
+	.import		= s5p_hash_import,
+	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
+	.halg.digestsize	= MD5_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "md5",
+		.cra_driver_name	= "exynos-md5",
+		.cra_priority		= 100,
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+					  CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= HASH_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
+		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= s5p_hash_cra_init,
+		.cra_exit		= s5p_hash_cra_exit,
+	}
+},
+{
+	.init		= s5p_hash_init,
+	.update		= s5p_hash_update,
+	.final		= s5p_hash_final,
+	.finup		= s5p_hash_finup,
+	.digest		= s5p_hash_digest,
+	.export		= s5p_hash_export,
+	.import		= s5p_hash_import,
+	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
+	.halg.digestsize	= SHA256_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "sha256",
+		.cra_driver_name	= "exynos-sha256",
+		.cra_priority		= 100,
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+					  CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= HASH_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
+		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= s5p_hash_cra_init,
+		.cra_exit		= s5p_hash_cra_exit,
+	}
+}
+};
+
+static void s5p_set_aes(struct s5p_aes_dev *dev,
+			const uint8_t *key, const uint8_t *iv,
+			unsigned int keylen)
+{
+	void __iomem *keystart;
+
+	if (iv)
+		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+
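+	/*
+	 * The key register bank holds eight 32-bit words; shorter keys are
+	 * written end-aligned, so the start offset depends on the key length.
+	 */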
+	if (keylen == AES_KEYSIZE_256)
+		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
+	else if (keylen == AES_KEYSIZE_192)
+		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
+	else
+		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
+
+	memcpy_toio(keystart, key, keylen);
+}
+
+static bool s5p_is_sg_aligned(struct scatterlist *sg)
+{
+	while (sg) {
+		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+			return false;
+		sg = sg_next(sg);
+	}
+
+	return true;
+}
+
+static int s5p_set_indata_start(struct s5p_aes_dev *dev,
+				struct ablkcipher_request *req)
+{
+	struct scatterlist *sg;
+	int err;
+
+	dev->sg_src_cpy = NULL;
+	sg = req->src;
+	if (!s5p_is_sg_aligned(sg)) {
+		dev_dbg(dev->dev,
+			"At least one unaligned source scatter list, making a copy\n");
+		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
+		if (err)
+			return err;
+
+		sg = dev->sg_src_cpy;
+	}
+
+	err = s5p_set_indata(dev, sg);
+	if (err) {
+		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
+		return err;
+	}
+
+	return 0;
+}
+
+static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
+				struct ablkcipher_request *req)
+{
+	struct scatterlist *sg;
+	int err;
+
+	dev->sg_dst_cpy = NULL;
+	sg = req->dst;
+	if (!s5p_is_sg_aligned(sg)) {
+		dev_dbg(dev->dev,
+			"At least one unaligned dest scatter list, making a copy\n");
+		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
+		if (err)
+			return err;
+
+		sg = dev->sg_dst_cpy;
+	}
+
+	err = s5p_set_outdata(dev, sg);
+	if (err) {
+		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+		return err;
+	}
+
+	return 0;
+}
+
+static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
+{
+	struct ablkcipher_request *req = dev->req;
+	uint32_t aes_control;
+	unsigned long flags;
+	int err;
+	u8 *iv;
+
+	aes_control = SSS_AES_KEY_CHANGE_MODE;
+	if (mode & FLAGS_AES_DECRYPT)
+		aes_control |= SSS_AES_MODE_DECRYPT;
+
+	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
+		aes_control |= SSS_AES_CHAIN_MODE_CBC;
+		iv = req->info;
+	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
+		aes_control |= SSS_AES_CHAIN_MODE_CTR;
+		iv = req->info;
+	} else {
+		iv = NULL; /* AES_ECB */
+	}
+
+	if (dev->ctx->keylen == AES_KEYSIZE_192)
+		aes_control |= SSS_AES_KEY_SIZE_192;
+	else if (dev->ctx->keylen == AES_KEYSIZE_256)
+		aes_control |= SSS_AES_KEY_SIZE_256;
+
+	aes_control |= SSS_AES_FIFO_MODE;
+
+	/* Alternatively, byte swapping could be done on the DMA side */
+	aes_control |= SSS_AES_BYTESWAP_DI
+		    |  SSS_AES_BYTESWAP_DO
+		    |  SSS_AES_BYTESWAP_IV
+		    |  SSS_AES_BYTESWAP_KEY
+		    |  SSS_AES_BYTESWAP_CNT;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	SSS_WRITE(dev, FCINTENCLR,
+		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
+	SSS_WRITE(dev, FCFIFOCTRL, 0x00);
+
+	err = s5p_set_indata_start(dev, req);
+	if (err)
+		goto indata_error;
+
+	err = s5p_set_outdata_start(dev, req);
+	if (err)
+		goto outdata_error;
+
+	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
+	s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
+
+	s5p_set_dma_indata(dev,  dev->sg_src);
+	s5p_set_dma_outdata(dev, dev->sg_dst);
+
+	SSS_WRITE(dev, FCINTENSET,
+		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return;
+
+outdata_error:
+	s5p_unset_indata(dev);
+
+indata_error:
+	s5p_sg_done(dev);
+	dev->busy = false;
+	spin_unlock_irqrestore(&dev->lock, flags);
+	s5p_aes_complete(dev, err);
+}
+
+static void s5p_tasklet_cb(unsigned long data)
+{
+	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
+	struct crypto_async_request *async_req, *backlog;
+	struct s5p_aes_reqctx *reqctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	backlog   = crypto_get_backlog(&dev->queue);
+	async_req = crypto_dequeue_request(&dev->queue);
+
+	if (!async_req) {
+		dev->busy = false;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	dev->req = ablkcipher_request_cast(async_req);
+	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
+	reqctx   = ablkcipher_request_ctx(dev->req);
+
+	s5p_aes_crypt_start(dev, reqctx->mode);
+}
+
+static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
+			      struct ablkcipher_request *req)
+{
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	err = ablkcipher_enqueue_request(&dev->queue, req);
+	if (dev->busy) {
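+		/*
+		 * A request is already in flight; the interrupt path will
+		 * reschedule the tasklet when it completes, so the queued
+		 * request is picked up later.
+		 */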
+		spin_unlock_irqrestore(&dev->lock, flags);
+		goto exit;
+	}
+	dev->busy = true;
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	tasklet_schedule(&dev->tasklet);
+
+exit:
+	return err;
+}
+
+static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
+	struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct s5p_aes_dev *dev = ctx->dev;
+
+	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+		dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
+		return -EINVAL;
+	}
+
+	reqctx->mode = mode;
+
+	return s5p_aes_handle_req(dev, req);
+}
+
+static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
+			  const uint8_t *key, unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (keylen != AES_KEYSIZE_128 &&
+	    keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256)
+		return -EINVAL;
+
+	memcpy(ctx->aes_key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return s5p_aes_crypt(req, 0);
+}
+
+static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
+}
+
+static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return s5p_aes_crypt(req, FLAGS_AES_CBC);
+}
+
+static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
+}
+
+static int s5p_aes_cra_init(struct crypto_tfm *tfm)
+{
+	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->dev = s5p_dev;
+	tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
+
+	return 0;
+}
+
+static struct crypto_alg algs[] = {
+	{
+		.cra_name		= "ecb(aes)",
+		.cra_driver_name	= "ecb-aes-s5p",
+		.cra_priority		= 100,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_KERN_DRIVER_ONLY,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
+		.cra_alignmask		= 0x0f,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= s5p_aes_cra_init,
+		.cra_u.ablkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= s5p_aes_setkey,
+			.encrypt	= s5p_aes_ecb_encrypt,
+			.decrypt	= s5p_aes_ecb_decrypt,
+		}
+	},
+	{
+		.cra_name		= "cbc(aes)",
+		.cra_driver_name	= "cbc-aes-s5p",
+		.cra_priority		= 100,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_KERN_DRIVER_ONLY,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
+		.cra_alignmask		= 0x0f,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= s5p_aes_cra_init,
+		.cra_u.ablkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
+			.setkey		= s5p_aes_setkey,
+			.encrypt	= s5p_aes_cbc_encrypt,
+			.decrypt	= s5p_aes_cbc_decrypt,
+		}
+	},
+};
+
+static int s5p_aes_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int i, j, err = -ENODEV;
+	const struct samsung_aes_variant *variant;
+	struct s5p_aes_dev *pdata;
+	struct resource *res;
+	unsigned int hash_i;
+
+	if (s5p_dev)
+		return -EEXIST;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	variant = find_s5p_sss_version(pdev);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	/*
+	 * Note: HASH and PRNG use the same registers in secss, so avoid
+	 * overwriting each other. HASH is dropped here when CONFIG_EXYNOS_RNG
+	 * is enabled in the config. A larger resource size is needed for the
+	 * HASH registers in secss; the current one describes only AES/DES.
+	 */
+	if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
+		if (variant == &exynos_aes_data) {
+			res->end += 0x300;
+			pdata->use_hash = true;
+		}
+	}
+
+	pdata->res = res;
+	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pdata->ioaddr)) {
+		if (!pdata->use_hash)
+			return PTR_ERR(pdata->ioaddr);
+		/* try AES without HASH */
+		res->end -= 0x300;
+		pdata->use_hash = false;
+		pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(pdata->ioaddr))
+			return PTR_ERR(pdata->ioaddr);
+	}
+
+	pdata->clk = devm_clk_get(dev, "secss");
+	if (IS_ERR(pdata->clk)) {
+		dev_err(dev, "failed to find secss clock source\n");
+		return -ENOENT;
+	}
+
+	err = clk_prepare_enable(pdata->clk);
+	if (err < 0) {
+		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
+		return err;
+	}
+
+	spin_lock_init(&pdata->lock);
+	spin_lock_init(&pdata->hash_lock);
+
+	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
+	pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;
+
+	pdata->irq_fc = platform_get_irq(pdev, 0);
+	if (pdata->irq_fc < 0) {
+		err = pdata->irq_fc;
+		dev_warn(dev, "feed control interrupt is not available.\n");
+		goto err_irq;
+	}
+	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
+					s5p_aes_interrupt, IRQF_ONESHOT,
+					pdev->name, pdev);
+	if (err < 0) {
+		dev_warn(dev, "feed control interrupt is not available.\n");
+		goto err_irq;
+	}
+
+	pdata->busy = false;
+	pdata->dev = dev;
+	platform_set_drvdata(pdev, pdata);
+	s5p_dev = pdata;
+
+	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
+	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
+
+	for (i = 0; i < ARRAY_SIZE(algs); i++) {
+		err = crypto_register_alg(&algs[i]);
+		if (err)
+			goto err_algs;
+	}
+
+	if (pdata->use_hash) {
+		tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
+			     (unsigned long)pdata);
+		crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);
+
+		for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
+		     hash_i++) {
+			struct ahash_alg *alg;
+
+			alg = &algs_sha1_md5_sha256[hash_i];
+			err = crypto_register_ahash(alg);
+			if (err) {
+				dev_err(dev, "can't register '%s': %d\n",
+					alg->halg.base.cra_driver_name, err);
+				goto err_hash;
+			}
+		}
+	}
+
+	dev_info(dev, "s5p-sss driver registered\n");
+
+	return 0;
+
+err_hash:
+	for (j = hash_i - 1; j >= 0; j--)
+		crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);
+
+	tasklet_kill(&pdata->hash_tasklet);
+	res->end -= 0x300;
+
+err_algs:
+	if (i < ARRAY_SIZE(algs))
+		dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
+			err);
+
+	for (j = 0; j < i; j++)
+		crypto_unregister_alg(&algs[j]);
+
+	tasklet_kill(&pdata->tasklet);
+
+err_irq:
+	clk_disable_unprepare(pdata->clk);
+
+	s5p_dev = NULL;
+
+	return err;
+}
+
+static int s5p_aes_remove(struct platform_device *pdev)
+{
+	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
+	int i;
+
+	if (!pdata)
+		return -ENODEV;
+
+	for (i = 0; i < ARRAY_SIZE(algs); i++)
+		crypto_unregister_alg(&algs[i]);
+
+	tasklet_kill(&pdata->tasklet);
+	if (pdata->use_hash) {
+		for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
+			crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);
+
+		pdata->res->end -= 0x300;
+		tasklet_kill(&pdata->hash_tasklet);
+		pdata->use_hash = false;
+	}
+
+	clk_disable_unprepare(pdata->clk);
+	s5p_dev = NULL;
+
+	return 0;
+}
+
+static struct platform_driver s5p_aes_crypto = {
+	.probe	= s5p_aes_probe,
+	.remove	= s5p_aes_remove,
+	.driver	= {
+		.name	= "s5p-secss",
+		.of_match_table = s5p_sss_dt_match,
+	},
+};
+
+module_platform_driver(s5p_aes_crypto);
+
+MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
+MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
new file mode 100644
index 0000000..e7540a5
--- /dev/null
+++ b/drivers/crypto/sahara.c
@@ -0,0 +1,1578 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for SAHARA cryptographic accelerator.
+ *
+ * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
+ * Copyright (c) 2013 Vista Silicon S.L.
+ * Author: Javier Martin <javier.martin@vista-silicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on omap-aes.c and tegra-aes.c
+ */
+
+#include <crypto/aes.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define SHA_BUFFER_LEN		PAGE_SIZE
+#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
+
+#define SAHARA_NAME "sahara"
+#define SAHARA_VERSION_3	3
+#define SAHARA_VERSION_4	4
+#define SAHARA_TIMEOUT_MS	1000
+#define SAHARA_MAX_HW_DESC	2
+#define SAHARA_MAX_HW_LINK	20
+
+#define FLAGS_MODE_MASK		0x000f
+#define FLAGS_ENCRYPT		BIT(0)
+#define FLAGS_CBC		BIT(1)
+#define FLAGS_NEW_KEY		BIT(3)
+
+#define SAHARA_HDR_BASE			0x00800000
+#define SAHARA_HDR_SKHA_ALG_AES	0
+#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
+#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
+#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
+#define SAHARA_HDR_FORM_DATA		(5 << 16)
+#define SAHARA_HDR_FORM_KEY		(8 << 16)
+#define SAHARA_HDR_LLO			(1 << 24)
+#define SAHARA_HDR_CHA_SKHA		(1 << 28)
+#define SAHARA_HDR_CHA_MDHA		(2 << 28)
+#define SAHARA_HDR_PARITY_BIT		(1 << 31)
+
+#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
+#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
+#define SAHARA_HDR_MDHA_HASH		0xA0850000
+#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
+#define SAHARA_HDR_MDHA_ALG_SHA1	0
+#define SAHARA_HDR_MDHA_ALG_MD5		1
+#define SAHARA_HDR_MDHA_ALG_SHA256	2
+#define SAHARA_HDR_MDHA_ALG_SHA224	3
+#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
+#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
+#define SAHARA_HDR_MDHA_INIT		(1 << 5)
+#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
+#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
+#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
+#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
+#define SAHARA_HDR_MDHA_SSL		(1 << 10)
+
+/* SAHARA can only process one request at a time */
+#define SAHARA_QUEUE_LENGTH	1
+
+#define SAHARA_REG_VERSION	0x00
+#define SAHARA_REG_DAR		0x04
+#define SAHARA_REG_CONTROL	0x08
+#define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
+#define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
+#define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
+#define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
+#define SAHARA_REG_CMD		0x0C
+#define		SAHARA_CMD_RESET		(1 << 0)
+#define		SAHARA_CMD_CLEAR_INT		(1 << 8)
+#define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
+#define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
+#define		SAHARA_CMD_MODE_BATCH		(1 << 16)
+#define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
+#define	SAHARA_REG_STATUS	0x10
+#define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
+#define			SAHARA_STATE_IDLE	0
+#define			SAHARA_STATE_BUSY	1
+#define			SAHARA_STATE_ERR	2
+#define			SAHARA_STATE_FAULT	3
+#define			SAHARA_STATE_COMPLETE	4
+#define			SAHARA_STATE_COMP_FLAG	(1 << 2)
+#define		SAHARA_STATUS_DAR_FULL		(1 << 3)
+#define		SAHARA_STATUS_ERROR		(1 << 4)
+#define		SAHARA_STATUS_SECURE		(1 << 5)
+#define		SAHARA_STATUS_FAIL		(1 << 6)
+#define		SAHARA_STATUS_INIT		(1 << 7)
+#define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
+#define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
+#define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
+#define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
+#define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
+#define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
+#define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
+#define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
+#define SAHARA_REG_ERRSTATUS	0x14
+#define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
+#define			SAHARA_ERRSOURCE_CHA	14
+#define			SAHARA_ERRSOURCE_DMA	15
+#define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
+#define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
+#define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
+#define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
+#define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
+#define SAHARA_REG_FADDR	0x18
+#define SAHARA_REG_CDAR		0x1C
+#define SAHARA_REG_IDAR		0x20
+
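+/*
+ * Descriptor and link-list layouts consumed by the SAHARA DMA engine: each
+ * descriptor carries a command header plus two (length, pointer) pairs, and
+ * a pointer may reference either a plain buffer or a chain of
+ * sahara_hw_link entries describing a scatterlist.
+ */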
+struct sahara_hw_desc {
+	u32	hdr;
+	u32	len1;
+	u32	p1;
+	u32	len2;
+	u32	p2;
+	u32	next;
+};
+
+struct sahara_hw_link {
+	u32	len;
+	u32	p;
+	u32	next;
+};
+
+struct sahara_ctx {
+	unsigned long flags;
+
+	/* AES-specific context */
+	int keylen;
+	u8 key[AES_KEYSIZE_128];
+	struct crypto_skcipher *fallback;
+};
+
+struct sahara_aes_reqctx {
+	unsigned long mode;
+};
+
+/*
+ * struct sahara_sha_reqctx - private data per request
+ * @buf: holds data for requests smaller than block_size
+ * @rembuf: used to prepare one block_size-aligned request
+ * @context: hw-specific context for request. Digest is extracted from this
+ * @mode: specifies what type of hw-descriptor needs to be built
+ * @digest_size: length of digest for this request
+ * @context_size: length of hw-context for this request.
+ *                Always digest_size + 4
+ * @buf_cnt: number of bytes saved in buf
+ * @sg_in_idx: index of the input-data hw link (0 when no data was mapped)
+ * @in_sg: scatterlist for input data
+ * @in_sg_chain: scatterlists for chained input data
+ * @total: total number of bytes for transfer
+ * @last: is this the last block
+ * @first: is this the first block
+ * @active: inside a transfer
+ */
+struct sahara_sha_reqctx {
+	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
+	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
+	u8			context[SHA256_DIGEST_SIZE + 4];
+	unsigned int		mode;
+	unsigned int		digest_size;
+	unsigned int		context_size;
+	unsigned int		buf_cnt;
+	unsigned int		sg_in_idx;
+	struct scatterlist	*in_sg;
+	struct scatterlist	in_sg_chain[2];
+	size_t			total;
+	unsigned int		last;
+	unsigned int		first;
+	unsigned int		active;
+};
+
+struct sahara_dev {
+	struct device		*device;
+	unsigned int		version;
+	void __iomem		*regs_base;
+	struct clk		*clk_ipg;
+	struct clk		*clk_ahb;
+	struct mutex		queue_mutex;
+	struct task_struct	*kthread;
+	struct completion	dma_completion;
+
+	struct sahara_ctx	*ctx;
+	struct crypto_queue	queue;
+	unsigned long		flags;
+
+	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
+	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
+
+	u8			*key_base;
+	dma_addr_t		key_phys_base;
+
+	u8			*iv_base;
+	dma_addr_t		iv_phys_base;
+
+	u8			*context_base;
+	dma_addr_t		context_phys_base;
+
+	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
+	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
+
+	size_t			total;
+	struct scatterlist	*in_sg;
+	int		nb_in_sg;
+	struct scatterlist	*out_sg;
+	int		nb_out_sg;
+
+	u32			error;
+};
+
+static struct sahara_dev *dev_ptr;
+
+static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
+{
+	writel(data, dev->regs_base + reg);
+}
+
+static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
+{
+	return readl(dev->regs_base + reg);
+}
+
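+/*
+ * Build the header of the key-load descriptor. The parity bit is set up
+ * front and toggled each time an extra mode bit (CBC, encrypt) is added, so
+ * the header always ends up with odd parity.
+ */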
+static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
+{
+	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
+			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
+			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
+
+	if (dev->flags & FLAGS_CBC) {
+		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
+		hdr ^= SAHARA_HDR_PARITY_BIT;
+	}
+
+	if (dev->flags & FLAGS_ENCRYPT) {
+		hdr |= SAHARA_HDR_SKHA_OP_ENC;
+		hdr ^= SAHARA_HDR_PARITY_BIT;
+	}
+
+	return hdr;
+}
+
+static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
+{
+	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
+			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
+}
+
+static const char *sahara_err_src[16] = {
+	"No error",
+	"Header error",
+	"Descriptor length error",
+	"Descriptor length or pointer error",
+	"Link length error",
+	"Link pointer error",
+	"Input buffer error",
+	"Output buffer error",
+	"Output buffer starvation",
+	"Internal state fault",
+	"General descriptor problem",
+	"Reserved",
+	"Descriptor address error",
+	"Link address error",
+	"CHA error",
+	"DMA error"
+};
+
+static const char *sahara_err_dmasize[4] = {
+	"Byte transfer",
+	"Half-word transfer",
+	"Word transfer",
+	"Reserved"
+};
+
+static const char *sahara_err_dmasrc[8] = {
+	"No error",
+	"AHB bus error",
+	"Internal IP bus error",
+	"Parity error",
+	"DMA crosses 256 byte boundary",
+	"DMA is busy",
+	"Reserved",
+	"DMA HW error"
+};
+
+static const char *sahara_cha_errsrc[12] = {
+	"Input buffer non-empty",
+	"Illegal address",
+	"Illegal mode",
+	"Illegal data size",
+	"Illegal key size",
+	"Write during processing",
+	"CTX read during processing",
+	"HW error",
+	"Input buffer disabled/underflow",
+	"Output buffer disabled/overflow",
+	"DES key parity error",
+	"Reserved"
+};
+
+static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
+
+static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
+{
+	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
+	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
+
+	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
+
+	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
+
+	if (source == SAHARA_ERRSOURCE_DMA) {
+		if (error & SAHARA_ERRSTATUS_DMA_DIR)
+			dev_err(dev->device, "		* DMA read.\n");
+		else
+			dev_err(dev->device, "		* DMA write.\n");
+
+		dev_err(dev->device, "		* %s.\n",
+		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
+		dev_err(dev->device, "		* %s.\n",
+		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
+	} else if (source == SAHARA_ERRSOURCE_CHA) {
+		dev_err(dev->device, "		* %s.\n",
+			sahara_cha_errsrc[chasrc]);
+		dev_err(dev->device, "		* %s.\n",
+		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
+	}
+	dev_err(dev->device, "\n");
+}
+
+static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
+
+static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
+{
+	u8 state;
+
+	if (!IS_ENABLED(DEBUG))
+		return;
+
+	state = SAHARA_STATUS_GET_STATE(status);
+
+	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
+		__func__, status);
+
+	dev_dbg(dev->device, "	- State = %d:\n", state);
+	if (state & SAHARA_STATE_COMP_FLAG)
+		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
+
+	dev_dbg(dev->device, "		* %s.\n",
+	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
+
+	if (status & SAHARA_STATUS_DAR_FULL)
+		dev_dbg(dev->device, "	- DAR Full.\n");
+	if (status & SAHARA_STATUS_ERROR)
+		dev_dbg(dev->device, "	- Error.\n");
+	if (status & SAHARA_STATUS_SECURE)
+		dev_dbg(dev->device, "	- Secure.\n");
+	if (status & SAHARA_STATUS_FAIL)
+		dev_dbg(dev->device, "	- Fail.\n");
+	if (status & SAHARA_STATUS_RNG_RESEED)
+		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
+	if (status & SAHARA_STATUS_ACTIVE_RNG)
+		dev_dbg(dev->device, "	- RNG Active.\n");
+	if (status & SAHARA_STATUS_ACTIVE_MDHA)
+		dev_dbg(dev->device, "	- MDHA Active.\n");
+	if (status & SAHARA_STATUS_ACTIVE_SKHA)
+		dev_dbg(dev->device, "	- SKHA Active.\n");
+
+	if (status & SAHARA_STATUS_MODE_BATCH)
+		dev_dbg(dev->device, "	- Batch Mode.\n");
+	else if (status & SAHARA_STATUS_MODE_DEDICATED)
+		dev_dbg(dev->device, "	- Dedicated Mode.\n");
+	else if (status & SAHARA_STATUS_MODE_DEBUG)
+		dev_dbg(dev->device, "	- Debug Mode.\n");
+
+	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
+	       SAHARA_STATUS_GET_ISTATE(status));
+
+	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
+		sahara_read(dev, SAHARA_REG_CDAR));
+	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
+		sahara_read(dev, SAHARA_REG_IDAR));
+}
+
+static void sahara_dump_descriptors(struct sahara_dev *dev)
+{
+	int i;
+
+	if (!IS_ENABLED(DEBUG))
+		return;
+
+	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
+		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
+			i, &dev->hw_phys_desc[i]);
+		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
+		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
+		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
+		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
+		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
+		dev_dbg(dev->device, "\tnext = 0x%08x\n",
+			dev->hw_desc[i]->next);
+	}
+	dev_dbg(dev->device, "\n");
+}
+
+static void sahara_dump_links(struct sahara_dev *dev)
+{
+	int i;
+
+	if (!IS_ENABLED(DEBUG))
+		return;
+
+	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
+		dev_dbg(dev->device, "Link (%d) (%pad):\n",
+			i, &dev->hw_phys_link[i]);
+		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
+		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
+		dev_dbg(dev->device, "\tnext = 0x%08x\n",
+			dev->hw_link[i]->next);
+	}
+	dev_dbg(dev->device, "\n");
+}
+
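+/*
+ * Build the descriptor chain for one AES request: an optional key/IV
+ * descriptor (emitted only when a new key was set) chained to a data
+ * descriptor whose p1 and p2 point at link lists describing the source and
+ * destination scatterlists.
+ */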
+static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+{
+	struct sahara_ctx *ctx = dev->ctx;
+	struct scatterlist *sg;
+	int ret;
+	int i, j;
+	int idx = 0;
+
+	/* Copy new key if necessary */
+	if (ctx->flags & FLAGS_NEW_KEY) {
+		memcpy(dev->key_base, ctx->key, ctx->keylen);
+		ctx->flags &= ~FLAGS_NEW_KEY;
+
+		if (dev->flags & FLAGS_CBC) {
+			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
+			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
+		} else {
+			dev->hw_desc[idx]->len1 = 0;
+			dev->hw_desc[idx]->p1 = 0;
+		}
+		dev->hw_desc[idx]->len2 = ctx->keylen;
+		dev->hw_desc[idx]->p2 = dev->key_phys_base;
+		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
+
+		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
+
+		idx++;
+	}
+
+	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
+	if (dev->nb_in_sg < 0) {
+		dev_err(dev->device, "Invalid numbers of src SG.\n");
+		return dev->nb_in_sg;
+	}
+	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
+	if (dev->nb_out_sg < 0) {
+		dev_err(dev->device, "Invalid numbers of dst SG.\n");
+		return dev->nb_out_sg;
+	}
+	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
+		dev_err(dev->device, "not enough hw links (%d)\n",
+			dev->nb_in_sg + dev->nb_out_sg);
+		return -EINVAL;
+	}
+
+	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+			 DMA_TO_DEVICE);
+	if (ret != dev->nb_in_sg) {
+		dev_err(dev->device, "couldn't map in sg\n");
+		goto unmap_in;
+	}
+	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+			 DMA_FROM_DEVICE);
+	if (ret != dev->nb_out_sg) {
+		dev_err(dev->device, "couldn't map out sg\n");
+		goto unmap_out;
+	}
+
+	/* Create input links */
+	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
+	sg = dev->in_sg;
+	for (i = 0; i < dev->nb_in_sg; i++) {
+		dev->hw_link[i]->len = sg->length;
+		dev->hw_link[i]->p = sg->dma_address;
+		if (i == (dev->nb_in_sg - 1)) {
+			dev->hw_link[i]->next = 0;
+		} else {
+			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+			sg = sg_next(sg);
+		}
+	}
+
+	/* Create output links */
+	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
+	sg = dev->out_sg;
+	for (j = i; j < dev->nb_out_sg + i; j++) {
+		dev->hw_link[j]->len = sg->length;
+		dev->hw_link[j]->p = sg->dma_address;
+		if (j == (dev->nb_out_sg + i - 1)) {
+			dev->hw_link[j]->next = 0;
+		} else {
+			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
+			sg = sg_next(sg);
+		}
+	}
+
+	/* Fill remaining fields of hw_desc[1] */
+	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
+	dev->hw_desc[idx]->len1 = dev->total;
+	dev->hw_desc[idx]->len2 = dev->total;
+	dev->hw_desc[idx]->next = 0;
+
+	sahara_dump_descriptors(dev);
+	sahara_dump_links(dev);
+
+	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
+
+	return 0;
+
+unmap_out:
+	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+		DMA_FROM_DEVICE);
+unmap_in:
+	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+		DMA_TO_DEVICE);
+
+	return -EINVAL;
+}
+
+static int sahara_aes_process(struct ablkcipher_request *req)
+{
+	struct sahara_dev *dev = dev_ptr;
+	struct sahara_ctx *ctx;
+	struct sahara_aes_reqctx *rctx;
+	int ret;
+	unsigned long timeout;
+
+	/* Request is ready to be dispatched by the device */
+	dev_dbg(dev->device,
+		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
+		req->nbytes, req->src, req->dst);
+
+	/* assign new request to device */
+	dev->total = req->nbytes;
+	dev->in_sg = req->src;
+	dev->out_sg = req->dst;
+
+	rctx = ablkcipher_request_ctx(req);
+	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+	rctx->mode &= FLAGS_MODE_MASK;
+	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+	if ((dev->flags & FLAGS_CBC) && req->info)
+		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
+
+	/* assign new context to device */
+	dev->ctx = ctx;
+
+	reinit_completion(&dev->dma_completion);
+
+	ret = sahara_hw_descriptor_create(dev);
+	if (ret)
+		return -EINVAL;
+
+	timeout = wait_for_completion_timeout(&dev->dma_completion,
+				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+	if (!timeout) {
+		dev_err(dev->device, "AES timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+		DMA_FROM_DEVICE);
+	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+		DMA_TO_DEVICE);
+
+	return 0;
+}
+
+static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			     unsigned int keylen)
+{
+	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	int ret;
+
+	ctx->keylen = keylen;
+
+	/* SAHARA only supports 128-bit keys */
+	if (keylen == AES_KEYSIZE_128) {
+		memcpy(ctx->key, key, keylen);
+		ctx->flags |= FLAGS_NEW_KEY;
+		return 0;
+	}
+
+	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
+		return -EINVAL;
+
+	/*
+	 * The requested key size is not supported by HW, do a fallback.
+	 */
+	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+						 CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+
+	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
+	tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
+			       CRYPTO_TFM_RES_MASK;
+	return ret;
+}
+
+static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct sahara_dev *dev = dev_ptr;
+	int err = 0;
+
+	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
+		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
+
+	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+		dev_err(dev->device,
+			"request size is not exact amount of AES blocks\n");
+		return -EINVAL;
+	}
+
+	rctx->mode = mode;
+
+	mutex_lock(&dev->queue_mutex);
+	err = ablkcipher_enqueue_request(&dev->queue, req);
+	mutex_unlock(&dev->queue_mutex);
+
+	wake_up_process(dev->kthread);
+
+	return err;
+}
+
+static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+		crypto_ablkcipher_reqtfm(req));
+	int err;
+
+	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+		skcipher_request_set_tfm(subreq, ctx->fallback);
+		skcipher_request_set_callback(subreq, req->base.flags,
+					      NULL, NULL);
+		skcipher_request_set_crypt(subreq, req->src, req->dst,
+					   req->nbytes, req->info);
+		err = crypto_skcipher_encrypt(subreq);
+		skcipher_request_zero(subreq);
+		return err;
+	}
+
+	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
+}
+
+static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+		crypto_ablkcipher_reqtfm(req));
+	int err;
+
+	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+		skcipher_request_set_tfm(subreq, ctx->fallback);
+		skcipher_request_set_callback(subreq, req->base.flags,
+					      NULL, NULL);
+		skcipher_request_set_crypt(subreq, req->src, req->dst,
+					   req->nbytes, req->info);
+		err = crypto_skcipher_decrypt(subreq);
+		skcipher_request_zero(subreq);
+		return err;
+	}
+
+	return sahara_aes_crypt(req, 0);
+}
+
+static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+		crypto_ablkcipher_reqtfm(req));
+	int err;
+
+	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+		skcipher_request_set_tfm(subreq, ctx->fallback);
+		skcipher_request_set_callback(subreq, req->base.flags,
+					      NULL, NULL);
+		skcipher_request_set_crypt(subreq, req->src, req->dst,
+					   req->nbytes, req->info);
+		err = crypto_skcipher_encrypt(subreq);
+		skcipher_request_zero(subreq);
+		return err;
+	}
+
+	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
+}
+
+static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+		crypto_ablkcipher_reqtfm(req));
+	int err;
+
+	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+		skcipher_request_set_tfm(subreq, ctx->fallback);
+		skcipher_request_set_callback(subreq, req->base.flags,
+					      NULL, NULL);
+		skcipher_request_set_crypt(subreq, req->src, req->dst,
+					   req->nbytes, req->info);
+		err = crypto_skcipher_decrypt(subreq);
+		skcipher_request_zero(subreq);
+		return err;
+	}
+
+	return sahara_aes_crypt(req, FLAGS_CBC);
+}
+
+static int sahara_aes_cra_init(struct crypto_tfm *tfm)
+{
+	const char *name = crypto_tfm_alg_name(tfm);
+	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->fallback = crypto_alloc_skcipher(name, 0,
+					      CRYPTO_ALG_ASYNC |
+					      CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->fallback)) {
+		pr_err("Error allocating fallback algo %s\n", name);
+		return PTR_ERR(ctx->fallback);
+	}
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
+
+	return 0;
+}
+
+static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
+{
+	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_skcipher(ctx->fallback);
+}
+
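+/*
+ * Build an MDHA "set mode" descriptor header: the first chunk gets the hash
+ * init form (INIT set), while later chunks get the MD-key form used by the
+ * context-load descriptor. PDATA is set for the final chunk so the hardware
+ * applies the padding, and the parity bit keeps the header at odd parity.
+ */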
+static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
+			      struct sahara_sha_reqctx *rctx)
+{
+	u32 hdr = rctx->mode;
+
+	if (rctx->first) {
+		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
+		hdr |= SAHARA_HDR_MDHA_INIT;
+	} else {
+		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
+	}
+
+	if (rctx->last)
+		hdr |= SAHARA_HDR_MDHA_PDATA;
+
+	if (hweight_long(hdr) % 2 == 0)
+		hdr |= SAHARA_HDR_PARITY_BIT;
+
+	return hdr;
+}
+
+static int sahara_sha_hw_links_create(struct sahara_dev *dev,
+				       struct sahara_sha_reqctx *rctx,
+				       int start)
+{
+	struct scatterlist *sg;
+	unsigned int i;
+	int ret;
+
+	dev->in_sg = rctx->in_sg;
+
+	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
+	if (dev->nb_in_sg < 0) {
+		dev_err(dev->device, "Invalid numbers of src SG.\n");
+		return dev->nb_in_sg;
+	}
+	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
+		dev_err(dev->device, "not enough hw links (%d)\n",
+			dev->nb_in_sg + dev->nb_out_sg);
+		return -EINVAL;
+	}
+
+	sg = dev->in_sg;
+	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
+	if (!ret)
+		return -EFAULT;
+
+	for (i = start; i < dev->nb_in_sg + start; i++) {
+		dev->hw_link[i]->len = sg->length;
+		dev->hw_link[i]->p = sg->dma_address;
+		if (i == (dev->nb_in_sg + start - 1)) {
+			dev->hw_link[i]->next = 0;
+		} else {
+			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+			sg = sg_next(sg);
+		}
+	}
+
+	return i;
+}
+
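+/*
+ * Data descriptor (#8 for the first chunk, #10 afterwards): p1 points at the
+ * link list describing the input scatterlist (or 0 for an empty update) and
+ * p2 at a single link used to store the updated MDHA context.
+ */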
+static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
+						struct sahara_sha_reqctx *rctx,
+						struct ahash_request *req,
+						int index)
+{
+	unsigned result_len;
+	int i = index;
+
+	if (rctx->first)
+		/* Create initial descriptor: #8 */
+		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
+	else
+		/* Create hash descriptor: #10. Must follow #6. */
+		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
+
+	dev->hw_desc[index]->len1 = rctx->total;
+	if (dev->hw_desc[index]->len1 == 0) {
+		/* if len1 is 0, p1 must be 0, too */
+		dev->hw_desc[index]->p1 = 0;
+		rctx->sg_in_idx = 0;
+	} else {
+		/* Create input links */
+		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
+		i = sahara_sha_hw_links_create(dev, rctx, index);
+
+		rctx->sg_in_idx = index;
+		if (i < 0)
+			return i;
+	}
+
+	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
+
+	/* Save the context for the next operation */
+	result_len = rctx->context_size;
+	dev->hw_link[i]->p = dev->context_phys_base;
+
+	dev->hw_link[i]->len = result_len;
+	dev->hw_desc[index]->len2 = result_len;
+
+	dev->hw_link[i]->next = 0;
+
+	return 0;
+}
+
+/*
+ * Load descriptor aka #6
+ *
+ * To load a previously saved context back to the MDHA unit
+ *
+ * p1: Saved Context
+ * p2: NULL
+ */
+static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
+						struct sahara_sha_reqctx *rctx,
+						struct ahash_request *req,
+						int index)
+{
+	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
+
+	dev->hw_desc[index]->len1 = rctx->context_size;
+	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
+	dev->hw_desc[index]->len2 = 0;
+	dev->hw_desc[index]->p2 = 0;
+
+	dev->hw_link[index]->len = rctx->context_size;
+	dev->hw_link[index]->p = dev->context_phys_base;
+	dev->hw_link[index]->next = 0;
+
+	return 0;
+}
+
+static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
+{
+	if (!sg || !sg->length)
+		return nbytes;
+
+	while (nbytes && sg) {
+		if (nbytes <= sg->length) {
+			sg->length = nbytes;
+			sg_mark_end(sg);
+			break;
+		}
+		nbytes -= sg->length;
+		sg = sg_next(sg);
+	}
+
+	return nbytes;
+}
+
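+/*
+ * Gather data for the next hardware operation. Partial blocks are buffered
+ * in rctx->buf until at least one full block is available; only multiples of
+ * the block size are handed to the hardware (the last transfer may be
+ * padded) and the remainder is carried over to the next update. Returns 0
+ * when everything was buffered and -EINPROGRESS when a transfer must run.
+ */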
+static int sahara_sha_prepare_request(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+	unsigned int hash_later;
+	unsigned int block_size;
+	unsigned int len;
+
+	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+	/* append bytes from previous operation */
+	len = rctx->buf_cnt + req->nbytes;
+
+	/* only the last transfer can be padded in hardware */
+	if (!rctx->last && (len < block_size)) {
+		/* too little data, save it for the next operation */
+		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
+					 0, req->nbytes, 0);
+		rctx->buf_cnt += req->nbytes;
+
+		return 0;
+	}
+
+	/* add data from previous operation first */
+	if (rctx->buf_cnt)
+		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
+
+	/* data must always be a multiple of block_size */
+	hash_later = rctx->last ? 0 : len & (block_size - 1);
+	if (hash_later) {
+		unsigned int offset = req->nbytes - hash_later;
+		/* Save remaining bytes for later use */
+		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
+					hash_later, 0);
+	}
+
+	/* nbytes should now be a multiple of the block size */
+	req->nbytes = req->nbytes - hash_later;
+
+	sahara_walk_and_recalc(req->src, req->nbytes);
+
+	/* have data from previous operation and current */
+	if (rctx->buf_cnt && req->nbytes) {
+		sg_init_table(rctx->in_sg_chain, 2);
+		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
+
+		sg_chain(rctx->in_sg_chain, 2, req->src);
+
+		rctx->total = req->nbytes + rctx->buf_cnt;
+		rctx->in_sg = rctx->in_sg_chain;
+
+		req->src = rctx->in_sg_chain;
+	/* only data from previous operation */
+	} else if (rctx->buf_cnt) {
+		if (req->src)
+			rctx->in_sg = req->src;
+		else
+			rctx->in_sg = rctx->in_sg_chain;
+		/* buf was copied into rembuf above */
+		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
+		rctx->total = rctx->buf_cnt;
+	/* no data from previous operation */
+	} else {
+		rctx->in_sg = req->src;
+		rctx->total = req->nbytes;
+		req->src = rctx->in_sg;
+	}
+
+	/* on next call, we only have the remaining data in the buffer */
+	rctx->buf_cnt = hash_later;
+
+	return -EINPROGRESS;
+}
+
+static int sahara_sha_process(struct ahash_request *req)
+{
+	struct sahara_dev *dev = dev_ptr;
+	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+	int ret;
+	unsigned long timeout;
+
+	ret = sahara_sha_prepare_request(req);
+	if (!ret)
+		return ret;
+
+	if (rctx->first) {
+		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+		dev->hw_desc[0]->next = 0;
+		rctx->first = 0;
+	} else {
+		memcpy(dev->context_base, rctx->context, rctx->context_size);
+
+		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
+		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
+		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+		dev->hw_desc[1]->next = 0;
+	}
+
+	sahara_dump_descriptors(dev);
+	sahara_dump_links(dev);
+
+	reinit_completion(&dev->dma_completion);
+
+	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
+
+	timeout = wait_for_completion_timeout(&dev->dma_completion,
+				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+	if (!timeout) {
+		dev_err(dev->device, "SHA timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	if (rctx->sg_in_idx)
+		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+			     DMA_TO_DEVICE);
+
+	memcpy(rctx->context, dev->context_base, rctx->context_size);
+
+	if (req->result)
+		memcpy(req->result, rctx->context, rctx->digest_size);
+
+	return 0;
+}
+
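+/*
+ * Request dispatcher thread: pulls requests off the crypto queue, runs them
+ * through the SHA or AES processing path and completes them, sleeping
+ * whenever the queue is empty.
+ */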
+static int sahara_queue_manage(void *data)
+{
+	struct sahara_dev *dev = (struct sahara_dev *)data;
+	struct crypto_async_request *async_req;
+	struct crypto_async_request *backlog;
+	int ret = 0;
+
+	do {
+		__set_current_state(TASK_INTERRUPTIBLE);
+
+		mutex_lock(&dev->queue_mutex);
+		backlog = crypto_get_backlog(&dev->queue);
+		async_req = crypto_dequeue_request(&dev->queue);
+		mutex_unlock(&dev->queue_mutex);
+
+		if (backlog)
+			backlog->complete(backlog, -EINPROGRESS);
+
+		if (async_req) {
+			if (crypto_tfm_alg_type(async_req->tfm) ==
+			    CRYPTO_ALG_TYPE_AHASH) {
+				struct ahash_request *req =
+					ahash_request_cast(async_req);
+
+				ret = sahara_sha_process(req);
+			} else {
+				struct ablkcipher_request *req =
+					ablkcipher_request_cast(async_req);
+
+				ret = sahara_aes_process(req);
+			}
+
+			async_req->complete(async_req, ret);
+
+			continue;
+		}
+
+		schedule();
+	} while (!kthread_should_stop());
+
+	return 0;
+}
+
+static int sahara_sha_enqueue(struct ahash_request *req, int last)
+{
+	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct sahara_dev *dev = dev_ptr;
+	int ret;
+
+	if (!req->nbytes && !last)
+		return 0;
+
+	rctx->last = last;
+
+	if (!rctx->active) {
+		rctx->active = 1;
+		rctx->first = 1;
+	}
+
+	mutex_lock(&dev->queue_mutex);
+	ret = crypto_enqueue_request(&dev->queue, &req->base);
+	mutex_unlock(&dev->queue_mutex);
+
+	wake_up_process(dev->kthread);
+
+	return ret;
+}
+
+static int sahara_sha_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+
+	memset(rctx, 0, sizeof(*rctx));
+
+	switch (crypto_ahash_digestsize(tfm)) {
+	case SHA1_DIGEST_SIZE:
+		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
+		rctx->digest_size = SHA1_DIGEST_SIZE;
+		break;
+	case SHA256_DIGEST_SIZE:
+		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
+		rctx->digest_size = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rctx->context_size = rctx->digest_size + 4;
+	rctx->active = 0;
+
+	return 0;
+}
+
+static int sahara_sha_update(struct ahash_request *req)
+{
+	return sahara_sha_enqueue(req, 0);
+}
+
+static int sahara_sha_final(struct ahash_request *req)
+{
+	req->nbytes = 0;
+	return sahara_sha_enqueue(req, 1);
+}
+
+static int sahara_sha_finup(struct ahash_request *req)
+{
+	return sahara_sha_enqueue(req, 1);
+}
+
+static int sahara_sha_digest(struct ahash_request *req)
+{
+	sahara_sha_init(req);
+
+	return sahara_sha_finup(req);
+}
+
+static int sahara_sha_export(struct ahash_request *req, void *out)
+{
+	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+
+	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
+
+	return 0;
+}
+
+static int sahara_sha_import(struct ahash_request *req, const void *in)
+{
+	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+
+	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
+
+	return 0;
+}
+
+static int sahara_sha_cra_init(struct crypto_tfm *tfm)
+{
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct sahara_sha_reqctx) +
+				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+
+	return 0;
+}
+
+static struct crypto_alg aes_algs[] = {
+{
+	.cra_name		= "ecb(aes)",
+	.cra_driver_name	= "sahara-ecb-aes",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct sahara_ctx),
+	.cra_alignmask		= 0x0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= sahara_aes_cra_init,
+	.cra_exit		= sahara_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.setkey		= sahara_aes_setkey,
+		.encrypt	= sahara_aes_ecb_encrypt,
+		.decrypt	= sahara_aes_ecb_decrypt,
+	}
+}, {
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "sahara-cbc-aes",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct sahara_ctx),
+	.cra_alignmask		= 0x0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= sahara_aes_cra_init,
+	.cra_exit		= sahara_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= sahara_aes_setkey,
+		.encrypt	= sahara_aes_cbc_encrypt,
+		.decrypt	= sahara_aes_cbc_decrypt,
+	}
+}
+};
+
+static struct ahash_alg sha_v3_algs[] = {
+{
+	.init		= sahara_sha_init,
+	.update		= sahara_sha_update,
+	.final		= sahara_sha_final,
+	.finup		= sahara_sha_finup,
+	.digest		= sahara_sha_digest,
+	.export		= sahara_sha_export,
+	.import		= sahara_sha_import,
+	.halg.digestsize	= SHA1_DIGEST_SIZE,
+	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
+	.halg.base	= {
+		.cra_name		= "sha1",
+		.cra_driver_name	= "sahara-sha1",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct sahara_ctx),
+		.cra_alignmask		= 0,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= sahara_sha_cra_init,
+	}
+},
+};
+
+static struct ahash_alg sha_v4_algs[] = {
+{
+	.init		= sahara_sha_init,
+	.update		= sahara_sha_update,
+	.final		= sahara_sha_final,
+	.finup		= sahara_sha_finup,
+	.digest		= sahara_sha_digest,
+	.export		= sahara_sha_export,
+	.import		= sahara_sha_import,
+	.halg.digestsize	= SHA256_DIGEST_SIZE,
+	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
+	.halg.base	= {
+		.cra_name		= "sha256",
+		.cra_driver_name	= "sahara-sha256",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA256_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct sahara_ctx),
+		.cra_alignmask		= 0,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= sahara_sha_cra_init,
+	}
+},
+};
+
+static irqreturn_t sahara_irq_handler(int irq, void *data)
+{
+	struct sahara_dev *dev = (struct sahara_dev *)data;
+	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
+	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
+
+	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
+		     SAHARA_REG_CMD);
+
+	sahara_decode_status(dev, stat);
+
+	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
+		return IRQ_NONE;
+	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
+		dev->error = 0;
+	} else {
+		sahara_decode_error(dev, err);
+		dev->error = -EINVAL;
+	}
+
+	complete(&dev->dma_completion);
+
+	return IRQ_HANDLED;
+}
+
+
+static int sahara_register_algs(struct sahara_dev *dev)
+{
+	int err;
+	unsigned int i, j, k, l;
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+		INIT_LIST_HEAD(&aes_algs[i].cra_list);
+		err = crypto_register_alg(&aes_algs[i]);
+		if (err)
+			goto err_aes_algs;
+	}
+
+	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
+		err = crypto_register_ahash(&sha_v3_algs[k]);
+		if (err)
+			goto err_sha_v3_algs;
+	}
+
+	if (dev->version > SAHARA_VERSION_3)
+		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
+			err = crypto_register_ahash(&sha_v4_algs[l]);
+			if (err)
+				goto err_sha_v4_algs;
+		}
+
+	return 0;
+
+err_sha_v4_algs:
+	for (j = 0; j < l; j++)
+		crypto_unregister_ahash(&sha_v4_algs[j]);
+
+err_sha_v3_algs:
+	for (j = 0; j < k; j++)
+		crypto_unregister_ahash(&sha_v3_algs[j]);
+
+err_aes_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_alg(&aes_algs[j]);
+
+	return err;
+}
+
+static void sahara_unregister_algs(struct sahara_dev *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
+		crypto_unregister_alg(&aes_algs[i]);
+
+	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
+		crypto_unregister_ahash(&sha_v3_algs[i]);
+
+	if (dev->version > SAHARA_VERSION_3)
+		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
+			crypto_unregister_ahash(&sha_v4_algs[i]);
+}
+
+static const struct platform_device_id sahara_platform_ids[] = {
+	{ .name = "sahara-imx27" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
+
+static const struct of_device_id sahara_dt_ids[] = {
+	{ .compatible = "fsl,imx53-sahara" },
+	{ .compatible = "fsl,imx27-sahara" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sahara_dt_ids);
+
+static int sahara_probe(struct platform_device *pdev)
+{
+	struct sahara_dev *dev;
+	struct resource *res;
+	u32 version;
+	int irq;
+	int err;
+	int i;
+
+	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->device = &pdev->dev;
+	platform_set_drvdata(pdev, dev);
+
+	/* Get the base address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dev->regs_base))
+		return PTR_ERR(dev->regs_base);
+
+	/* Get the IRQ */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq resource\n");
+		return irq;
+	}
+
+	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
+			       0, dev_name(&pdev->dev), dev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to request irq\n");
+		return err;
+	}
+
+	/* clocks */
+	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(dev->clk_ipg)) {
+		dev_err(&pdev->dev, "Could not get ipg clock\n");
+		return PTR_ERR(dev->clk_ipg);
+	}
+
+	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(dev->clk_ahb)) {
+		dev_err(&pdev->dev, "Could not get ahb clock\n");
+		return PTR_ERR(dev->clk_ahb);
+	}
+
+	/* Allocate HW descriptors */
+	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
+			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
+			&dev->hw_phys_desc[0], GFP_KERNEL);
+	if (!dev->hw_desc[0]) {
+		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
+		return -ENOMEM;
+	}
+	dev->hw_desc[1] = dev->hw_desc[0] + 1;
+	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
+				sizeof(struct sahara_hw_desc);
+
+	/* Allocate space for iv and key */
+	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
+				&dev->key_phys_base, GFP_KERNEL);
+	if (!dev->key_base) {
+		dev_err(&pdev->dev, "Could not allocate memory for key\n");
+		return -ENOMEM;
+	}
+	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
+	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
+
+	/* Allocate space for context: largest digest + message length field */
+	dev->context_base = dmam_alloc_coherent(&pdev->dev,
+					SHA256_DIGEST_SIZE + 4,
+					&dev->context_phys_base, GFP_KERNEL);
+	if (!dev->context_base) {
+		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate space for HW links */
+	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
+			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
+			&dev->hw_phys_link[0], GFP_KERNEL);
+	if (!dev->hw_link[0]) {
+		dev_err(&pdev->dev, "Could not allocate hw links\n");
+		return -ENOMEM;
+	}
+	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
+		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
+					sizeof(struct sahara_hw_link);
+		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
+	}
+
+	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
+
+	mutex_init(&dev->queue_mutex);
+
+	dev_ptr = dev;
+
+	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
+	if (IS_ERR(dev->kthread))
+		return PTR_ERR(dev->kthread);
+
+	init_completion(&dev->dma_completion);
+
+	err = clk_prepare_enable(dev->clk_ipg);
+	if (err)
+		return err;
+	err = clk_prepare_enable(dev->clk_ahb);
+	if (err)
+		goto clk_ipg_disable;
+
+	version = sahara_read(dev, SAHARA_REG_VERSION);
+	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
+		if (version != SAHARA_VERSION_3)
+			err = -ENODEV;
+	} else if (of_device_is_compatible(pdev->dev.of_node,
+			"fsl,imx53-sahara")) {
+		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
+			err = -ENODEV;
+		version = (version >> 8) & 0xff;
+	}
+	if (err == -ENODEV) {
+		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
+				version);
+		goto err_algs;
+	}
+
+	dev->version = version;
+
+	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
+		     SAHARA_REG_CMD);
+	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
+			SAHARA_CONTROL_SET_MAXBURST(8) |
+			SAHARA_CONTROL_RNG_AUTORSD |
+			SAHARA_CONTROL_ENABLE_INT,
+			SAHARA_REG_CONTROL);
+
+	err = sahara_register_algs(dev);
+	if (err)
+		goto err_algs;
+
+	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
+
+	return 0;
+
+err_algs:
+	kthread_stop(dev->kthread);
+	dev_ptr = NULL;
+	clk_disable_unprepare(dev->clk_ahb);
+clk_ipg_disable:
+	clk_disable_unprepare(dev->clk_ipg);
+
+	return err;
+}
+
+static int sahara_remove(struct platform_device *pdev)
+{
+	struct sahara_dev *dev = platform_get_drvdata(pdev);
+
+	kthread_stop(dev->kthread);
+
+	sahara_unregister_algs(dev);
+
+	clk_disable_unprepare(dev->clk_ipg);
+	clk_disable_unprepare(dev->clk_ahb);
+
+	dev_ptr = NULL;
+
+	return 0;
+}
+
+static struct platform_driver sahara_driver = {
+	.probe		= sahara_probe,
+	.remove		= sahara_remove,
+	.driver		= {
+		.name	= SAHARA_NAME,
+		.of_match_table = sahara_dt_ids,
+	},
+	.id_table = sahara_platform_ids,
+};
+
+module_platform_driver(sahara_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
+MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
+MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
new file mode 100644
index 0000000..63aa78c
--- /dev/null
+++ b/drivers/crypto/stm32/Kconfig
@@ -0,0 +1,29 @@
+config CRYPTO_DEV_STM32_CRC
+	tristate "Support for STM32 CRC accelerators"
+	depends on ARCH_STM32
+	select CRYPTO_HASH
+	help
+	  This enables support for the CRC32 hw accelerator which can be found
+	  on STMicroelectronics STM32 SoCs.
+
+config CRYPTO_DEV_STM32_HASH
+	tristate "Support for STM32 hash accelerators"
+	depends on ARCH_STM32
+	depends on HAS_DMA
+	select CRYPTO_HASH
+	select CRYPTO_MD5
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_ENGINE
+	help
+	  This enables support for the HASH hw accelerator which can be found
+	  on STMicroelectronics STM32 SoCs.
+
+config CRYPTO_DEV_STM32_CRYP
+	tristate "Support for STM32 CRYP accelerators"
+	depends on ARCH_STM32
+	select CRYPTO_HASH
+	select CRYPTO_ENGINE
+	help
+	  This enables support for the CRYP (AES/DES/TDES) hw accelerator which
+	  can be found on STMicroelectronics STM32 SoCs.
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
new file mode 100644
index 0000000..53d1bb9
--- /dev/null
+++ b/drivers/crypto/stm32/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
new file mode 100644
index 0000000..23b0b7b
--- /dev/null
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -0,0 +1,2124 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Fabien Dessenne <fabien.dessenne@st.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+
+#define DRIVER_NAME             "stm32-cryp"
+
+/* Bit [0] encrypt / decrypt */
+#define FLG_ENCRYPT             BIT(0)
+/* Bits [8..1]: algo & operation mode */
+#define FLG_AES                 BIT(1)
+#define FLG_DES                 BIT(2)
+#define FLG_TDES                BIT(3)
+#define FLG_ECB                 BIT(4)
+#define FLG_CBC                 BIT(5)
+#define FLG_CTR                 BIT(6)
+#define FLG_GCM                 BIT(7)
+#define FLG_CCM                 BIT(8)
+/* Mode mask = bits [15..0] */
+#define FLG_MODE_MASK           GENMASK(15, 0)
+/* Bits [31..16]: status */
+#define FLG_CCM_PADDED_WA       BIT(16)
+
+/* Registers */
+#define CRYP_CR                 0x00000000
+#define CRYP_SR                 0x00000004
+#define CRYP_DIN                0x00000008
+#define CRYP_DOUT               0x0000000C
+#define CRYP_DMACR              0x00000010
+#define CRYP_IMSCR              0x00000014
+#define CRYP_RISR               0x00000018
+#define CRYP_MISR               0x0000001C
+#define CRYP_K0LR               0x00000020
+#define CRYP_K0RR               0x00000024
+#define CRYP_K1LR               0x00000028
+#define CRYP_K1RR               0x0000002C
+#define CRYP_K2LR               0x00000030
+#define CRYP_K2RR               0x00000034
+#define CRYP_K3LR               0x00000038
+#define CRYP_K3RR               0x0000003C
+#define CRYP_IV0LR              0x00000040
+#define CRYP_IV0RR              0x00000044
+#define CRYP_IV1LR              0x00000048
+#define CRYP_IV1RR              0x0000004C
+#define CRYP_CSGCMCCM0R         0x00000050
+#define CRYP_CSGCM0R            0x00000070
+
+/* Registers values */
+#define CR_DEC_NOT_ENC          0x00000004
+#define CR_TDES_ECB             0x00000000
+#define CR_TDES_CBC             0x00000008
+#define CR_DES_ECB              0x00000010
+#define CR_DES_CBC              0x00000018
+#define CR_AES_ECB              0x00000020
+#define CR_AES_CBC              0x00000028
+#define CR_AES_CTR              0x00000030
+#define CR_AES_KP               0x00000038
+#define CR_AES_GCM              0x00080000
+#define CR_AES_CCM              0x00080008
+#define CR_AES_UNKNOWN          0xFFFFFFFF
+#define CR_ALGO_MASK            0x00080038
+#define CR_DATA32               0x00000000
+#define CR_DATA16               0x00000040
+#define CR_DATA8                0x00000080
+#define CR_DATA1                0x000000C0
+#define CR_KEY128               0x00000000
+#define CR_KEY192               0x00000100
+#define CR_KEY256               0x00000200
+#define CR_FFLUSH               0x00004000
+#define CR_CRYPEN               0x00008000
+#define CR_PH_INIT              0x00000000
+#define CR_PH_HEADER            0x00010000
+#define CR_PH_PAYLOAD           0x00020000
+#define CR_PH_FINAL             0x00030000
+#define CR_PH_MASK              0x00030000
+#define CR_NBPBL_SHIFT          20
+
+#define SR_BUSY                 0x00000010
+#define SR_OFNE                 0x00000004
+
+#define IMSCR_IN                BIT(0)
+#define IMSCR_OUT               BIT(1)
+
+#define MISR_IN                 BIT(0)
+#define MISR_OUT                BIT(1)
+
+/* Misc */
+#define AES_BLOCK_32            (AES_BLOCK_SIZE / sizeof(u32))
+#define GCM_CTR_INIT            2
+#define _walked_in              (cryp->in_walk.offset - cryp->in_sg->offset)
+#define _walked_out             (cryp->out_walk.offset - cryp->out_sg->offset)
+#define CRYP_AUTOSUSPEND_DELAY	50
+
+struct stm32_cryp_caps {
+	bool                    swap_final;
+	bool                    padding_wa;
+};
+
+struct stm32_cryp_ctx {
+	struct crypto_engine_ctx enginectx;
+	struct stm32_cryp       *cryp;
+	int                     keylen;
+	u32                     key[AES_KEYSIZE_256 / sizeof(u32)];
+	unsigned long           flags;
+};
+
+struct stm32_cryp_reqctx {
+	unsigned long mode;
+};
+
+struct stm32_cryp {
+	struct list_head        list;
+	struct device           *dev;
+	void __iomem            *regs;
+	struct clk              *clk;
+	unsigned long           flags;
+	u32                     irq_status;
+	const struct stm32_cryp_caps *caps;
+	struct stm32_cryp_ctx   *ctx;
+
+	struct crypto_engine    *engine;
+
+	struct mutex            lock; /* protects req / areq */
+	struct ablkcipher_request *req;
+	struct aead_request     *areq;
+
+	size_t                  authsize;
+	size_t                  hw_blocksize;
+
+	size_t                  total_in;
+	size_t                  total_in_save;
+	size_t                  total_out;
+	size_t                  total_out_save;
+
+	struct scatterlist      *in_sg;
+	struct scatterlist      *out_sg;
+	struct scatterlist      *out_sg_save;
+
+	struct scatterlist      in_sgl;
+	struct scatterlist      out_sgl;
+	bool                    sgs_copied;
+
+	int                     in_sg_len;
+	int                     out_sg_len;
+
+	struct scatter_walk     in_walk;
+	struct scatter_walk     out_walk;
+
+	u32                     last_ctr[4];
+	u32                     gcm_ctr;
+};
+
+struct stm32_cryp_list {
+	struct list_head        dev_list;
+	spinlock_t              lock; /* protect dev_list */
+};
+
+static struct stm32_cryp_list cryp_list = {
+	.dev_list = LIST_HEAD_INIT(cryp_list.dev_list),
+	.lock     = __SPIN_LOCK_UNLOCKED(cryp_list.lock),
+};
+
+static inline bool is_aes(struct stm32_cryp *cryp)
+{
+	return cryp->flags & FLG_AES;
+}
+
+static inline bool is_des(struct stm32_cryp *cryp)
+{
+	return cryp->flags & FLG_DES;
+}
+
+static inline bool is_tdes(struct stm32_cryp *cryp)
+{
+	return cryp->flags & FLG_TDES;
+}
+
+static inline bool is_ecb(struct stm32_cryp *cryp)
+{
+	return cryp->flags & FLG_ECB;
+}
+
+static inline bool is_cbc(struct stm32_cryp *cryp)
+{
+	return cryp->flags & FLG_CBC;
+}
+
+static inline bool is_ctr(struct stm32_cryp *cryp)
+{
+	return cryp->flags & FLG_CTR;
+}
+
+static inline bool is_gcm(struct stm32_cryp *cryp)
+{
+	return cryp->flags & FLG_GCM;
+}
+
+static inline bool is_ccm(struct stm32_cryp *cryp)
+{
+	return cryp->flags & FLG_CCM;
+}
+
+static inline bool is_encrypt(struct stm32_cryp *cryp)
+{
+	return cryp->flags & FLG_ENCRYPT;
+}
+
+static inline bool is_decrypt(struct stm32_cryp *cryp)
+{
+	return !is_encrypt(cryp);
+}
+
+static inline u32 stm32_cryp_read(struct stm32_cryp *cryp, u32 ofst)
+{
+	return readl_relaxed(cryp->regs + ofst);
+}
+
+static inline void stm32_cryp_write(struct stm32_cryp *cryp, u32 ofst, u32 val)
+{
+	writel_relaxed(val, cryp->regs + ofst);
+}
+
+static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp)
+{
+	u32 status;
+
+	return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status,
+			!(status & SR_BUSY), 10, 100000);
+}
+
+static inline int stm32_cryp_wait_enable(struct stm32_cryp *cryp)
+{
+	u32 status;
+
+	return readl_relaxed_poll_timeout(cryp->regs + CRYP_CR, status,
+			!(status & CR_CRYPEN), 10, 100000);
+}
+
+static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp)
+{
+	u32 status;
+
+	return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status,
+			status & SR_OFNE, 10, 100000);
+}
+
+static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp);
+
+static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
+{
+	struct stm32_cryp *tmp, *cryp = NULL;
+
+	spin_lock_bh(&cryp_list.lock);
+	if (!ctx->cryp) {
+		list_for_each_entry(tmp, &cryp_list.dev_list, list) {
+			cryp = tmp;
+			break;
+		}
+		ctx->cryp = cryp;
+	} else {
+		cryp = ctx->cryp;
+	}
+
+	spin_unlock_bh(&cryp_list.lock);
+
+	return cryp;
+}
+
+static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total,
+				    size_t align)
+{
+	int len = 0;
+
+	if (!total)
+		return 0;
+
+	if (!IS_ALIGNED(total, align))
+		return -EINVAL;
+
+	while (sg) {
+		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+			return -EINVAL;
+
+		if (!IS_ALIGNED(sg->length, align))
+			return -EINVAL;
+
+		len += sg->length;
+		sg = sg_next(sg);
+	}
+
+	if (len != total)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp)
+{
+	int ret;
+
+	ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in,
+				       cryp->hw_blocksize);
+	if (ret)
+		return ret;
+
+	ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out,
+				       cryp->hw_blocksize);
+
+	return ret;
+}
+
+static void sg_copy_buf(void *buf, struct scatterlist *sg,
+			unsigned int start, unsigned int nbytes, int out)
+{
+	struct scatter_walk walk;
+
+	if (!nbytes)
+		return;
+
+	scatterwalk_start(&walk, sg);
+	scatterwalk_advance(&walk, start);
+	scatterwalk_copychunks(buf, &walk, nbytes, out);
+	scatterwalk_done(&walk, out, 0);
+}
+
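+/*
+ * When the request scatterlists are not block-size aligned, bounce the data
+ * through freshly allocated contiguous buffers so the peripheral always sees
+ * aligned input/output; the original out_sg is saved so results can be
+ * copied back later.
+ */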
+static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp)
+{
+	void *buf_in, *buf_out;
+	int pages, total_in, total_out;
+
+	if (!stm32_cryp_check_io_aligned(cryp)) {
+		cryp->sgs_copied = 0;
+		return 0;
+	}
+
+	total_in = ALIGN(cryp->total_in, cryp->hw_blocksize);
+	pages = total_in ? get_order(total_in) : 1;
+	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+	total_out = ALIGN(cryp->total_out, cryp->hw_blocksize);
+	pages = total_out ? get_order(total_out) : 1;
+	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+	if (!buf_in || !buf_out) {
+		dev_err(cryp->dev, "Can't allocate pages when unaligned\n");
+		cryp->sgs_copied = 0;
+		return -EFAULT;
+	}
+
+	sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0);
+
+	sg_init_one(&cryp->in_sgl, buf_in, total_in);
+	cryp->in_sg = &cryp->in_sgl;
+	cryp->in_sg_len = 1;
+
+	sg_init_one(&cryp->out_sgl, buf_out, total_out);
+	cryp->out_sg_save = cryp->out_sg;
+	cryp->out_sg = &cryp->out_sgl;
+	cryp->out_sg_len = 1;
+
+	cryp->sgs_copied = 1;
+
+	return 0;
+}
+
+static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
+{
+	if (!iv)
+		return;
+
+	stm32_cryp_write(cryp, CRYP_IV0LR, cpu_to_be32(*iv++));
+	stm32_cryp_write(cryp, CRYP_IV0RR, cpu_to_be32(*iv++));
+
+	if (is_aes(cryp)) {
+		stm32_cryp_write(cryp, CRYP_IV1LR, cpu_to_be32(*iv++));
+		stm32_cryp_write(cryp, CRYP_IV1RR, cpu_to_be32(*iv++));
+	}
+}
+
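+/*
+ * Load the key into the CRYP key registers. The key words are written
+ * starting with the last one into K3RR and walking down, so the key is
+ * right-aligned: a 128-bit AES key only occupies K2LR..K3RR and the K0/K1
+ * registers are left untouched.
+ */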
+static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
+{
+	unsigned int i;
+	int r_id;
+
+	if (is_des(c)) {
+		stm32_cryp_write(c, CRYP_K1LR, cpu_to_be32(c->ctx->key[0]));
+		stm32_cryp_write(c, CRYP_K1RR, cpu_to_be32(c->ctx->key[1]));
+	} else {
+		r_id = CRYP_K3RR;
+		for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4)
+			stm32_cryp_write(c, r_id,
+					 cpu_to_be32(c->ctx->key[i - 1]));
+	}
+}
+
+static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
+{
+	if (is_aes(cryp) && is_ecb(cryp))
+		return CR_AES_ECB;
+
+	if (is_aes(cryp) && is_cbc(cryp))
+		return CR_AES_CBC;
+
+	if (is_aes(cryp) && is_ctr(cryp))
+		return CR_AES_CTR;
+
+	if (is_aes(cryp) && is_gcm(cryp))
+		return CR_AES_GCM;
+
+	if (is_aes(cryp) && is_ccm(cryp))
+		return CR_AES_CCM;
+
+	if (is_des(cryp) && is_ecb(cryp))
+		return CR_DES_ECB;
+
+	if (is_des(cryp) && is_cbc(cryp))
+		return CR_DES_CBC;
+
+	if (is_tdes(cryp) && is_ecb(cryp))
+		return CR_TDES_ECB;
+
+	if (is_tdes(cryp) && is_cbc(cryp))
+		return CR_TDES_CBC;
+
+	dev_err(cryp->dev, "Unknown mode\n");
+	return CR_AES_UNKNOWN;
+}
+
+static unsigned int stm32_cryp_get_input_text_len(struct stm32_cryp *cryp)
+{
+	return is_encrypt(cryp) ? cryp->areq->cryptlen :
+				  cryp->areq->cryptlen - cryp->authsize;
+}
+
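+/*
+ * GCM init phase: program the IV with its 32-bit counter field set to 2
+ * (counter value 1 is reserved for the final tag computation) and wait for
+ * the peripheral to complete the init phase, which it signals by clearing
+ * CRYPEN.
+ */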
+static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg)
+{
+	int ret;
+	u32 iv[4];
+
+	/* Phase 1 : init */
+	memcpy(iv, cryp->areq->iv, 12);
+	iv[3] = cpu_to_be32(GCM_CTR_INIT);
+	cryp->gcm_ctr = GCM_CTR_INIT;
+	stm32_cryp_hw_write_iv(cryp, iv);
+
+	stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN);
+
+	/* Wait for end of processing */
+	ret = stm32_cryp_wait_enable(cryp);
+	if (ret)
+		dev_err(cryp->dev, "Timeout (gcm init)\n");
+
+	return ret;
+}
+
+static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
+{
+	int ret;
+	u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE];
+	u32 *d;
+	unsigned int i, textlen;
+
+	/* Phase 1 : init. Firstly set the CTR value to 1 (not 0) */
+	memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
+	memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
+	iv[AES_BLOCK_SIZE - 1] = 1;
+	stm32_cryp_hw_write_iv(cryp, (u32 *)iv);
+
+	/* Build B0 */
+	memcpy(b0, iv, AES_BLOCK_SIZE);
+
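+	/* B0 flags byte (RFC 3610): (authsize - 2) / 2 in bits 3..5, Adata in bit 6 */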
+	b0[0] |= (8 * ((cryp->authsize - 2) / 2));
+
+	if (cryp->areq->assoclen)
+		b0[0] |= 0x40;
+
+	textlen = stm32_cryp_get_input_text_len(cryp);
+
+	b0[AES_BLOCK_SIZE - 2] = textlen >> 8;
+	b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF;
+
+	/* Enable HW */
+	stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN);
+
+	/* Write B0 */
+	d = (u32 *)b0;
+
+	for (i = 0; i < AES_BLOCK_32; i++) {
+		if (!cryp->caps->padding_wa)
+			*d = cpu_to_be32(*d);
+		stm32_cryp_write(cryp, CRYP_DIN, *d++);
+	}
+
+	/* Wait for end of processing */
+	ret = stm32_cryp_wait_enable(cryp);
+	if (ret)
+		dev_err(cryp->dev, "Timeout (ccm init)\n");
+
+	return ret;
+}
+
+static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
+{
+	int ret;
+	u32 cfg, hw_mode;
+
+	pm_runtime_get_sync(cryp->dev);
+
+	/* Disable interrupt */
+	stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+
+	/* Set key */
+	stm32_cryp_hw_write_key(cryp);
+
+	/* Set configuration */
+	cfg = CR_DATA8 | CR_FFLUSH;
+
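+	/* The key size field is only meaningful for the AES modes */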
+	switch (cryp->ctx->keylen) {
+	case AES_KEYSIZE_128:
+		cfg |= CR_KEY128;
+		break;
+
+	case AES_KEYSIZE_192:
+		cfg |= CR_KEY192;
+		break;
+
+	default:
+	case AES_KEYSIZE_256:
+		cfg |= CR_KEY256;
+		break;
+	}
+
+	hw_mode = stm32_cryp_get_hw_mode(cryp);
+	if (hw_mode == CR_AES_UNKNOWN)
+		return -EINVAL;
+
+	/* AES ECB/CBC decrypt: run key preparation first */
+	if (is_decrypt(cryp) &&
+	    ((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) {
+		stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP | CR_CRYPEN);
+
+		/* Wait for end of processing */
+		ret = stm32_cryp_wait_busy(cryp);
+		if (ret) {
+			dev_err(cryp->dev, "Timeout (key preparation)\n");
+			return ret;
+		}
+	}
+
+	cfg |= hw_mode;
+
+	if (is_decrypt(cryp))
+		cfg |= CR_DEC_NOT_ENC;
+
+	/* Apply config and flush (valid when CRYPEN = 0) */
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	switch (hw_mode) {
+	case CR_AES_GCM:
+	case CR_AES_CCM:
+		/* Phase 1 : init */
+		if (hw_mode == CR_AES_CCM)
+			ret = stm32_cryp_ccm_init(cryp, cfg);
+		else
+			ret = stm32_cryp_gcm_init(cryp, cfg);
+
+		if (ret)
+			return ret;
+
+		/* Phase 2 : header (authenticated data) */
+		if (cryp->areq->assoclen) {
+			cfg |= CR_PH_HEADER;
+		} else if (stm32_cryp_get_input_text_len(cryp)) {
+			cfg |= CR_PH_PAYLOAD;
+			stm32_cryp_write(cryp, CRYP_CR, cfg);
+		} else {
+			cfg |= CR_PH_INIT;
+		}
+
+		break;
+
+	case CR_DES_CBC:
+	case CR_TDES_CBC:
+	case CR_AES_CBC:
+	case CR_AES_CTR:
+		stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info);
+		break;
+
+	default:
+		break;
+	}
+
+	/* Enable now */
+	cfg |= CR_CRYPEN;
+
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	cryp->flags &= ~FLG_CCM_PADDED_WA;
+
+	return 0;
+}
+
+static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
+{
+	if (!err && (is_gcm(cryp) || is_ccm(cryp)))
+		/* Phase 4 : output tag */
+		err = stm32_cryp_read_auth_tag(cryp);
+
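+	/* Copy back and free the bounce buffers, if they were used */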
+	if (cryp->sgs_copied) {
+		void *buf_in, *buf_out;
+		int pages, len;
+
+		buf_in = sg_virt(&cryp->in_sgl);
+		buf_out = sg_virt(&cryp->out_sgl);
+
+		sg_copy_buf(buf_out, cryp->out_sg_save, 0,
+			    cryp->total_out_save, 1);
+
+		len = ALIGN(cryp->total_in_save, cryp->hw_blocksize);
+		pages = len ? get_order(len) : 1;
+		free_pages((unsigned long)buf_in, pages);
+
+		len = ALIGN(cryp->total_out_save, cryp->hw_blocksize);
+		pages = len ? get_order(len) : 1;
+		free_pages((unsigned long)buf_out, pages);
+	}
+
+	pm_runtime_mark_last_busy(cryp->dev);
+	pm_runtime_put_autosuspend(cryp->dev);
+
+	if (is_gcm(cryp) || is_ccm(cryp)) {
+		crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
+		cryp->areq = NULL;
+	} else {
+		crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
+						   err);
+		cryp->req = NULL;
+	}
+
+	memset(cryp->ctx->key, 0, cryp->ctx->keylen);
+
+	mutex_unlock(&cryp->lock);
+}
+
+static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
+{
+	/* Enable interrupt and let the IRQ handler do everything */
+	stm32_cryp_write(cryp, CRYP_IMSCR, IMSCR_IN | IMSCR_OUT);
+
+	return 0;
+}
+
+static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq);
+static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
+					 void *areq);
+
+static int stm32_cryp_cra_init(struct crypto_tfm *tfm)
+{
+	struct stm32_cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx);
+
+	ctx->enginectx.op.do_one_request = stm32_cryp_cipher_one_req;
+	ctx->enginectx.op.prepare_request = stm32_cryp_prepare_cipher_req;
+	ctx->enginectx.op.unprepare_request = NULL;
+	return 0;
+}
+
+static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq);
+static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine,
+				       void *areq);
+
+static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm)
+{
+	struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+	tfm->reqsize = sizeof(struct stm32_cryp_reqctx);
+
+	ctx->enginectx.op.do_one_request = stm32_cryp_aead_one_req;
+	ctx->enginectx.op.prepare_request = stm32_cryp_prepare_aead_req;
+	ctx->enginectx.op.unprepare_request = NULL;
+
+	return 0;
+}
+
+static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+	struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+			crypto_ablkcipher_reqtfm(req));
+	struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
+
+	if (!cryp)
+		return -ENODEV;
+
+	rctx->mode = mode;
+
+	return crypto_transfer_ablkcipher_request_to_engine(cryp->engine, req);
+}
+
+static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode)
+{
+	struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct stm32_cryp_reqctx *rctx = aead_request_ctx(req);
+	struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
+
+	if (!cryp)
+		return -ENODEV;
+
+	rctx->mode = mode;
+
+	return crypto_transfer_aead_request_to_engine(cryp->engine, req);
+}
+
+static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			     unsigned int keylen)
+{
+	struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+				 unsigned int keylen)
+{
+	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256)
+		return -EINVAL;
+	else
+		return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+				 unsigned int keylen)
+{
+	if (keylen != DES_KEY_SIZE)
+		return -EINVAL;
+	else
+		return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+				  unsigned int keylen)
+{
+	if (keylen != (3 * DES_KEY_SIZE))
+		return -EINVAL;
+	else
+		return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+				      unsigned int keylen)
+{
+	struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256)
+		return -EINVAL;
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int stm32_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm,
+					  unsigned int authsize)
+{
+	return authsize == AES_BLOCK_SIZE ? 0 : -EINVAL;
+}
+
+static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
+					  unsigned int authsize)
+{
+	switch (authsize) {
+	case 4:
+	case 6:
+	case 8:
+	case 10:
+	case 12:
+	case 14:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
+}
+
+static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
+}
+
+static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
+}
+
+static int stm32_cryp_aes_gcm_encrypt(struct aead_request *req)
+{
+	return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_gcm_decrypt(struct aead_request *req)
+{
+	return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM);
+}
+
+static int stm32_cryp_aes_ccm_encrypt(struct aead_request *req)
+{
+	return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req)
+{
+	return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM);
+}
+
+static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
+}
+
+static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
+}
+
+static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
+}
+
+static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
+}
+
+static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
+				  struct aead_request *areq)
+{
+	struct stm32_cryp_ctx *ctx;
+	struct stm32_cryp *cryp;
+	struct stm32_cryp_reqctx *rctx;
+	int ret;
+
+	if (!req && !areq)
+		return -EINVAL;
+
+	ctx = req ? crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)) :
+		    crypto_aead_ctx(crypto_aead_reqtfm(areq));
+
+	cryp = ctx->cryp;
+
+	if (!cryp)
+		return -ENODEV;
+
+	mutex_lock(&cryp->lock);
+
+	rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
+	rctx->mode &= FLG_MODE_MASK;
+
+	ctx->cryp = cryp;
+
+	cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | rctx->mode;
+	cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE;
+	cryp->ctx = ctx;
+
+	if (req) {
+		cryp->req = req;
+		cryp->total_in = req->nbytes;
+		cryp->total_out = cryp->total_in;
+	} else {
+		/*
+		 * Length of input and output data:
+		 * Encryption case:
+		 *  INPUT  =   AssocData  ||   PlainText
+		 *          <- assoclen ->  <- cryptlen ->
+		 *          <------- total_in ----------->
+		 *
+		 *  OUTPUT =   AssocData  ||  CipherText  ||   AuthTag
+		 *          <- assoclen ->  <- cryptlen ->  <- authsize ->
+		 *          <---------------- total_out ----------------->
+		 *
+		 * Decryption case:
+		 *  INPUT  =   AssocData  ||  CipherText  ||  AuthTag
+		 *          <- assoclen ->  <--------- cryptlen --------->
+		 *                                          <- authsize ->
+		 *          <---------------- total_in ------------------>
+		 *
+		 *  OUTPUT =   AssocData  ||   PlainText
+		 *          <- assoclen ->  <- cryptlen - authsize ->
+		 *          <---------- total_out ----------------->
+		 */
+		cryp->areq = areq;
+		cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
+		cryp->total_in = areq->assoclen + areq->cryptlen;
+		if (is_encrypt(cryp))
+			/* Append auth tag to output */
+			cryp->total_out = cryp->total_in + cryp->authsize;
+		else
+			/* No auth tag in output */
+			cryp->total_out = cryp->total_in - cryp->authsize;
+	}
+
+	cryp->total_in_save = cryp->total_in;
+	cryp->total_out_save = cryp->total_out;
+
+	cryp->in_sg = req ? req->src : areq->src;
+	cryp->out_sg = req ? req->dst : areq->dst;
+	cryp->out_sg_save = cryp->out_sg;
+
+	cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in);
+	if (cryp->in_sg_len < 0) {
+		dev_err(cryp->dev, "Cannot get in_sg_len\n");
+		ret = cryp->in_sg_len;
+		goto out;
+	}
+
+	cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
+	if (cryp->out_sg_len < 0) {
+		dev_err(cryp->dev, "Cannot get out_sg_len\n");
+		ret = cryp->out_sg_len;
+		goto out;
+	}
+
+	ret = stm32_cryp_copy_sgs(cryp);
+	if (ret)
+		goto out;
+
+	scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+	scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+
+	if (is_gcm(cryp) || is_ccm(cryp)) {
+		/* In output, jump after assoc data */
+		scatterwalk_advance(&cryp->out_walk, cryp->areq->assoclen);
+		cryp->total_out -= cryp->areq->assoclen;
+	}
+
+	ret = stm32_cryp_hw_init(cryp);
+out:
+	if (ret)
+		mutex_unlock(&cryp->lock);
+
+	return ret;
+}
+
+static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
+					 void *areq)
+{
+	struct ablkcipher_request *req = container_of(areq,
+						      struct ablkcipher_request,
+						      base);
+
+	return stm32_cryp_prepare_req(req, NULL);
+}
+
+static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
+{
+	struct ablkcipher_request *req = container_of(areq,
+						      struct ablkcipher_request,
+						      base);
+	struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+			crypto_ablkcipher_reqtfm(req));
+	struct stm32_cryp *cryp = ctx->cryp;
+
+	if (!cryp)
+		return -ENODEV;
+
+	return stm32_cryp_cpu_start(cryp);
+}
+
+static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine, void *areq)
+{
+	struct aead_request *req = container_of(areq, struct aead_request,
+						base);
+
+	return stm32_cryp_prepare_req(NULL, req);
+}
+
+static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
+{
+	struct aead_request *req = container_of(areq, struct aead_request,
+						base);
+	struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct stm32_cryp *cryp = ctx->cryp;
+
+	if (!cryp)
+		return -ENODEV;
+
+	if (unlikely(!cryp->areq->assoclen &&
+		     !stm32_cryp_get_input_text_len(cryp))) {
+		/* No input data to process: get tag and finish */
+		stm32_cryp_finish_req(cryp, 0);
+		return 0;
+	}
+
+	return stm32_cryp_cpu_start(cryp);
+}
+
+static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst,
+				unsigned int n)
+{
+	scatterwalk_advance(&cryp->out_walk, n);
+
+	if (unlikely(cryp->out_sg->length == _walked_out)) {
+		cryp->out_sg = sg_next(cryp->out_sg);
+		if (cryp->out_sg) {
+			scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+			return (sg_virt(cryp->out_sg) + _walked_out);
+		}
+	}
+
+	return (u32 *)((u8 *)dst + n);
+}
+
+static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src,
+			       unsigned int n)
+{
+	scatterwalk_advance(&cryp->in_walk, n);
+
+	if (unlikely(cryp->in_sg->length == _walked_in)) {
+		cryp->in_sg = sg_next(cryp->in_sg);
+		if (cryp->in_sg) {
+			scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+			return (sg_virt(cryp->in_sg) + _walked_in);
+		}
+	}
+
+	return (u32 *)((u8 *)src + n);
+}
+
+static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
+{
+	u32 cfg, size_bit, *dst, d32;
+	u8 *d8;
+	unsigned int i, j;
+	int ret = 0;
+
+	/* Update Config */
+	cfg = stm32_cryp_read(cryp, CRYP_CR);
+
+	cfg &= ~CR_PH_MASK;
+	cfg |= CR_PH_FINAL;
+	cfg &= ~CR_DEC_NOT_ENC;
+	cfg |= CR_CRYPEN;
+
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	if (is_gcm(cryp)) {
+		/* GCM: write aad and payload size (in bits) */
+		size_bit = cryp->areq->assoclen * 8;
+		if (cryp->caps->swap_final)
+			size_bit = cpu_to_be32(size_bit);
+
+		stm32_cryp_write(cryp, CRYP_DIN, 0);
+		stm32_cryp_write(cryp, CRYP_DIN, size_bit);
+
+		size_bit = is_encrypt(cryp) ? cryp->areq->cryptlen :
+				cryp->areq->cryptlen - AES_BLOCK_SIZE;
+		size_bit *= 8;
+		if (cryp->caps->swap_final)
+			size_bit = cpu_to_be32(size_bit);
+
+		stm32_cryp_write(cryp, CRYP_DIN, 0);
+		stm32_cryp_write(cryp, CRYP_DIN, size_bit);
+	} else {
+		/* CCM: write CTR0 */
+		u8 iv[AES_BLOCK_SIZE];
+		u32 *iv32 = (u32 *)iv;
+
+		memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
+		memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
+
+		for (i = 0; i < AES_BLOCK_32; i++) {
+			if (!cryp->caps->padding_wa)
+				*iv32 = cpu_to_be32(*iv32);
+			stm32_cryp_write(cryp, CRYP_DIN, *iv32++);
+		}
+	}
+
+	/* Wait for output data */
+	ret = stm32_cryp_wait_output(cryp);
+	if (ret) {
+		dev_err(cryp->dev, "Timeout (read tag)\n");
+		return ret;
+	}
+
+	if (is_encrypt(cryp)) {
+		/* Get and write tag */
+		dst = sg_virt(cryp->out_sg) + _walked_out;
+
+		for (i = 0; i < AES_BLOCK_32; i++) {
+			if (cryp->total_out >= sizeof(u32)) {
+				/* Read a full u32 */
+				*dst = stm32_cryp_read(cryp, CRYP_DOUT);
+
+				dst = stm32_cryp_next_out(cryp, dst,
+							  sizeof(u32));
+				cryp->total_out -= sizeof(u32);
+			} else if (!cryp->total_out) {
+				/* Empty fifo out (data from input padding) */
+				stm32_cryp_read(cryp, CRYP_DOUT);
+			} else {
+				/* Read less than a u32 */
+				d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+				d8 = (u8 *)&d32;
+
+				for (j = 0; j < cryp->total_out; j++) {
+					*((u8 *)dst) = *(d8++);
+					dst = stm32_cryp_next_out(cryp, dst, 1);
+				}
+				cryp->total_out = 0;
+			}
+		}
+	} else {
+		/* Get and check tag */
+		u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32];
+
+		scatterwalk_map_and_copy(in_tag, cryp->in_sg,
+					 cryp->total_in_save - cryp->authsize,
+					 cryp->authsize, 0);
+
+		for (i = 0; i < AES_BLOCK_32; i++)
+			out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT);
+
+		if (crypto_memneq(in_tag, out_tag, cryp->authsize))
+			ret = -EBADMSG;
+	}
+
+	/* Disable cryp */
+	cfg &= ~CR_CRYPEN;
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	return ret;
+}
+
+static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
+{
+	u32 cr;
+
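+	/*
+	 * The hardware only increments the 32-bit LSB of the counter:
+	 * propagate the carry into the upper words and reload the IV here.
+	 */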
+	if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) {
+		cryp->last_ctr[3] = 0;
+		cryp->last_ctr[2]++;
+		if (!cryp->last_ctr[2]) {
+			cryp->last_ctr[1]++;
+			if (!cryp->last_ctr[1])
+				cryp->last_ctr[0]++;
+		}
+
+		cr = stm32_cryp_read(cryp, CRYP_CR);
+		stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN);
+
+		stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr);
+
+		stm32_cryp_write(cryp, CRYP_CR, cr);
+	}
+
+	cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR);
+	cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR);
+	cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR);
+	cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR);
+}
+
+static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
+{
+	unsigned int i, j;
+	u32 d32, *dst;
+	u8 *d8;
+	size_t tag_size;
+
+	/* Do not read the tag now (if any) */
+	if (is_encrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp)))
+		tag_size = cryp->authsize;
+	else
+		tag_size = 0;
+
+	dst = sg_virt(cryp->out_sg) + _walked_out;
+
+	for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+		if (likely(cryp->total_out - tag_size >= sizeof(u32))) {
+			/* Read a full u32 */
+			*dst = stm32_cryp_read(cryp, CRYP_DOUT);
+
+			dst = stm32_cryp_next_out(cryp, dst, sizeof(u32));
+			cryp->total_out -= sizeof(u32);
+		} else if (cryp->total_out == tag_size) {
+			/* Empty fifo out (data from input padding) */
+			d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+		} else {
+			/* Read less than a u32 */
+			d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+			d8 = (u8 *)&d32;
+
+			for (j = 0; j < cryp->total_out - tag_size; j++) {
+				*((u8 *)dst) = *(d8++);
+				dst = stm32_cryp_next_out(cryp, dst, 1);
+			}
+			cryp->total_out = tag_size;
+		}
+	}
+
+	return !(cryp->total_out - tag_size) || !cryp->total_in;
+}
+
+static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
+{
+	unsigned int i, j;
+	u32 *src;
+	u8 d8[4];
+	size_t tag_size;
+
+	/* Do not write the tag (if any) */
+	if (is_decrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp)))
+		tag_size = cryp->authsize;
+	else
+		tag_size = 0;
+
+	src = sg_virt(cryp->in_sg) + _walked_in;
+
+	for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+		if (likely(cryp->total_in - tag_size >= sizeof(u32))) {
+			/* Write a full u32 */
+			stm32_cryp_write(cryp, CRYP_DIN, *src);
+
+			src = stm32_cryp_next_in(cryp, src, sizeof(u32));
+			cryp->total_in -= sizeof(u32);
+		} else if (cryp->total_in == tag_size) {
+			/* Write padding data */
+			stm32_cryp_write(cryp, CRYP_DIN, 0);
+		} else {
+			/* Write less than a u32 */
+			memset(d8, 0, sizeof(u32));
+			for (j = 0; j < cryp->total_in - tag_size; j++) {
+				d8[j] = *((u8 *)src);
+				src = stm32_cryp_next_in(cryp, src, 1);
+			}
+
+			stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+			cryp->total_in = tag_size;
+		}
+	}
+}
+
+static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
+{
+	int err;
+	u32 cfg, tmp[AES_BLOCK_32];
+	size_t total_in_ori = cryp->total_in;
+	struct scatterlist *out_sg_ori = cryp->out_sg;
+	unsigned int i;
+
+	/* 'Special workaround' procedure described in the datasheet */
+
+	/* a) disable ip */
+	stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+	cfg = stm32_cryp_read(cryp, CRYP_CR);
+	cfg &= ~CR_CRYPEN;
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	/* b) Update IV1R */
+	stm32_cryp_write(cryp, CRYP_IV1RR, cryp->gcm_ctr - 2);
+
+	/* c) change mode to CTR */
+	cfg &= ~CR_ALGO_MASK;
+	cfg |= CR_AES_CTR;
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	/* a) enable IP */
+	cfg |= CR_CRYPEN;
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	/* b) pad and write the last block */
+	stm32_cryp_irq_write_block(cryp);
+	cryp->total_in = total_in_ori;
+	err = stm32_cryp_wait_output(cryp);
+	if (err) {
+		dev_err(cryp->dev, "Timeout (write gcm padded data)\n");
+		return stm32_cryp_finish_req(cryp, err);
+	}
+
+	/* c) get and store encrypted data */
+	stm32_cryp_irq_read_data(cryp);
+	scatterwalk_map_and_copy(tmp, out_sg_ori,
+				 cryp->total_in_save - total_in_ori,
+				 total_in_ori, 0);
+
+	/* d) change mode back to AES GCM */
+	cfg &= ~CR_ALGO_MASK;
+	cfg |= CR_AES_GCM;
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	/* e) change phase to Final */
+	cfg &= ~CR_PH_MASK;
+	cfg |= CR_PH_FINAL;
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	/* f) write padded data */
+	for (i = 0; i < AES_BLOCK_32; i++) {
+		if (cryp->total_in)
+			stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
+		else
+			stm32_cryp_write(cryp, CRYP_DIN, 0);
+
+		cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
+	}
+
+	/* g) Empty fifo out */
+	err = stm32_cryp_wait_output(cryp);
+	if (err) {
+		dev_err(cryp->dev, "Timeout (write gcm padded data)\n");
+		return stm32_cryp_finish_req(cryp, err);
+	}
+
+	for (i = 0; i < AES_BLOCK_32; i++)
+		stm32_cryp_read(cryp, CRYP_DOUT);
+
+	/* h) run the normal Final phase */
+	stm32_cryp_finish_req(cryp, 0);
+}
+
+static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp)
+{
+	u32 cfg, payload_bytes;
+
+	/* Disable the IP, set NPBLB and re-enable the IP */
+	cfg = stm32_cryp_read(cryp, CRYP_CR);
+	cfg &= ~CR_CRYPEN;
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
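+	/* Program the number of padding bytes in the last block of payload */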
+	payload_bytes = is_decrypt(cryp) ? cryp->total_in - cryp->authsize :
+					   cryp->total_in;
+	cfg |= (cryp->hw_blocksize - payload_bytes) << CR_NBPBL_SHIFT;
+	cfg |= CR_CRYPEN;
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+}
+
+static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
+{
+	int err = 0;
+	u32 cfg, iv1tmp;
+	u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32], tmp[AES_BLOCK_32];
+	size_t last_total_out, total_in_ori = cryp->total_in;
+	struct scatterlist *out_sg_ori = cryp->out_sg;
+	unsigned int i;
+
+	/* 'Special workaround' procedure described in the datasheet */
+	cryp->flags |= FLG_CCM_PADDED_WA;
+
+	/* a) disable ip */
+	stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+
+	cfg = stm32_cryp_read(cryp, CRYP_CR);
+	cfg &= ~CR_CRYPEN;
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	/* b) get IV1 from CRYP_CSGCMCCM7 */
+	iv1tmp = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + 7 * 4);
+
+	/* c) Load CRYP_CSGCMCCMxR */
+	for (i = 0; i < ARRAY_SIZE(cstmp1); i++)
+		cstmp1[i] = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + i * 4);
+
+	/* d) Write IV1R */
+	stm32_cryp_write(cryp, CRYP_IV1RR, iv1tmp);
+
+	/* e) change mode to CTR */
+	cfg &= ~CR_ALGO_MASK;
+	cfg |= CR_AES_CTR;
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	/* a) enable IP */
+	cfg |= CR_CRYPEN;
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	/* b) pad and write the last block */
+	stm32_cryp_irq_write_block(cryp);
+	cryp->total_in = total_in_ori;
+	err = stm32_cryp_wait_output(cryp);
+	if (err) {
+		dev_err(cryp->dev, "Timeout (write ccm padded data)\n");
+		return stm32_cryp_finish_req(cryp, err);
+	}
+
+	/* c) get and store decrypted data */
+	last_total_out = cryp->total_out;
+	stm32_cryp_irq_read_data(cryp);
+
+	memset(tmp, 0, sizeof(tmp));
+	scatterwalk_map_and_copy(tmp, out_sg_ori,
+				 cryp->total_out_save - last_total_out,
+				 last_total_out, 0);
+
+	/* d) Load again CRYP_CSGCMCCMxR */
+	for (i = 0; i < ARRAY_SIZE(cstmp2); i++)
+		cstmp2[i] = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + i * 4);
+
+	/* e) change mode back to AES CCM */
+	cfg &= ~CR_ALGO_MASK;
+	cfg |= CR_AES_CCM;
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	/* f) change phase to header */
+	cfg &= ~CR_PH_MASK;
+	cfg |= CR_PH_HEADER;
+	stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+	/* g) XOR and write padded data */
+	for (i = 0; i < ARRAY_SIZE(tmp); i++) {
+		tmp[i] ^= cstmp1[i];
+		tmp[i] ^= cstmp2[i];
+		stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
+	}
+
+	/* h) wait for completion */
+	err = stm32_cryp_wait_busy(cryp);
+	if (err)
+		dev_err(cryp->dev, "Timeout (write ccm padded data)\n");
+
+	/* i) run the normal Final phase */
+	stm32_cryp_finish_req(cryp, err);
+}
+
+static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
+{
+	if (unlikely(!cryp->total_in)) {
+		dev_warn(cryp->dev, "No more data to process\n");
+		return;
+	}
+
+	if (unlikely(cryp->total_in < AES_BLOCK_SIZE &&
+		     (stm32_cryp_get_hw_mode(cryp) == CR_AES_GCM) &&
+		     is_encrypt(cryp))) {
+		/* Padding for AES GCM encryption */
+		if (cryp->caps->padding_wa)
+			/* Special case 1 */
+			return stm32_cryp_irq_write_gcm_padded_data(cryp);
+
+		/* Setting padding bytes (NPBLB) */
+		stm32_cryp_irq_set_npblb(cryp);
+	}
+
+	if (unlikely((cryp->total_in - cryp->authsize < AES_BLOCK_SIZE) &&
+		     (stm32_cryp_get_hw_mode(cryp) == CR_AES_CCM) &&
+		     is_decrypt(cryp))) {
+		/* Padding for AES CCM decryption */
+		if (cryp->caps->padding_wa)
+			/* Special case 2 */
+			return stm32_cryp_irq_write_ccm_padded_data(cryp);
+
+		/* Setting padding bytes (NPBLB) */
+		stm32_cryp_irq_set_npblb(cryp);
+	}
+
+	if (is_aes(cryp) && is_ctr(cryp))
+		stm32_cryp_check_ctr_counter(cryp);
+
+	stm32_cryp_irq_write_block(cryp);
+}
+
+static void stm32_cryp_irq_write_gcm_header(struct stm32_cryp *cryp)
+{
+	int err;
+	unsigned int i, j;
+	u32 cfg, *src;
+
+	src = sg_virt(cryp->in_sg) + _walked_in;
+
+	for (i = 0; i < AES_BLOCK_32; i++) {
+		stm32_cryp_write(cryp, CRYP_DIN, *src);
+
+		src = stm32_cryp_next_in(cryp, src, sizeof(u32));
+		cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
+
+		/* Check if whole header written */
+		if ((cryp->total_in_save - cryp->total_in) ==
+				cryp->areq->assoclen) {
+			/* Write padding if needed */
+			for (j = i + 1; j < AES_BLOCK_32; j++)
+				stm32_cryp_write(cryp, CRYP_DIN, 0);
+
+			/* Wait for completion */
+			err = stm32_cryp_wait_busy(cryp);
+			if (err) {
+				dev_err(cryp->dev, "Timeout (gcm header)\n");
+				return stm32_cryp_finish_req(cryp, err);
+			}
+
+			if (stm32_cryp_get_input_text_len(cryp)) {
+				/* Phase 3 : payload */
+				cfg = stm32_cryp_read(cryp, CRYP_CR);
+				cfg &= ~CR_CRYPEN;
+				stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+				cfg &= ~CR_PH_MASK;
+				cfg |= CR_PH_PAYLOAD;
+				cfg |= CR_CRYPEN;
+				stm32_cryp_write(cryp, CRYP_CR, cfg);
+			} else {
+				/* Phase 4 : tag */
+				stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+				stm32_cryp_finish_req(cryp, 0);
+			}
+
+			break;
+		}
+
+		if (!cryp->total_in)
+			break;
+	}
+}
+
+static void stm32_cryp_irq_write_ccm_header(struct stm32_cryp *cryp)
+{
+	int err;
+	unsigned int i = 0, j, k;
+	u32 alen, cfg, *src;
+	u8 d8[4];
+
+	src = sg_virt(cryp->in_sg) + _walked_in;
+	alen = cryp->areq->assoclen;
+
+	if (!_walked_in) {
+		if (cryp->areq->assoclen <= 65280) {
+			/* Write first u32 of B1 */
+			d8[0] = (alen >> 8) & 0xFF;
+			d8[1] = alen & 0xFF;
+			d8[2] = *((u8 *)src);
+			src = stm32_cryp_next_in(cryp, src, 1);
+			d8[3] = *((u8 *)src);
+			src = stm32_cryp_next_in(cryp, src, 1);
+
+			stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+			i++;
+
+			cryp->total_in -= min_t(size_t, 2, cryp->total_in);
+		} else {
+			/* Build the first two u32 words of B1 */
+			d8[0] = 0xFF;
+			d8[1] = 0xFE;
+			d8[2] = (alen >> 24) & 0xFF;
+			d8[3] = (alen >> 16) & 0xFF;
+
+			stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+			i++;
+
+			d8[0] = (alen >> 8) & 0xFF;
+			d8[1] = alen & 0xFF;
+			d8[2] = *((u8 *)src);
+			src = stm32_cryp_next_in(cryp, src, 1);
+			d8[3] = *((u8 *)src);
+			src = stm32_cryp_next_in(cryp, src, 1);
+
+			stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+			i++;
+
+			cryp->total_in -= min_t(size_t, 2, cryp->total_in);
+		}
+	}
+
+	/* Write next u32 */
+	for (; i < AES_BLOCK_32; i++) {
+		/* Build a u32 */
+		memset(d8, 0, sizeof(u32));
+		for (k = 0; k < sizeof(u32); k++) {
+			d8[k] = *((u8 *)src);
+			src = stm32_cryp_next_in(cryp, src, 1);
+
+			cryp->total_in -= min_t(size_t, 1, cryp->total_in);
+			if ((cryp->total_in_save - cryp->total_in) == alen)
+				break;
+		}
+
+		stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+
+		if ((cryp->total_in_save - cryp->total_in) == alen) {
+			/* Write padding if needed */
+			for (j = i + 1; j < AES_BLOCK_32; j++)
+				stm32_cryp_write(cryp, CRYP_DIN, 0);
+
+			/* Wait for completion */
+			err = stm32_cryp_wait_busy(cryp);
+			if (err) {
+				dev_err(cryp->dev, "Timeout (ccm header)\n");
+				return stm32_cryp_finish_req(cryp, err);
+			}
+
+			if (stm32_cryp_get_input_text_len(cryp)) {
+				/* Phase 3 : payload */
+				cfg = stm32_cryp_read(cryp, CRYP_CR);
+				cfg &= ~CR_CRYPEN;
+				stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+				cfg &= ~CR_PH_MASK;
+				cfg |= CR_PH_PAYLOAD;
+				cfg |= CR_CRYPEN;
+				stm32_cryp_write(cryp, CRYP_CR, cfg);
+			} else {
+				/* Phase 4 : tag */
+				stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+				stm32_cryp_finish_req(cryp, 0);
+			}
+
+			break;
+		}
+	}
+}
+
+static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
+{
+	struct stm32_cryp *cryp = arg;
+	u32 ph;
+
+	if (cryp->irq_status & MISR_OUT)
+		/* Output FIFO IRQ: read data */
+		if (unlikely(stm32_cryp_irq_read_data(cryp))) {
+			/* All bytes processed, finish */
+			stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+			stm32_cryp_finish_req(cryp, 0);
+			return IRQ_HANDLED;
+		}
+
+	if (cryp->irq_status & MISR_IN) {
+		if (is_gcm(cryp)) {
+			ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
+			if (unlikely(ph == CR_PH_HEADER))
+				/* Write Header */
+				stm32_cryp_irq_write_gcm_header(cryp);
+			else
+				/* Input FIFO IRQ: write data */
+				stm32_cryp_irq_write_data(cryp);
+			cryp->gcm_ctr++;
+		} else if (is_ccm(cryp)) {
+			ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
+			if (unlikely(ph == CR_PH_HEADER))
+				/* Write Header */
+				stm32_cryp_irq_write_ccm_header(cryp);
+			else
+				/* Input FIFO IRQ: write data */
+				stm32_cryp_irq_write_data(cryp);
+		} else {
+			/* Input FIFO IRQ: write data */
+			stm32_cryp_irq_write_data(cryp);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32_cryp_irq(int irq, void *arg)
+{
+	struct stm32_cryp *cryp = arg;
+
+	cryp->irq_status = stm32_cryp_read(cryp, CRYP_MISR);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static struct crypto_alg crypto_algs[] = {
+{
+	.cra_name		= "ecb(aes)",
+	.cra_driver_name	= "stm32-ecb-aes",
+	.cra_priority		= 200,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct stm32_cryp_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= stm32_cryp_cra_init,
+	.cra_ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.setkey		= stm32_cryp_aes_setkey,
+		.encrypt	= stm32_cryp_aes_ecb_encrypt,
+		.decrypt	= stm32_cryp_aes_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "stm32-cbc-aes",
+	.cra_priority		= 200,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct stm32_cryp_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= stm32_cryp_cra_init,
+	.cra_ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= stm32_cryp_aes_setkey,
+		.encrypt	= stm32_cryp_aes_cbc_encrypt,
+		.decrypt	= stm32_cryp_aes_cbc_decrypt,
+	}
+},
+{
+	.cra_name		= "ctr(aes)",
+	.cra_driver_name	= "stm32-ctr-aes",
+	.cra_priority		= 200,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct stm32_cryp_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= stm32_cryp_cra_init,
+	.cra_ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= stm32_cryp_aes_setkey,
+		.encrypt	= stm32_cryp_aes_ctr_encrypt,
+		.decrypt	= stm32_cryp_aes_ctr_decrypt,
+	}
+},
+{
+	.cra_name		= "ecb(des)",
+	.cra_driver_name	= "stm32-ecb-des",
+	.cra_priority		= 200,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct stm32_cryp_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= stm32_cryp_cra_init,
+	.cra_ablkcipher = {
+		.min_keysize	= DES_BLOCK_SIZE,
+		.max_keysize	= DES_BLOCK_SIZE,
+		.setkey		= stm32_cryp_des_setkey,
+		.encrypt	= stm32_cryp_des_ecb_encrypt,
+		.decrypt	= stm32_cryp_des_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "cbc(des)",
+	.cra_driver_name	= "stm32-cbc-des",
+	.cra_priority		= 200,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct stm32_cryp_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= stm32_cryp_cra_init,
+	.cra_ablkcipher = {
+		.min_keysize	= DES_BLOCK_SIZE,
+		.max_keysize	= DES_BLOCK_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= stm32_cryp_des_setkey,
+		.encrypt	= stm32_cryp_des_cbc_encrypt,
+		.decrypt	= stm32_cryp_des_cbc_decrypt,
+	}
+},
+{
+	.cra_name		= "ecb(des3_ede)",
+	.cra_driver_name	= "stm32-ecb-des3",
+	.cra_priority		= 200,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct stm32_cryp_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= stm32_cryp_cra_init,
+	.cra_ablkcipher = {
+		.min_keysize	= 3 * DES_BLOCK_SIZE,
+		.max_keysize	= 3 * DES_BLOCK_SIZE,
+		.setkey		= stm32_cryp_tdes_setkey,
+		.encrypt	= stm32_cryp_tdes_ecb_encrypt,
+		.decrypt	= stm32_cryp_tdes_ecb_decrypt,
+	}
+},
+{
+	.cra_name		= "cbc(des3_ede)",
+	.cra_driver_name	= "stm32-cbc-des3",
+	.cra_priority		= 200,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+				  CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= DES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct stm32_cryp_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= stm32_cryp_cra_init,
+	.cra_ablkcipher = {
+		.min_keysize	= 3 * DES_BLOCK_SIZE,
+		.max_keysize	= 3 * DES_BLOCK_SIZE,
+		.ivsize		= DES_BLOCK_SIZE,
+		.setkey		= stm32_cryp_tdes_setkey,
+		.encrypt	= stm32_cryp_tdes_cbc_encrypt,
+		.decrypt	= stm32_cryp_tdes_cbc_decrypt,
+	}
+},
+};
+
+static struct aead_alg aead_algs[] = {
+{
+	.setkey		= stm32_cryp_aes_aead_setkey,
+	.setauthsize	= stm32_cryp_aes_gcm_setauthsize,
+	.encrypt	= stm32_cryp_aes_gcm_encrypt,
+	.decrypt	= stm32_cryp_aes_gcm_decrypt,
+	.init		= stm32_cryp_aes_aead_init,
+	.ivsize		= 12,
+	.maxauthsize	= AES_BLOCK_SIZE,
+
+	.base = {
+		.cra_name		= "gcm(aes)",
+		.cra_driver_name	= "stm32-gcm-aes",
+		.cra_priority		= 200,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct stm32_cryp_ctx),
+		.cra_alignmask		= 0xf,
+		.cra_module		= THIS_MODULE,
+	},
+},
+{
+	.setkey		= stm32_cryp_aes_aead_setkey,
+	.setauthsize	= stm32_cryp_aes_ccm_setauthsize,
+	.encrypt	= stm32_cryp_aes_ccm_encrypt,
+	.decrypt	= stm32_cryp_aes_ccm_decrypt,
+	.init		= stm32_cryp_aes_aead_init,
+	.ivsize		= AES_BLOCK_SIZE,
+	.maxauthsize	= AES_BLOCK_SIZE,
+
+	.base = {
+		.cra_name		= "ccm(aes)",
+		.cra_driver_name	= "stm32-ccm-aes",
+		.cra_priority		= 200,
+		.cra_flags		= CRYPTO_ALG_ASYNC,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct stm32_cryp_ctx),
+		.cra_alignmask		= 0xf,
+		.cra_module		= THIS_MODULE,
+	},
+},
+};
+
+static const struct stm32_cryp_caps f7_data = {
+	.swap_final = true,
+	.padding_wa = true,
+};
+
+static const struct stm32_cryp_caps mp1_data = {
+	.swap_final = false,
+	.padding_wa = false,
+};
+
+static const struct of_device_id stm32_dt_ids[] = {
+	{ .compatible = "st,stm32f756-cryp", .data = &f7_data},
+	{ .compatible = "st,stm32mp1-cryp", .data = &mp1_data},
+	{},
+};
+MODULE_DEVICE_TABLE(of, stm32_dt_ids);
+
+static int stm32_cryp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct stm32_cryp *cryp;
+	struct resource *res;
+	struct reset_control *rst;
+	int irq, ret;
+
+	cryp = devm_kzalloc(dev, sizeof(*cryp), GFP_KERNEL);
+	if (!cryp)
+		return -ENOMEM;
+
+	cryp->caps = of_device_get_match_data(dev);
+	if (!cryp->caps)
+		return -ENODEV;
+
+	cryp->dev = dev;
+
+	mutex_init(&cryp->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	cryp->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(cryp->regs))
+		return PTR_ERR(cryp->regs);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "Cannot get IRQ resource\n");
+		return irq;
+	}
+
+	ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq,
+					stm32_cryp_irq_thread, IRQF_ONESHOT,
+					dev_name(dev), cryp);
+	if (ret) {
+		dev_err(dev, "Cannot grab IRQ\n");
+		return ret;
+	}
+
+	cryp->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(cryp->clk)) {
+		dev_err(dev, "Could not get clock\n");
+		return PTR_ERR(cryp->clk);
+	}
+
+	ret = clk_prepare_enable(cryp->clk);
+	if (ret) {
+		dev_err(cryp->dev, "Failed to enable clock\n");
+		return ret;
+	}
+
+	pm_runtime_set_autosuspend_delay(dev, CRYP_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(dev);
+
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	rst = devm_reset_control_get(dev, NULL);
+	if (!IS_ERR(rst)) {
+		reset_control_assert(rst);
+		udelay(2);
+		reset_control_deassert(rst);
+	}
+
+	platform_set_drvdata(pdev, cryp);
+
+	spin_lock(&cryp_list.lock);
+	list_add(&cryp->list, &cryp_list.dev_list);
+	spin_unlock(&cryp_list.lock);
+
+	/* Initialize crypto engine */
+	cryp->engine = crypto_engine_alloc_init(dev, 1);
+	if (!cryp->engine) {
+		dev_err(dev, "Could not init crypto engine\n");
+		ret = -ENOMEM;
+		goto err_engine1;
+	}
+
+	ret = crypto_engine_start(cryp->engine);
+	if (ret) {
+		dev_err(dev, "Could not start crypto engine\n");
+		goto err_engine2;
+	}
+
+	ret = crypto_register_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+	if (ret) {
+		dev_err(dev, "Could not register algs\n");
+		goto err_algs;
+	}
+
+	ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+	if (ret)
+		goto err_aead_algs;
+
+	dev_info(dev, "Initialized\n");
+
+	pm_runtime_put_sync(dev);
+
+	return 0;
+
+err_aead_algs:
+	crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+err_algs:
+err_engine2:
+	crypto_engine_exit(cryp->engine);
+err_engine1:
+	spin_lock(&cryp_list.lock);
+	list_del(&cryp->list);
+	spin_unlock(&cryp_list.lock);
+
+	pm_runtime_disable(dev);
+	pm_runtime_put_noidle(dev);
+
+	clk_disable_unprepare(cryp->clk);
+
+	return ret;
+}
+
+static int stm32_cryp_remove(struct platform_device *pdev)
+{
+	struct stm32_cryp *cryp = platform_get_drvdata(pdev);
+	int ret;
+
+	if (!cryp)
+		return -ENODEV;
+
+	ret = pm_runtime_get_sync(cryp->dev);
+	if (ret < 0)
+		return ret;
+
+	crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+	crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+
+	crypto_engine_exit(cryp->engine);
+
+	spin_lock(&cryp_list.lock);
+	list_del(&cryp->list);
+	spin_unlock(&cryp_list.lock);
+
+	pm_runtime_disable(cryp->dev);
+	pm_runtime_put_noidle(cryp->dev);
+
+	clk_disable_unprepare(cryp->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_cryp_runtime_suspend(struct device *dev)
+{
+	struct stm32_cryp *cryp = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(cryp->clk);
+
+	return 0;
+}
+
+static int stm32_cryp_runtime_resume(struct device *dev)
+{
+	struct stm32_cryp *cryp = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(cryp->clk);
+	if (ret) {
+		dev_err(cryp->dev, "Failed to prepare_enable clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_cryp_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(stm32_cryp_runtime_suspend,
+			   stm32_cryp_runtime_resume, NULL)
+};
+
+static struct platform_driver stm32_cryp_driver = {
+	.probe  = stm32_cryp_probe,
+	.remove = stm32_cryp_remove,
+	.driver = {
+		.name           = DRIVER_NAME,
+		.pm		= &stm32_cryp_pm_ops,
+		.of_match_table = stm32_dt_ids,
+	},
+};
+
+module_platform_driver(stm32_cryp_driver);
+
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 CRYP hardware driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
new file mode 100644
index 0000000..590d735
--- /dev/null
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -0,0 +1,1643 @@
+/*
+ * This file is part of STM32 Crypto driver for Linux.
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
+ *
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <crypto/engine.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/internal/hash.h>
+
+#define HASH_CR				0x00
+#define HASH_DIN			0x04
+#define HASH_STR			0x08
+#define HASH_IMR			0x20
+#define HASH_SR				0x24
+#define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
+#define HASH_HREG(x)			(0x310 + ((x) * 0x04))
+#define HASH_HWCFGR			0x3F0
+#define HASH_VER			0x3F4
+#define HASH_ID				0x3F8
+
+/* Control Register */
+#define HASH_CR_INIT			BIT(2)
+#define HASH_CR_DMAE			BIT(3)
+#define HASH_CR_DATATYPE_POS		4
+#define HASH_CR_MODE			BIT(6)
+#define HASH_CR_MDMAT			BIT(13)
+#define HASH_CR_DMAA			BIT(14)
+#define HASH_CR_LKEY			BIT(16)
+
+#define HASH_CR_ALGO_SHA1		0x0
+#define HASH_CR_ALGO_MD5		0x80
+#define HASH_CR_ALGO_SHA224		0x40000
+#define HASH_CR_ALGO_SHA256		0x40080
+
+/* Interrupt */
+#define HASH_DINIE			BIT(0)
+#define HASH_DCIE			BIT(1)
+
+/* Interrupt Mask */
+#define HASH_MASK_CALC_COMPLETION	BIT(0)
+#define HASH_MASK_DATA_INPUT		BIT(1)
+
+/* Context swap register */
+#define HASH_CSR_REGISTER_NUMBER	53
+
+/* Status Flags */
+#define HASH_SR_DATA_INPUT_READY	BIT(0)
+#define HASH_SR_OUTPUT_READY		BIT(1)
+#define HASH_SR_DMA_ACTIVE		BIT(2)
+#define HASH_SR_BUSY			BIT(3)
+
+/* STR Register */
+#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
+#define HASH_STR_DCAL			BIT(8)
+
+#define HASH_FLAGS_INIT			BIT(0)
+#define HASH_FLAGS_OUTPUT_READY		BIT(1)
+#define HASH_FLAGS_CPU			BIT(2)
+#define HASH_FLAGS_DMA_READY		BIT(3)
+#define HASH_FLAGS_DMA_ACTIVE		BIT(4)
+#define HASH_FLAGS_HMAC_INIT		BIT(5)
+#define HASH_FLAGS_HMAC_FINAL		BIT(6)
+#define HASH_FLAGS_HMAC_KEY		BIT(7)
+
+#define HASH_FLAGS_FINAL		BIT(15)
+#define HASH_FLAGS_FINUP		BIT(16)
+#define HASH_FLAGS_ALGO_MASK		GENMASK(21, 18)
+#define HASH_FLAGS_MD5			BIT(18)
+#define HASH_FLAGS_SHA1			BIT(19)
+#define HASH_FLAGS_SHA224		BIT(20)
+#define HASH_FLAGS_SHA256		BIT(21)
+#define HASH_FLAGS_ERRORS		BIT(22)
+#define HASH_FLAGS_HMAC			BIT(23)
+
+#define HASH_OP_UPDATE			1
+#define HASH_OP_FINAL			2
+
+enum stm32_hash_data_format {
+	HASH_DATA_32_BITS		= 0x0,
+	HASH_DATA_16_BITS		= 0x1,
+	HASH_DATA_8_BITS		= 0x2,
+	HASH_DATA_1_BIT			= 0x3
+};
+
+#define HASH_BUFLEN			256
+#define HASH_LONG_KEY			64
+#define HASH_MAX_KEY_SIZE		(SHA256_BLOCK_SIZE * 8)
+#define HASH_QUEUE_LENGTH		16
+#define HASH_DMA_THRESHOLD		50
+
+#define HASH_AUTOSUSPEND_DELAY		50
+
+struct stm32_hash_ctx {
+	struct crypto_engine_ctx enginectx;
+	struct stm32_hash_dev	*hdev;
+	unsigned long		flags;
+
+	u8			key[HASH_MAX_KEY_SIZE];
+	int			keylen;
+};
+
+struct stm32_hash_request_ctx {
+	struct stm32_hash_dev	*hdev;
+	unsigned long		flags;
+	unsigned long		op;
+
+	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+	size_t			digcnt;
+	size_t			bufcnt;
+	size_t			buflen;
+
+	/* DMA */
+	struct scatterlist	*sg;
+	unsigned int		offset;
+	unsigned int		total;
+	struct scatterlist	sg_key;
+
+	dma_addr_t		dma_addr;
+	size_t			dma_ct;
+	int			nents;
+
+	u8			data_type;
+
+	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
+
+	/* Export Context */
+	u32			*hw_context;
+};
+
+struct stm32_hash_algs_info {
+	struct ahash_alg	*algs_list;
+	size_t			size;
+};
+
+struct stm32_hash_pdata {
+	struct stm32_hash_algs_info	*algs_info;
+	size_t				algs_info_size;
+};
+
+struct stm32_hash_dev {
+	struct list_head	list;
+	struct device		*dev;
+	struct clk		*clk;
+	struct reset_control	*rst;
+	void __iomem		*io_base;
+	phys_addr_t		phys_base;
+	u32			dma_mode;
+	u32			dma_maxburst;
+
+	spinlock_t		lock; /* lock to protect queue */
+
+	struct ahash_request	*req;
+	struct crypto_engine	*engine;
+
+	int			err;
+	unsigned long		flags;
+
+	struct dma_chan		*dma_lch;
+	struct completion	dma_completion;
+
+	const struct stm32_hash_pdata	*pdata;
+};
+
+struct stm32_hash_drv {
+	struct list_head	dev_list;
+	spinlock_t		lock; /* List protection access */
+};
+
+static struct stm32_hash_drv stm32_hash = {
+	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
+};
+
+static void stm32_hash_dma_callback(void *param);
+
+static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
+{
+	return readl_relaxed(hdev->io_base + offset);
+}
+
+static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
+				    u32 offset, u32 value)
+{
+	writel_relaxed(value, hdev->io_base + offset);
+}
+
+static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
+{
+	u32 status;
+
+	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
+				   !(status & HASH_SR_BUSY), 10, 10000);
+}
+
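+/* Program NBLW: the number of valid bits in the last word written to DIN */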
+static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
+{
+	u32 reg;
+
+	reg = stm32_hash_read(hdev, HASH_STR);
+	reg &= ~(HASH_STR_NBLW_MASK);
+	reg |= (8U * ((length) % 4U));
+	stm32_hash_write(hdev, HASH_STR, reg);
+}
+
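+/* Feed the HMAC key into the FIFO and trigger its processing with DCAL */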
+static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	u32 reg;
+	int keylen = ctx->keylen;
+	void *key = ctx->key;
+
+	if (keylen) {
+		stm32_hash_set_nblw(hdev, keylen);
+
+		while (keylen > 0) {
+			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
+			keylen -= 4;
+			key += 4;
+		}
+
+		reg = stm32_hash_read(hdev, HASH_STR);
+		reg |= HASH_STR_DCAL;
+		stm32_hash_write(hdev, HASH_STR, reg);
+
+		return -EINPROGRESS;
+	}
+
+	return 0;
+}
+
+static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	u32 reg = HASH_CR_INIT;
+
+	if (!(hdev->flags & HASH_FLAGS_INIT)) {
+		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+		case HASH_FLAGS_MD5:
+			reg |= HASH_CR_ALGO_MD5;
+			break;
+		case HASH_FLAGS_SHA1:
+			reg |= HASH_CR_ALGO_SHA1;
+			break;
+		case HASH_FLAGS_SHA224:
+			reg |= HASH_CR_ALGO_SHA224;
+			break;
+		case HASH_FLAGS_SHA256:
+			reg |= HASH_CR_ALGO_SHA256;
+			break;
+		default:
+			reg |= HASH_CR_ALGO_MD5;
+		}
+
+		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
+
+		if (rctx->flags & HASH_FLAGS_HMAC) {
+			hdev->flags |= HASH_FLAGS_HMAC;
+			reg |= HASH_CR_MODE;
+			if (ctx->keylen > HASH_LONG_KEY)
+				reg |= HASH_CR_LKEY;
+		}
+
+		stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
+
+		stm32_hash_write(hdev, HASH_CR, reg);
+
+		hdev->flags |= HASH_FLAGS_INIT;
+
+		dev_dbg(hdev->dev, "Write Control %x\n", reg);
+	}
+}
+
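+/* Copy request data from the scatterlist into the CPU buffer, up to buflen */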
+static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
+{
+	size_t count;
+
+	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
+		count = min(rctx->sg->length - rctx->offset, rctx->total);
+		count = min(count, rctx->buflen - rctx->bufcnt);
+
+		if (count <= 0) {
+			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
+				rctx->sg = sg_next(rctx->sg);
+				continue;
+			} else {
+				break;
+			}
+		}
+
+		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
+					 rctx->offset, count, 0);
+
+		rctx->bufcnt += count;
+		rctx->offset += count;
+		rctx->total -= count;
+
+		if (rctx->offset == rctx->sg->length) {
+			rctx->sg = sg_next(rctx->sg);
+			if (rctx->sg)
+				rctx->offset = 0;
+			else
+				rctx->total = 0;
+		}
+	}
+}
+
+static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
+			       const u8 *buf, size_t length, int final)
+{
+	unsigned int count, len32;
+	const u32 *buffer = (const u32 *)buf;
+	u32 reg;
+
+	if (final)
+		hdev->flags |= HASH_FLAGS_FINAL;
+
+	len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+	dev_dbg(hdev->dev, "%s: length: %zu, final: %x len32 %i\n",
+		__func__, length, final, len32);
+
+	hdev->flags |= HASH_FLAGS_CPU;
+
+	stm32_hash_write_ctrl(hdev);
+
+	if (stm32_hash_wait_busy(hdev))
+		return -ETIMEDOUT;
+
+	if ((hdev->flags & HASH_FLAGS_HMAC) &&
+	    (hdev->flags & ~HASH_FLAGS_HMAC_KEY)) {
+		hdev->flags |= HASH_FLAGS_HMAC_KEY;
+		stm32_hash_write_key(hdev);
+		if (stm32_hash_wait_busy(hdev))
+			return -ETIMEDOUT;
+	}
+
+	for (count = 0; count < len32; count++)
+		stm32_hash_write(hdev, HASH_DIN, buffer[count]);
+
+	if (final) {
+		stm32_hash_set_nblw(hdev, length);
+		reg = stm32_hash_read(hdev, HASH_STR);
+		reg |= HASH_STR_DCAL;
+		stm32_hash_write(hdev, HASH_STR, reg);
+		if (hdev->flags & HASH_FLAGS_HMAC) {
+			if (stm32_hash_wait_busy(hdev))
+				return -ETIMEDOUT;
+			stm32_hash_write_key(hdev);
+		}
+		return -EINPROGRESS;
+	}
+
+	return 0;
+}
+
+static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+	int bufcnt, err = 0, final;
+
+	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
+
+	final = (rctx->flags & HASH_FLAGS_FINUP);
+
+	while ((rctx->total >= rctx->buflen) ||
+	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
+		stm32_hash_append_sg(rctx);
+		bufcnt = rctx->bufcnt;
+		rctx->bufcnt = 0;
+		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
+	}
+
+	stm32_hash_append_sg(rctx);
+
+	if (final) {
+		bufcnt = rctx->bufcnt;
+		rctx->bufcnt = 0;
+		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
+					  (rctx->flags & HASH_FLAGS_FINUP));
+	}
+
+	return err;
+}
+
+static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
+			       struct scatterlist *sg, int length, int mdma)
+{
+	struct dma_async_tx_descriptor *in_desc;
+	dma_cookie_t cookie;
+	u32 reg;
+	int err;
+
+	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
+					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
+					  DMA_CTRL_ACK);
+	if (!in_desc) {
+		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
+		return -ENOMEM;
+	}
+
+	reinit_completion(&hdev->dma_completion);
+	in_desc->callback = stm32_hash_dma_callback;
+	in_desc->callback_param = hdev;
+
+	hdev->flags |= HASH_FLAGS_FINAL;
+	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
+
+	reg = stm32_hash_read(hdev, HASH_CR);
+
+	if (mdma)
+		reg |= HASH_CR_MDMAT;
+	else
+		reg &= ~HASH_CR_MDMAT;
+
+	reg |= HASH_CR_DMAE;
+
+	stm32_hash_write(hdev, HASH_CR, reg);
+
+	stm32_hash_set_nblw(hdev, length);
+
+	cookie = dmaengine_submit(in_desc);
+	err = dma_submit_error(cookie);
+	if (err)
+		return -ENOMEM;
+
+	dma_async_issue_pending(hdev->dma_lch);
+
+	if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
+						       msecs_to_jiffies(100)))
+		err = -ETIMEDOUT;
+
+	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
+				     NULL, NULL) != DMA_COMPLETE)
+		err = -ETIMEDOUT;
+
+	if (err) {
+		dev_err(hdev->dev, "DMA Error %i\n", err);
+		dmaengine_terminate_all(hdev->dma_lch);
+		return err;
+	}
+
+	return -EINPROGRESS;
+}
+
+static void stm32_hash_dma_callback(void *param)
+{
+	struct stm32_hash_dev *hdev = param;
+
+	complete(&hdev->dma_completion);
+
+	hdev->flags |= HASH_FLAGS_DMA_READY;
+}
+
+static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	int err;
+
+	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
+		err = stm32_hash_write_key(hdev);
+		if (stm32_hash_wait_busy(hdev))
+			return -ETIMEDOUT;
+	} else {
+		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
+			sg_init_one(&rctx->sg_key, ctx->key,
+				    ALIGN(ctx->keylen, sizeof(u32)));
+
+		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
+					  DMA_TO_DEVICE);
+		if (rctx->dma_ct == 0) {
+			dev_err(hdev->dev, "dma_map_sg error\n");
+			return -ENOMEM;
+		}
+
+		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);
+
+		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
+	}
+
+	return err;
+}
+
+static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
+{
+	struct dma_slave_config dma_conf;
+	int err;
+
+	memset(&dma_conf, 0, sizeof(dma_conf));
+
+	dma_conf.direction = DMA_MEM_TO_DEV;
+	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
+	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dma_conf.src_maxburst = hdev->dma_maxburst;
+	dma_conf.dst_maxburst = hdev->dma_maxburst;
+	dma_conf.device_fc = false;
+
+	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
+	if (!hdev->dma_lch) {
+		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
+		return -EBUSY;
+	}
+
+	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
+	if (err) {
+		dma_release_channel(hdev->dma_lch);
+		hdev->dma_lch = NULL;
+		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
+		return err;
+	}
+
+	init_completion(&hdev->dma_completion);
+
+	return 0;
+}
+
+static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+	struct scatterlist sg[1], *tsg;
+	int err = 0, len = 0, reg, ncp = 0;
+	unsigned int i;
+	u32 *buffer = (void *)rctx->buffer;
+
+	rctx->sg = hdev->req->src;
+	rctx->total = hdev->req->nbytes;
+
+	rctx->nents = sg_nents(rctx->sg);
+
+	if (rctx->nents < 0)
+		return -EINVAL;
+
+	stm32_hash_write_ctrl(hdev);
+
+	if (hdev->flags & HASH_FLAGS_HMAC) {
+		err = stm32_hash_hmac_dma_send(hdev);
+		if (err != -EINPROGRESS)
+			return err;
+	}
+
+	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
+		sg[0] = *tsg;
+		len = sg->length;
+
+		if (sg_is_last(sg)) {
+			if (hdev->dma_mode == 1) {
+				len = (ALIGN(sg->length, 16) - 16);
+
+				ncp = sg_pcopy_to_buffer(
+					rctx->sg, rctx->nents,
+					rctx->buffer, sg->length - len,
+					rctx->total - sg->length + len);
+
+				sg->length = len;
+			} else {
+				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
+					len = sg->length;
+					sg->length = ALIGN(sg->length,
+							   sizeof(u32));
+				}
+			}
+		}
+
+		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
+					  DMA_TO_DEVICE);
+		if (rctx->dma_ct == 0) {
+			dev_err(hdev->dev, "dma_map_sg error\n");
+			return -ENOMEM;
+		}
+
+		err = stm32_hash_xmit_dma(hdev, sg, len,
+					  !sg_is_last(sg));
+
+		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
+
+		if (err == -ENOMEM)
+			return err;
+	}
+
+	if (hdev->dma_mode == 1) {
+		if (stm32_hash_wait_busy(hdev))
+			return -ETIMEDOUT;
+		reg = stm32_hash_read(hdev, HASH_CR);
+		reg &= ~HASH_CR_DMAE;
+		reg |= HASH_CR_DMAA;
+		stm32_hash_write(hdev, HASH_CR, reg);
+
+		if (ncp) {
+			memset((u8 *)buffer + ncp, 0,
+			       ALIGN(ncp, sizeof(u32)) - ncp);
+			writesl(hdev->io_base + HASH_DIN, buffer,
+				DIV_ROUND_UP(ncp, sizeof(u32)));
+		}
+		stm32_hash_set_nblw(hdev, ncp);
+		reg = stm32_hash_read(hdev, HASH_STR);
+		reg |= HASH_STR_DCAL;
+		stm32_hash_write(hdev, HASH_STR, reg);
+		err = -EINPROGRESS;
+	}
+
+	if (hdev->flags & HASH_FLAGS_HMAC) {
+		if (stm32_hash_wait_busy(hdev))
+			return -ETIMEDOUT;
+		err = stm32_hash_hmac_dma_send(hdev);
+	}
+
+	return err;
+}
+
+static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
+{
+	struct stm32_hash_dev *hdev = NULL, *tmp;
+
+	spin_lock_bh(&stm32_hash.lock);
+	if (!ctx->hdev) {
+		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
+			hdev = tmp;
+			break;
+		}
+		ctx->hdev = hdev;
+	} else {
+		hdev = ctx->hdev;
+	}
+
+	spin_unlock_bh(&stm32_hash.lock);
+
+	return hdev;
+}
+
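+/*
+ * DMA is only used when the request is larger than HASH_DMA_THRESHOLD,
+ * every scatterlist entry but the last has a 32-bit aligned length
+ * (multi-entry lists are rejected altogether in dma_mode 1) and the
+ * source offset is word aligned; otherwise the CPU path is taken.
+ */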
+static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
+{
+	struct scatterlist *sg;
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+	int i;
+
+	if (req->nbytes <= HASH_DMA_THRESHOLD)
+		return false;
+
+	if (sg_nents(req->src) > 1) {
+		if (hdev->dma_mode == 1)
+			return false;
+		for_each_sg(req->src, sg, sg_nents(req->src), i) {
+			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
+			    (!sg_is_last(sg)))
+				return false;
+		}
+	}
+
+	if (req->src->offset % 4)
+		return false;
+
+	return true;
+}
+
+static int stm32_hash_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+
+	rctx->hdev = hdev;
+
+	rctx->flags = HASH_FLAGS_CPU;
+
+	rctx->digcnt = crypto_ahash_digestsize(tfm);
+	switch (rctx->digcnt) {
+	case MD5_DIGEST_SIZE:
+		rctx->flags |= HASH_FLAGS_MD5;
+		break;
+	case SHA1_DIGEST_SIZE:
+		rctx->flags |= HASH_FLAGS_SHA1;
+		break;
+	case SHA224_DIGEST_SIZE:
+		rctx->flags |= HASH_FLAGS_SHA224;
+		break;
+	case SHA256_DIGEST_SIZE:
+		rctx->flags |= HASH_FLAGS_SHA256;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rctx->bufcnt = 0;
+	rctx->buflen = HASH_BUFLEN;
+	rctx->total = 0;
+	rctx->offset = 0;
+	rctx->data_type = HASH_DATA_8_BITS;
+
+	memset(rctx->buffer, 0, HASH_BUFLEN);
+
+	if (ctx->flags & HASH_FLAGS_HMAC)
+		rctx->flags |= HASH_FLAGS_HMAC;
+
+	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
+
+	return 0;
+}
+
+static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
+{
+	return stm32_hash_update_cpu(hdev);
+}
+
+static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
+{
+	struct ahash_request *req = hdev->req;
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	int err;
+	int buflen = rctx->bufcnt;
+
+	rctx->bufcnt = 0;
+
+	if (!(rctx->flags & HASH_FLAGS_CPU))
+		err = stm32_hash_dma_send(hdev);
+	else
+		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
+
+	return err;
+}
+
+static void stm32_hash_copy_hash(struct ahash_request *req)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	u32 *hash = (u32 *)rctx->digest;
+	unsigned int i, hashsize;
+
+	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+	case HASH_FLAGS_MD5:
+		hashsize = MD5_DIGEST_SIZE;
+		break;
+	case HASH_FLAGS_SHA1:
+		hashsize = SHA1_DIGEST_SIZE;
+		break;
+	case HASH_FLAGS_SHA224:
+		hashsize = SHA224_DIGEST_SIZE;
+		break;
+	case HASH_FLAGS_SHA256:
+		hashsize = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return;
+	}
+
+	for (i = 0; i < hashsize / sizeof(u32); i++)
+		hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
+						      HASH_HREG(i)));
+}
+
+static int stm32_hash_finish(struct ahash_request *req)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+	if (!req->result)
+		return -EINVAL;
+
+	memcpy(req->result, rctx->digest, rctx->digcnt);
+
+	return 0;
+}
+
+static void stm32_hash_finish_req(struct ahash_request *req, int err)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_dev *hdev = rctx->hdev;
+
+	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
+		stm32_hash_copy_hash(req);
+		err = stm32_hash_finish(req);
+		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
+				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
+				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
+				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
+				 HASH_FLAGS_HMAC_KEY);
+	} else {
+		rctx->flags |= HASH_FLAGS_ERRORS;
+	}
+
+	pm_runtime_mark_last_busy(hdev->dev);
+	pm_runtime_put_autosuspend(hdev->dev);
+
+	crypto_finalize_hash_request(hdev->engine, req, err);
+}
+
+static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
+			      struct stm32_hash_request_ctx *rctx)
+{
+	pm_runtime_get_sync(hdev->dev);
+
+	if (!(HASH_FLAGS_INIT & hdev->flags)) {
+		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
+		stm32_hash_write(hdev, HASH_STR, 0);
+		stm32_hash_write(hdev, HASH_DIN, 0);
+		stm32_hash_write(hdev, HASH_IMR, 0);
+		hdev->err = 0;
+	}
+
+	return 0;
+}
+
+static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
+static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
+
+static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
+				   struct ahash_request *req)
+{
+	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
+}
+
+static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
+{
+	struct ahash_request *req = container_of(areq, struct ahash_request,
+						 base);
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+	struct stm32_hash_request_ctx *rctx;
+
+	if (!hdev)
+		return -ENODEV;
+
+	hdev->req = req;
+
+	rctx = ahash_request_ctx(req);
+
+	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
+		rctx->op, req->nbytes);
+
+	return stm32_hash_hw_init(hdev, rctx);
+}
+
+static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
+{
+	struct ahash_request *req = container_of(areq, struct ahash_request,
+						 base);
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+	struct stm32_hash_request_ctx *rctx;
+	int err = 0;
+
+	if (!hdev)
+		return -ENODEV;
+
+	hdev->req = req;
+
+	rctx = ahash_request_ctx(req);
+
+	if (rctx->op == HASH_OP_UPDATE)
+		err = stm32_hash_update_req(hdev);
+	else if (rctx->op == HASH_OP_FINAL)
+		err = stm32_hash_final_req(hdev);
+
+	/* The done task will not finish it, so do it here */
+	if (err != -EINPROGRESS)
+		stm32_hash_finish_req(req, err);
+
+	return 0;
+}
+
+static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct stm32_hash_dev *hdev = ctx->hdev;
+
+	rctx->op = op;
+
+	return stm32_hash_handle_queue(hdev, req);
+}
+
+static int stm32_hash_update(struct ahash_request *req)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
+		return 0;
+
+	rctx->total = req->nbytes;
+	rctx->sg = req->src;
+	rctx->offset = 0;
+
+	if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
+		stm32_hash_append_sg(rctx);
+		return 0;
+	}
+
+	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
+}
+
+static int stm32_hash_final(struct ahash_request *req)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+	rctx->flags |= HASH_FLAGS_FINUP;
+
+	return stm32_hash_enqueue(req, HASH_OP_FINAL);
+}
+
+static int stm32_hash_finup(struct ahash_request *req)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+	int err1, err2;
+
+	rctx->flags |= HASH_FLAGS_FINUP;
+
+	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
+		rctx->flags &= ~HASH_FLAGS_CPU;
+
+	err1 = stm32_hash_update(req);
+
+	if (err1 == -EINPROGRESS || err1 == -EBUSY)
+		return err1;
+
+	/*
+	 * final() always has to be called to clean up resources,
+	 * even if update() failed, except on -EINPROGRESS
+	 */
+	err2 = stm32_hash_final(req);
+
+	return err1 ?: err2;
+}
+
+static int stm32_hash_digest(struct ahash_request *req)
+{
+	return stm32_hash_init(req) ?: stm32_hash_finup(req);
+}
+
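+/*
+ * export()/import() save and restore the HASH peripheral context (IMR,
+ * STR, CR and the HASH_CSRx registers) together with the software request
+ * state, so a partially hashed request can be suspended and resumed later.
+ */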
+static int stm32_hash_export(struct ahash_request *req, void *out)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+	u32 *preg;
+	unsigned int i;
+
+	pm_runtime_get_sync(hdev->dev);
+
+	while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
+		cpu_relax();
+
+	rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
+					 sizeof(u32),
+					 GFP_KERNEL);
+	if (!rctx->hw_context) {
+		pm_runtime_mark_last_busy(hdev->dev);
+		pm_runtime_put_autosuspend(hdev->dev);
+		return -ENOMEM;
+	}
+
+	preg = rctx->hw_context;
+
+	*preg++ = stm32_hash_read(hdev, HASH_IMR);
+	*preg++ = stm32_hash_read(hdev, HASH_STR);
+	*preg++ = stm32_hash_read(hdev, HASH_CR);
+	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+
+	pm_runtime_mark_last_busy(hdev->dev);
+	pm_runtime_put_autosuspend(hdev->dev);
+
+	memcpy(out, rctx, sizeof(*rctx));
+
+	return 0;
+}
+
+static int stm32_hash_import(struct ahash_request *req, const void *in)
+{
+	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+	const u32 *preg = in;
+	u32 reg;
+	unsigned int i;
+
+	memcpy(rctx, in, sizeof(*rctx));
+
+	preg = rctx->hw_context;
+
+	pm_runtime_get_sync(hdev->dev);
+
+	stm32_hash_write(hdev, HASH_IMR, *preg++);
+	stm32_hash_write(hdev, HASH_STR, *preg++);
+	stm32_hash_write(hdev, HASH_CR, *preg);
+	reg = *preg++ | HASH_CR_INIT;
+	stm32_hash_write(hdev, HASH_CR, reg);
+
+	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+		stm32_hash_write(hdev, HASH_CSR(i), *preg++);
+
+	pm_runtime_mark_last_busy(hdev->dev);
+	pm_runtime_put_autosuspend(hdev->dev);
+
+	kfree(rctx->hw_context);
+
+	return 0;
+}
+
+static int stm32_hash_setkey(struct crypto_ahash *tfm,
+			     const u8 *key, unsigned int keylen)
+{
+	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	if (keylen <= HASH_MAX_KEY_SIZE) {
+		memcpy(ctx->key, key, keylen);
+		ctx->keylen = keylen;
+	} else {
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
+				    const char *algs_hmac_name)
+{
+	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct stm32_hash_request_ctx));
+
+	ctx->keylen = 0;
+
+	if (algs_hmac_name)
+		ctx->flags |= HASH_FLAGS_HMAC;
+
+	ctx->enginectx.op.do_one_request = stm32_hash_one_request;
+	ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
+	ctx->enginectx.op.unprepare_request = NULL;
+	return 0;
+}
+
+static int stm32_hash_cra_init(struct crypto_tfm *tfm)
+{
+	return stm32_hash_cra_init_algs(tfm, NULL);
+}
+
+static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
+{
+	return stm32_hash_cra_init_algs(tfm, "md5");
+}
+
+static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
+{
+	return stm32_hash_cra_init_algs(tfm, "sha1");
+}
+
+static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
+{
+	return stm32_hash_cra_init_algs(tfm, "sha224");
+}
+
+static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
+{
+	return stm32_hash_cra_init_algs(tfm, "sha256");
+}
+
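+/*
+ * Threaded half of the interrupt: once the digest is ready (CPU mode) or
+ * the DMA transfer has completed, finish the current request.
+ */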
+static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
+{
+	struct stm32_hash_dev *hdev = dev_id;
+
+	if (HASH_FLAGS_CPU & hdev->flags) {
+		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
+			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
+			goto finish;
+		}
+	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
+		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
+			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
+			goto finish;
+		}
+	}
+
+	return IRQ_HANDLED;
+
+finish:
+	/* Finish current request */
+	stm32_hash_finish_req(hdev->req, 0);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
+{
+	struct stm32_hash_dev *hdev = dev_id;
+	u32 reg;
+
+	reg = stm32_hash_read(hdev, HASH_SR);
+	if (reg & HASH_SR_OUTPUT_READY) {
+		reg &= ~HASH_SR_OUTPUT_READY;
+		stm32_hash_write(hdev, HASH_SR, reg);
+		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
+		/* Disable interrupt */
+		stm32_hash_write(hdev, HASH_IMR, 0);
+		return IRQ_WAKE_THREAD;
+	}
+
+	return IRQ_NONE;
+}
+
+static struct ahash_alg algs_md5_sha1[] = {
+	{
+		.init = stm32_hash_init,
+		.update = stm32_hash_update,
+		.final = stm32_hash_final,
+		.finup = stm32_hash_finup,
+		.digest = stm32_hash_digest,
+		.export = stm32_hash_export,
+		.import = stm32_hash_import,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.base = {
+				.cra_name = "md5",
+				.cra_driver_name = "stm32-md5",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_alignmask = 3,
+				.cra_init = stm32_hash_cra_init,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+	{
+		.init = stm32_hash_init,
+		.update = stm32_hash_update,
+		.final = stm32_hash_final,
+		.finup = stm32_hash_finup,
+		.digest = stm32_hash_digest,
+		.export = stm32_hash_export,
+		.import = stm32_hash_import,
+		.setkey = stm32_hash_setkey,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.base = {
+				.cra_name = "hmac(md5)",
+				.cra_driver_name = "stm32-hmac-md5",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_alignmask = 3,
+				.cra_init = stm32_hash_cra_md5_init,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+	{
+		.init = stm32_hash_init,
+		.update = stm32_hash_update,
+		.final = stm32_hash_final,
+		.finup = stm32_hash_finup,
+		.digest = stm32_hash_digest,
+		.export = stm32_hash_export,
+		.import = stm32_hash_import,
+		.halg = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "stm32-sha1",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_alignmask = 3,
+				.cra_init = stm32_hash_cra_init,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+	{
+		.init = stm32_hash_init,
+		.update = stm32_hash_update,
+		.final = stm32_hash_final,
+		.finup = stm32_hash_finup,
+		.digest = stm32_hash_digest,
+		.export = stm32_hash_export,
+		.import = stm32_hash_import,
+		.setkey = stm32_hash_setkey,
+		.halg = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.base = {
+				.cra_name = "hmac(sha1)",
+				.cra_driver_name = "stm32-hmac-sha1",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_alignmask = 3,
+				.cra_init = stm32_hash_cra_sha1_init,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+};
+
+static struct ahash_alg algs_sha224_sha256[] = {
+	{
+		.init = stm32_hash_init,
+		.update = stm32_hash_update,
+		.final = stm32_hash_final,
+		.finup = stm32_hash_finup,
+		.digest = stm32_hash_digest,
+		.export = stm32_hash_export,
+		.import = stm32_hash_import,
+		.halg = {
+			.digestsize = SHA224_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.base = {
+				.cra_name = "sha224",
+				.cra_driver_name = "stm32-sha224",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_alignmask = 3,
+				.cra_init = stm32_hash_cra_init,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+	{
+		.init = stm32_hash_init,
+		.update = stm32_hash_update,
+		.final = stm32_hash_final,
+		.finup = stm32_hash_finup,
+		.digest = stm32_hash_digest,
+		.setkey = stm32_hash_setkey,
+		.export = stm32_hash_export,
+		.import = stm32_hash_import,
+		.halg = {
+			.digestsize = SHA224_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.base = {
+				.cra_name = "hmac(sha224)",
+				.cra_driver_name = "stm32-hmac-sha224",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_alignmask = 3,
+				.cra_init = stm32_hash_cra_sha224_init,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+	{
+		.init = stm32_hash_init,
+		.update = stm32_hash_update,
+		.final = stm32_hash_final,
+		.finup = stm32_hash_finup,
+		.digest = stm32_hash_digest,
+		.export = stm32_hash_export,
+		.import = stm32_hash_import,
+		.halg = {
+			.digestsize = SHA256_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.base = {
+				.cra_name = "sha256",
+				.cra_driver_name = "stm32-sha256",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_alignmask = 3,
+				.cra_init = stm32_hash_cra_init,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+	{
+		.init = stm32_hash_init,
+		.update = stm32_hash_update,
+		.final = stm32_hash_final,
+		.finup = stm32_hash_finup,
+		.digest = stm32_hash_digest,
+		.export = stm32_hash_export,
+		.import = stm32_hash_import,
+		.setkey = stm32_hash_setkey,
+		.halg = {
+			.digestsize = SHA256_DIGEST_SIZE,
+			.statesize = sizeof(struct stm32_hash_request_ctx),
+			.base = {
+				.cra_name = "hmac(sha256)",
+				.cra_driver_name = "stm32-hmac-sha256",
+				.cra_priority = 200,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
+				.cra_alignmask = 3,
+				.cra_init = stm32_hash_cra_sha256_init,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+};
+
+static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
+{
+	unsigned int i, j;
+	int err;
+
+	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
+		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
+			err = crypto_register_ahash(
+				&hdev->pdata->algs_info[i].algs_list[j]);
+			if (err)
+				goto err_algs;
+		}
+	}
+
+	return 0;
+err_algs:
+	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
+	for (; i--; ) {
+		for (; j--;)
+			crypto_unregister_ahash(
+				&hdev->pdata->algs_info[i].algs_list[j]);
+	}
+
+	return err;
+}
+
+static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
+{
+	unsigned int i, j;
+
+	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
+		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
+			crypto_unregister_ahash(
+				&hdev->pdata->algs_info[i].algs_list[j]);
+	}
+
+	return 0;
+}
+
+static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
+	{
+		.algs_list	= algs_md5_sha1,
+		.size		= ARRAY_SIZE(algs_md5_sha1),
+	},
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
+	.algs_info	= stm32_hash_algs_info_stm32f4,
+	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
+};
+
+static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
+	{
+		.algs_list	= algs_md5_sha1,
+		.size		= ARRAY_SIZE(algs_md5_sha1),
+	},
+	{
+		.algs_list	= algs_sha224_sha256,
+		.size		= ARRAY_SIZE(algs_sha224_sha256),
+	},
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
+	.algs_info	= stm32_hash_algs_info_stm32f7,
+	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
+};
+
+static const struct of_device_id stm32_hash_of_match[] = {
+	{
+		.compatible = "st,stm32f456-hash",
+		.data = &stm32_hash_pdata_stm32f4,
+	},
+	{
+		.compatible = "st,stm32f756-hash",
+		.data = &stm32_hash_pdata_stm32f7,
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
+
+static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
+				   struct device *dev)
+{
+	hdev->pdata = of_device_get_match_data(dev);
+	if (!hdev->pdata) {
+		dev_err(dev, "no compatible OF match\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u32(dev->of_node, "dma-maxburst",
+				 &hdev->dma_maxburst)) {
+		dev_info(dev, "dma-maxburst not specified, using 0\n");
+		hdev->dma_maxburst = 0;
+	}
+
+	return 0;
+}
+
+static int stm32_hash_probe(struct platform_device *pdev)
+{
+	struct stm32_hash_dev *hdev;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret, irq;
+
+	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
+	if (!hdev)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hdev->io_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(hdev->io_base))
+		return PTR_ERR(hdev->io_base);
+
+	hdev->phys_base = res->start;
+
+	ret = stm32_hash_get_of_match(hdev, dev);
+	if (ret)
+		return ret;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "Cannot get IRQ resource\n");
+		return irq;
+	}
+
+	ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
+					stm32_hash_irq_thread, IRQF_ONESHOT,
+					dev_name(dev), hdev);
+	if (ret) {
+		dev_err(dev, "Cannot grab IRQ\n");
+		return ret;
+	}
+
+	hdev->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(hdev->clk)) {
+		dev_err(dev, "failed to get clock for hash (%lu)\n",
+			PTR_ERR(hdev->clk));
+		return PTR_ERR(hdev->clk);
+	}
+
+	ret = clk_prepare_enable(hdev->clk);
+	if (ret) {
+		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
+		return ret;
+	}
+
+	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(dev);
+
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
+	if (!IS_ERR(hdev->rst)) {
+		reset_control_assert(hdev->rst);
+		udelay(2);
+		reset_control_deassert(hdev->rst);
+	}
+
+	hdev->dev = dev;
+
+	platform_set_drvdata(pdev, hdev);
+
+	ret = stm32_hash_dma_init(hdev);
+	if (ret)
+		dev_dbg(dev, "DMA mode not available\n");
+
+	spin_lock(&stm32_hash.lock);
+	list_add_tail(&hdev->list, &stm32_hash.dev_list);
+	spin_unlock(&stm32_hash.lock);
+
+	/* Initialize crypto engine */
+	hdev->engine = crypto_engine_alloc_init(dev, 1);
+	if (!hdev->engine) {
+		ret = -ENOMEM;
+		goto err_engine;
+	}
+
+	ret = crypto_engine_start(hdev->engine);
+	if (ret)
+		goto err_engine_start;
+
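+	/*
+	 * Cache the hardware configuration register; dma_mode is compared
+	 * against 1 elsewhere to select how the trailing, non word-aligned
+	 * bytes are handled in stm32_hash_dma_send().
+	 */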
+	hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
+
+	/* Register algos */
+	ret = stm32_hash_register_algs(hdev);
+	if (ret)
+		goto err_algs;
+
+	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
+		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
+
+	pm_runtime_put_sync(dev);
+
+	return 0;
+
+err_algs:
+err_engine_start:
+	crypto_engine_exit(hdev->engine);
+err_engine:
+	spin_lock(&stm32_hash.lock);
+	list_del(&hdev->list);
+	spin_unlock(&stm32_hash.lock);
+
+	if (hdev->dma_lch)
+		dma_release_channel(hdev->dma_lch);
+
+	pm_runtime_disable(dev);
+	pm_runtime_put_noidle(dev);
+
+	clk_disable_unprepare(hdev->clk);
+
+	return ret;
+}
+
+static int stm32_hash_remove(struct platform_device *pdev)
+{
+	struct stm32_hash_dev *hdev;
+	int ret;
+
+	hdev = platform_get_drvdata(pdev);
+	if (!hdev)
+		return -ENODEV;
+
+	ret = pm_runtime_get_sync(hdev->dev);
+	if (ret < 0)
+		return ret;
+
+	stm32_hash_unregister_algs(hdev);
+
+	crypto_engine_exit(hdev->engine);
+
+	spin_lock(&stm32_hash.lock);
+	list_del(&hdev->list);
+	spin_unlock(&stm32_hash.lock);
+
+	if (hdev->dma_lch)
+		dma_release_channel(hdev->dma_lch);
+
+	pm_runtime_disable(hdev->dev);
+	pm_runtime_put_noidle(hdev->dev);
+
+	clk_disable_unprepare(hdev->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_hash_runtime_suspend(struct device *dev)
+{
+	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(hdev->clk);
+
+	return 0;
+}
+
+static int stm32_hash_runtime_resume(struct device *dev)
+{
+	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(hdev->clk);
+	if (ret) {
+		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_hash_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
+			   stm32_hash_runtime_resume, NULL)
+};
+
+static struct platform_driver stm32_hash_driver = {
+	.probe		= stm32_hash_probe,
+	.remove		= stm32_hash_remove,
+	.driver		= {
+		.name	= "stm32-hash",
+		.pm = &stm32_hash_pm_ops,
+		.of_match_table	= stm32_hash_of_match,
+	}
+};
+
+module_platform_driver(stm32_hash_driver);
+
+MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
+MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
new file mode 100644
index 0000000..29d2095
--- /dev/null
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Fabien Dessenne <fabien.dessenne@st.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/bitrev.h>
+#include <linux/clk.h>
+#include <linux/crc32poly.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <crypto/internal/hash.h>
+
+#include <asm/unaligned.h>
+
+#define DRIVER_NAME             "stm32-crc32"
+#define CHKSUM_DIGEST_SIZE      4
+#define CHKSUM_BLOCK_SIZE       1
+
+/* Registers */
+#define CRC_DR                  0x00000000
+#define CRC_CR                  0x00000008
+#define CRC_INIT                0x00000010
+#define CRC_POL                 0x00000014
+
+/* Registers values */
+#define CRC_CR_RESET            BIT(0)
+#define CRC_CR_REVERSE          (BIT(7) | BIT(6) | BIT(5))
+#define CRC_INIT_DEFAULT        0xFFFFFFFF
+
+#define CRC_AUTOSUSPEND_DELAY	50
+
+struct stm32_crc {
+	struct list_head list;
+	struct device    *dev;
+	void __iomem     *regs;
+	struct clk       *clk;
+	u8               pending_data[sizeof(u32)];
+	size_t           nb_pending_bytes;
+};
+
+struct stm32_crc_list {
+	struct list_head dev_list;
+	spinlock_t       lock; /* protect dev_list */
+};
+
+static struct stm32_crc_list crc_list = {
+	.dev_list = LIST_HEAD_INIT(crc_list.dev_list),
+	.lock     = __SPIN_LOCK_UNLOCKED(crc_list.lock),
+};
+
+struct stm32_crc_ctx {
+	u32 key;
+	u32 poly;
+};
+
+struct stm32_crc_desc_ctx {
+	u32    partial; /* crc32c: partial result held in these first 4 bytes */
+	struct stm32_crc *crc;
+};
+
+static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
+{
+	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
+
+	mctx->key = CRC_INIT_DEFAULT;
+	mctx->poly = CRC32_POLY_LE;
+	return 0;
+}
+
+static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
+{
+	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
+
+	mctx->key = CRC_INIT_DEFAULT;
+	mctx->poly = CRC32C_POLY_LE;
+	return 0;
+}
+
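+/* The optional 4-byte "key" is used as the CRC initial value (CRC_INIT). */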
+static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
+			    unsigned int keylen)
+{
+	struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
+
+	if (keylen != sizeof(u32)) {
+		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	mctx->key = get_unaligned_le32(key);
+	return 0;
+}
+
+static int stm32_crc_init(struct shash_desc *desc)
+{
+	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
+	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+	struct stm32_crc *crc;
+
+	spin_lock_bh(&crc_list.lock);
+	list_for_each_entry(crc, &crc_list.dev_list, list) {
+		ctx->crc = crc;
+		break;
+	}
+	spin_unlock_bh(&crc_list.lock);
+
+	pm_runtime_get_sync(ctx->crc->dev);
+
+	/* Reset, set key, poly and configure in bit reverse mode */
+	writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
+	writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
+	writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
+
+	/* Store partial result */
+	ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
+	ctx->crc->nb_pending_bytes = 0;
+
+	pm_runtime_mark_last_busy(ctx->crc->dev);
+	pm_runtime_put_autosuspend(ctx->crc->dev);
+
+	return 0;
+}
+
+static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
+			    unsigned int length)
+{
+	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
+	struct stm32_crc *crc = ctx->crc;
+	u32 *d32;
+	unsigned int i;
+
+	pm_runtime_get_sync(crc->dev);
+
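+	/*
+	 * The driver only feeds CRC_DR full 32-bit words: bytes left over
+	 * from a previous update are completed first, and any new remainder
+	 * is stashed in pending_data at the end of this function.
+	 */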
+	if (unlikely(crc->nb_pending_bytes)) {
+		while (crc->nb_pending_bytes != sizeof(u32) && length) {
+			/* Fill in pending data */
+			crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+			length--;
+		}
+
+		if (crc->nb_pending_bytes == sizeof(u32)) {
+			/* Process completed pending data */
+			writel_relaxed(*(u32 *)crc->pending_data,
+				       crc->regs + CRC_DR);
+			crc->nb_pending_bytes = 0;
+		}
+	}
+
+	d32 = (u32 *)d8;
+	for (i = 0; i < length >> 2; i++)
+		/* Process 32-bit data words */
+		writel_relaxed(*(d32++), crc->regs + CRC_DR);
+
+	/* Store partial result */
+	ctx->partial = readl_relaxed(crc->regs + CRC_DR);
+
+	pm_runtime_mark_last_busy(crc->dev);
+	pm_runtime_put_autosuspend(crc->dev);
+
+	/* Check for pending data (non 32 bits) */
+	length &= 3;
+	if (likely(!length))
+		return 0;
+
+	if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
+		/* Shall not happen */
+		dev_err(crc->dev, "Pending data overflow\n");
+		return -EINVAL;
+	}
+
+	d8 = (const u8 *)d32;
+	for (i = 0; i < length; i++)
+		/* Store pending data */
+		crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+
+	return 0;
+}
+
+static int stm32_crc_final(struct shash_desc *desc, u8 *out)
+{
+	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
+	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+	/* Send computed CRC */
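+	/* crc32c is defined with a final bit inversion, plain crc32 is not */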
+	put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
+			   ~ctx->partial : ctx->partial, out);
+
+	return 0;
+}
+
+static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
+			   unsigned int length, u8 *out)
+{
+	return stm32_crc_update(desc, data, length) ?:
+	       stm32_crc_final(desc, out);
+}
+
+static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
+			    unsigned int length, u8 *out)
+{
+	return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
+}
+
+static struct shash_alg algs[] = {
+	/* CRC-32 */
+	{
+		.setkey         = stm32_crc_setkey,
+		.init           = stm32_crc_init,
+		.update         = stm32_crc_update,
+		.final          = stm32_crc_final,
+		.finup          = stm32_crc_finup,
+		.digest         = stm32_crc_digest,
+		.descsize       = sizeof(struct stm32_crc_desc_ctx),
+		.digestsize     = CHKSUM_DIGEST_SIZE,
+		.base           = {
+			.cra_name               = "crc32",
+			.cra_driver_name        = DRIVER_NAME,
+			.cra_priority           = 200,
+			.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,
+			.cra_blocksize          = CHKSUM_BLOCK_SIZE,
+			.cra_alignmask          = 3,
+			.cra_ctxsize            = sizeof(struct stm32_crc_ctx),
+			.cra_module             = THIS_MODULE,
+			.cra_init               = stm32_crc32_cra_init,
+		}
+	},
+	/* CRC-32 Castagnoli */
+	{
+		.setkey         = stm32_crc_setkey,
+		.init           = stm32_crc_init,
+		.update         = stm32_crc_update,
+		.final          = stm32_crc_final,
+		.finup          = stm32_crc_finup,
+		.digest         = stm32_crc_digest,
+		.descsize       = sizeof(struct stm32_crc_desc_ctx),
+		.digestsize     = CHKSUM_DIGEST_SIZE,
+		.base           = {
+			.cra_name               = "crc32c",
+			.cra_driver_name        = DRIVER_NAME,
+			.cra_priority           = 200,
+			.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,
+			.cra_blocksize          = CHKSUM_BLOCK_SIZE,
+			.cra_alignmask          = 3,
+			.cra_ctxsize            = sizeof(struct stm32_crc_ctx),
+			.cra_module             = THIS_MODULE,
+			.cra_init               = stm32_crc32c_cra_init,
+		}
+	}
+};
+
+static int stm32_crc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct stm32_crc *crc;
+	struct resource *res;
+	int ret;
+
+	crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
+	if (!crc)
+		return -ENOMEM;
+
+	crc->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	crc->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(crc->regs)) {
+		dev_err(dev, "Cannot map CRC IO\n");
+		return PTR_ERR(crc->regs);
+	}
+
+	crc->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(crc->clk)) {
+		dev_err(dev, "Could not get clock\n");
+		return PTR_ERR(crc->clk);
+	}
+
+	ret = clk_prepare_enable(crc->clk);
+	if (ret) {
+		dev_err(crc->dev, "Failed to enable clock\n");
+		return ret;
+	}
+
+	pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(dev);
+
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	platform_set_drvdata(pdev, crc);
+
+	spin_lock(&crc_list.lock);
+	list_add(&crc->list, &crc_list.dev_list);
+	spin_unlock(&crc_list.lock);
+
+	ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
+	if (ret) {
+		dev_err(dev, "Failed to register\n");
+		clk_disable_unprepare(crc->clk);
+		return ret;
+	}
+
+	dev_info(dev, "Initialized\n");
+
+	pm_runtime_put_sync(dev);
+
+	return 0;
+}
+
+static int stm32_crc_remove(struct platform_device *pdev)
+{
+	struct stm32_crc *crc = platform_get_drvdata(pdev);
+	int ret = pm_runtime_get_sync(crc->dev);
+
+	if (ret < 0)
+		return ret;
+
+	spin_lock(&crc_list.lock);
+	list_del(&crc->list);
+	spin_unlock(&crc_list.lock);
+
+	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+
+	pm_runtime_disable(crc->dev);
+	pm_runtime_put_noidle(crc->dev);
+
+	clk_disable_unprepare(crc->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_crc_runtime_suspend(struct device *dev)
+{
+	struct stm32_crc *crc = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(crc->clk);
+
+	return 0;
+}
+
+static int stm32_crc_runtime_resume(struct device *dev)
+{
+	struct stm32_crc *crc = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(crc->clk);
+	if (ret) {
+		dev_err(crc->dev, "Failed to prepare_enable clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_crc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
+			   stm32_crc_runtime_resume, NULL)
+};
+
+static const struct of_device_id stm32_dt_ids[] = {
+	{ .compatible = "st,stm32f7-crc", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, stm32_dt_ids);
+
+static struct platform_driver stm32_crc_driver = {
+	.probe  = stm32_crc_probe,
+	.remove = stm32_crc_remove,
+	.driver = {
+		.name           = DRIVER_NAME,
+		.pm		= &stm32_crc_pm_ops,
+		.of_match_table = stm32_dt_ids,
+	},
+};
+
+module_platform_driver(stm32_crc_driver);
+
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STMicrolectronics STM32 CRC32 hardware driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile
new file mode 100644
index 0000000..ccb8932
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o
+sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o
+sun4i-ss-$(CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG) += sun4i-ss-prng.o
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
new file mode 100644
index 0000000..5cf6474
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -0,0 +1,546 @@
+/*
+ * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for the AES cipher with 128-, 192- and 256-bit
+ * keys in CBC and ECB mode.
+ * It also adds support for DES and 3DES in CBC and ECB mode.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
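+
+/*
+ * A minimal usage sketch (not part of this driver): the ciphers below are
+ * reached through the generic kernel skcipher API, for instance
+ *   tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+ *   crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
+ *   skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
+ *   crypto_skcipher_encrypt(req);
+ * (req, key, src_sg, dst_sg, len and iv are placeholders). The
+ * "cbc-aes-sun4i-ss" implementation is selected when its priority wins
+ * over other providers of "cbc(aes)".
+ */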
+#include "sun4i-ss.h"
+
+static int sun4i_ss_opti_poll(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_ss_ctx *ss = op->ss;
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
+	u32 mode = ctx->mode;
+	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
+	u32 rx_cnt = SS_RX_DEFAULT;
+	u32 tx_cnt = 0;
+	u32 spaces;
+	u32 v;
+	int err = 0;
+	unsigned int i;
+	unsigned int ileft = areq->cryptlen;
+	unsigned int oleft = areq->cryptlen;
+	unsigned int todo;
+	struct sg_mapping_iter mi, mo;
+	unsigned int oi, oo; /* offset for in and out */
+	unsigned long flags;
+
+	if (!areq->cryptlen)
+		return 0;
+
+	if (!areq->iv) {
+		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
+		return -EINVAL;
+	}
+
+	if (!areq->src || !areq->dst) {
+		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ss->slock, flags);
+
+	for (i = 0; i < op->keylen; i += 4)
+		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+
+	if (areq->iv) {
+		for (i = 0; i < 4 && i < ivsize / 4; i++) {
+			v = *(u32 *)(areq->iv + i * 4);
+			writel(v, ss->base + SS_IV0 + i * 4);
+		}
+	}
+	writel(mode, ss->base + SS_CTL);
+
+	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
+	sg_miter_next(&mi);
+	sg_miter_next(&mo);
+	if (!mi.addr || !mo.addr) {
+		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+		err = -EINVAL;
+		goto release_ss;
+	}
+
+	ileft = areq->cryptlen / 4;
+	oleft = areq->cryptlen / 4;
+	oi = 0;
+	oo = 0;
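+	/*
+	 * Alternately feed the RX FIFO and drain the TX FIFO, one 32-bit
+	 * word at a time, until the whole output has been read back.
+	 */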
+	do {
+		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
+		if (todo) {
+			ileft -= todo;
+			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
+			oi += todo * 4;
+		}
+		if (oi == mi.length) {
+			sg_miter_next(&mi);
+			oi = 0;
+		}
+
+		spaces = readl(ss->base + SS_FCSR);
+		rx_cnt = SS_RXFIFO_SPACES(spaces);
+		tx_cnt = SS_TXFIFO_SPACES(spaces);
+
+		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
+		if (todo) {
+			oleft -= todo;
+			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+			oo += todo * 4;
+		}
+		if (oo == mo.length) {
+			sg_miter_next(&mo);
+			oo = 0;
+		}
+	} while (oleft);
+
+	if (areq->iv) {
+		for (i = 0; i < 4 && i < ivsize / 4; i++) {
+			v = readl(ss->base + SS_IV0 + i * 4);
+			*(u32 *)(areq->iv + i * 4) = v;
+		}
+	}
+
+release_ss:
+	sg_miter_stop(&mi);
+	sg_miter_stop(&mo);
+	writel(0, ss->base + SS_CTL);
+	spin_unlock_irqrestore(&ss->slock, flags);
+	return err;
+}
+
+/* Generic function that supports SGs whose size is not a multiple of 4 */
+static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_ss_ctx *ss = op->ss;
+	int no_chunk = 1;
+	struct scatterlist *in_sg = areq->src;
+	struct scatterlist *out_sg = areq->dst;
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
+	u32 mode = ctx->mode;
+	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
+	u32 rx_cnt = SS_RX_DEFAULT;
+	u32 tx_cnt = 0;
+	u32 v;
+	u32 spaces;
+	int err = 0;
+	unsigned int i;
+	unsigned int ileft = areq->cryptlen;
+	unsigned int oleft = areq->cryptlen;
+	unsigned int todo;
+	struct sg_mapping_iter mi, mo;
+	unsigned int oi, oo;	/* offset for in and out */
+	char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
+	char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
+	unsigned int ob = 0;	/* offset in buf */
+	unsigned int obo = 0;	/* offset in bufo*/
+	unsigned int obl = 0;	/* length of data in bufo */
+	unsigned long flags;
+
+	if (!areq->cryptlen)
+		return 0;
+
+	if (!areq->iv) {
+		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
+		return -EINVAL;
+	}
+
+	if (!areq->src || !areq->dst) {
+		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * if all SGs have a size that is a multiple of 4,
+	 * we can use the SS optimized function
+	 */
+	while (in_sg && no_chunk == 1) {
+		if (in_sg->length % 4)
+			no_chunk = 0;
+		in_sg = sg_next(in_sg);
+	}
+	while (out_sg && no_chunk == 1) {
+		if (out_sg->length % 4)
+			no_chunk = 0;
+		out_sg = sg_next(out_sg);
+	}
+
+	if (no_chunk == 1)
+		return sun4i_ss_opti_poll(areq);
+
+	spin_lock_irqsave(&ss->slock, flags);
+
+	for (i = 0; i < op->keylen; i += 4)
+		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+
+	if (areq->iv) {
+		for (i = 0; i < 4 && i < ivsize / 4; i++) {
+			v = *(u32 *)(areq->iv + i * 4);
+			writel(v, ss->base + SS_IV0 + i * 4);
+		}
+	}
+	writel(mode, ss->base + SS_CTL);
+
+	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
+	sg_miter_next(&mi);
+	sg_miter_next(&mo);
+	if (!mi.addr || !mo.addr) {
+		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+		err = -EINVAL;
+		goto release_ss;
+	}
+	ileft = areq->cryptlen;
+	oleft = areq->cryptlen;
+	oi = 0;
+	oo = 0;
+
+	while (oleft) {
+		if (ileft) {
+			/*
+			 * todo is the number of consecutive 4-byte words that
+			 * we can read from the current SG
+			 */
+			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
+			if (todo && !ob) {
+				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
+					todo);
+				ileft -= todo * 4;
+				oi += todo * 4;
+			} else {
+				/*
+				 * not enough consecutive bytes, so linearize
+				 * them in buf (todo is in bytes here). After
+				 * that copy, once we have a multiple of 4 we
+				 * must be able to write the whole buffer in
+				 * one pass, which is why we min() with rx_cnt
+				 */
+				todo = min3(rx_cnt * 4 - ob, ileft,
+					    mi.length - oi);
+				memcpy(buf + ob, mi.addr + oi, todo);
+				ileft -= todo;
+				oi += todo;
+				ob += todo;
+				if (!(ob % 4)) {
+					writesl(ss->base + SS_RXFIFO, buf,
+						ob / 4);
+					ob = 0;
+				}
+			}
+			if (oi == mi.length) {
+				sg_miter_next(&mi);
+				oi = 0;
+			}
+		}
+
+		spaces = readl(ss->base + SS_FCSR);
+		rx_cnt = SS_RXFIFO_SPACES(spaces);
+		tx_cnt = SS_TXFIFO_SPACES(spaces);
+		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
+			mode,
+			oi, mi.length, ileft, areq->cryptlen, rx_cnt,
+			oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
+
+		if (!tx_cnt)
+			continue;
+		/* todo in 4-byte words */
+		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
+		if (todo) {
+			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+			oleft -= todo * 4;
+			oo += todo * 4;
+			if (oo == mo.length) {
+				sg_miter_next(&mo);
+				oo = 0;
+			}
+		} else {
+			/*
+			 * read obl bytes into bufo; read as much as possible
+			 * to empty the device FIFO
+			 */
+			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
+			obl = tx_cnt * 4;
+			obo = 0;
+			do {
+				/*
+				 * how many bytes can we copy?
+				 * no more than the remaining SG size and
+				 * no more than the remaining buffer;
+				 * no need to test against oleft
+				 */
+				todo = min(mo.length - oo, obl - obo);
+				memcpy(mo.addr + oo, bufo + obo, todo);
+				oleft -= todo;
+				obo += todo;
+				oo += todo;
+				if (oo == mo.length) {
+					sg_miter_next(&mo);
+					oo = 0;
+				}
+			} while (obo < obl);
+			/* bufo must be fully used here */
+		}
+	}
+	if (areq->iv) {
+		for (i = 0; i < 4 && i < ivsize / 4; i++) {
+			v = readl(ss->base + SS_IV0 + i * 4);
+			*(u32 *)(areq->iv + i * 4) = v;
+		}
+	}
+
+release_ss:
+	sg_miter_stop(&mi);
+	sg_miter_stop(&mo);
+	writel(0, ss->base + SS_CTL);
+	spin_unlock_irqrestore(&ss->slock, flags);
+
+	return err;
+}
+
+/* CBC AES */
+int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB AES */
+int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+/* CBC DES */
+int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB DES */
+int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+/* CBC 3DES */
+int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB 3DES */
+int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
+{
+	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+	struct sun4i_ss_alg_template *algt;
+
+	memset(op, 0, sizeof(struct sun4i_tfm_ctx));
+
+	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
+			    alg.crypto.base);
+	op->ss = algt->ss;
+
+	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+				    sizeof(struct sun4i_cipher_req_ctx));
+
+	return 0;
+}
+
+/* check and set the AES key, prepare the mode to be used */
+int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_ss_ctx *ss = op->ss;
+
+	switch (keylen) {
+	case 128 / 8:
+		op->keymode = SS_AES_128BITS;
+		break;
+	case 192 / 8:
+		op->keymode = SS_AES_192BITS;
+		break;
+	case 256 / 8:
+		op->keymode = SS_AES_256BITS;
+		break;
+	default:
+		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	op->keylen = keylen;
+	memcpy(op->key, key, keylen);
+	return 0;
+}
+
+/* check and set the DES key, prepare the mode to be used */
+int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_ss_ctx *ss = op->ss;
+	u32 flags;
+	u32 tmp[DES_EXPKEY_WORDS];
+	int ret;
+
+	if (unlikely(keylen != DES_KEY_SIZE)) {
+		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	flags = crypto_skcipher_get_flags(tfm);
+
+	ret = des_ekey(tmp, key);
+	if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+		dev_dbg(ss->dev, "Weak key %u\n", keylen);
+		return -EINVAL;
+	}
+
+	op->keylen = keylen;
+	memcpy(op->key, key, keylen);
+	return 0;
+}
+
+/* check and set the 3DES key, prepare the mode to be used */
+int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			 unsigned int keylen)
+{
+	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun4i_ss_ctx *ss = op->ss;
+
+	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
+		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	op->keylen = keylen;
+	memcpy(op->key, key, keylen);
+	return 0;
+}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
new file mode 100644
index 0000000..89adf9e
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -0,0 +1,447 @@
+/*
+ * sun4i-ss-core.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * Core file which registers crypto algorithms supported by the SS.
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+
+#include "sun4i-ss.h"
+
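+/*
+ * Every algorithm exposed by the Security System: MD5 and SHA1 hashes,
+ * AES/DES/3DES in CBC and ECB mode, and optionally a PRNG.
+ */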
+static struct sun4i_ss_alg_template ss_algs[] = {
+{       .type = CRYPTO_ALG_TYPE_AHASH,
+	.mode = SS_OP_MD5,
+	.alg.hash = {
+		.init = sun4i_hash_init,
+		.update = sun4i_hash_update,
+		.final = sun4i_hash_final,
+		.finup = sun4i_hash_finup,
+		.digest = sun4i_hash_digest,
+		.export = sun4i_hash_export_md5,
+		.import = sun4i_hash_import_md5,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct md5_state),
+			.base = {
+				.cra_name = "md5",
+				.cra_driver_name = "md5-sun4i-ss",
+				.cra_priority = 300,
+				.cra_alignmask = 3,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+				.cra_module = THIS_MODULE,
+				.cra_init = sun4i_hash_crainit
+			}
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_AHASH,
+	.mode = SS_OP_SHA1,
+	.alg.hash = {
+		.init = sun4i_hash_init,
+		.update = sun4i_hash_update,
+		.final = sun4i_hash_final,
+		.finup = sun4i_hash_finup,
+		.digest = sun4i_hash_digest,
+		.export = sun4i_hash_export_sha1,
+		.import = sun4i_hash_import_sha1,
+		.halg = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize = sizeof(struct sha1_state),
+			.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "sha1-sun4i-ss",
+				.cra_priority = 300,
+				.cra_alignmask = 3,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+				.cra_module = THIS_MODULE,
+				.cra_init = sun4i_hash_crainit
+			}
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_SKCIPHER,
+	.alg.crypto = {
+		.setkey         = sun4i_ss_aes_setkey,
+		.encrypt        = sun4i_ss_cbc_aes_encrypt,
+		.decrypt        = sun4i_ss_cbc_aes_decrypt,
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "cbc-aes-sun4i-ss",
+			.cra_priority = 300,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+			.cra_module = THIS_MODULE,
+			.cra_alignmask = 3,
+			.cra_init = sun4i_ss_cipher_init,
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_SKCIPHER,
+	.alg.crypto = {
+		.setkey         = sun4i_ss_aes_setkey,
+		.encrypt        = sun4i_ss_ecb_aes_encrypt,
+		.decrypt        = sun4i_ss_ecb_aes_decrypt,
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "ecb-aes-sun4i-ss",
+			.cra_priority = 300,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+			.cra_module = THIS_MODULE,
+			.cra_alignmask = 3,
+			.cra_init = sun4i_ss_cipher_init,
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_SKCIPHER,
+	.alg.crypto = {
+		.setkey         = sun4i_ss_des_setkey,
+		.encrypt        = sun4i_ss_cbc_des_encrypt,
+		.decrypt        = sun4i_ss_cbc_des_decrypt,
+		.min_keysize    = DES_KEY_SIZE,
+		.max_keysize    = DES_KEY_SIZE,
+		.ivsize         = DES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(des)",
+			.cra_driver_name = "cbc-des-sun4i-ss",
+			.cra_priority = 300,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+			.cra_module = THIS_MODULE,
+			.cra_alignmask = 3,
+			.cra_init = sun4i_ss_cipher_init,
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_SKCIPHER,
+	.alg.crypto = {
+		.setkey         = sun4i_ss_des_setkey,
+		.encrypt        = sun4i_ss_ecb_des_encrypt,
+		.decrypt        = sun4i_ss_ecb_des_decrypt,
+		.min_keysize    = DES_KEY_SIZE,
+		.max_keysize    = DES_KEY_SIZE,
+		.base = {
+			.cra_name = "ecb(des)",
+			.cra_driver_name = "ecb-des-sun4i-ss",
+			.cra_priority = 300,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+			.cra_module = THIS_MODULE,
+			.cra_alignmask = 3,
+			.cra_init = sun4i_ss_cipher_init,
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_SKCIPHER,
+	.alg.crypto = {
+		.setkey         = sun4i_ss_des3_setkey,
+		.encrypt        = sun4i_ss_cbc_des3_encrypt,
+		.decrypt        = sun4i_ss_cbc_des3_decrypt,
+		.min_keysize    = DES3_EDE_KEY_SIZE,
+		.max_keysize    = DES3_EDE_KEY_SIZE,
+		.ivsize         = DES3_EDE_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "cbc-des3-sun4i-ss",
+			.cra_priority = 300,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+			.cra_module = THIS_MODULE,
+			.cra_alignmask = 3,
+			.cra_init = sun4i_ss_cipher_init,
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_SKCIPHER,
+	.alg.crypto = {
+		.setkey         = sun4i_ss_des3_setkey,
+		.encrypt        = sun4i_ss_ecb_des3_encrypt,
+		.decrypt        = sun4i_ss_ecb_des3_decrypt,
+		.min_keysize    = DES3_EDE_KEY_SIZE,
+		.max_keysize    = DES3_EDE_KEY_SIZE,
+		.ivsize         = DES3_EDE_BLOCK_SIZE,
+		.base = {
+			.cra_name = "ecb(des3_ede)",
+			.cra_driver_name = "ecb-des3-sun4i-ss",
+			.cra_priority = 300,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+			.cra_module = THIS_MODULE,
+			.cra_alignmask = 3,
+			.cra_init = sun4i_ss_cipher_init,
+		}
+	}
+},
+#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
+{
+	.type = CRYPTO_ALG_TYPE_RNG,
+	.alg.rng = {
+		.base = {
+			.cra_name		= "stdrng",
+			.cra_driver_name	= "sun4i_ss_rng",
+			.cra_priority		= 300,
+			.cra_ctxsize		= 0,
+			.cra_module		= THIS_MODULE,
+		},
+		.generate               = sun4i_ss_prng_generate,
+		.seed                   = sun4i_ss_prng_seed,
+		.seedsize               = SS_SEED_LEN / BITS_PER_BYTE,
+	}
+},
+#endif
+};
+
+static int sun4i_ss_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	u32 v;
+	int err, i;
+	unsigned long cr;
+	const unsigned long cr_ahb = 24 * 1000 * 1000;
+	const unsigned long cr_mod = 150 * 1000 * 1000;
+	struct sun4i_ss_ctx *ss;
+
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
+	if (!ss)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ss->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ss->base)) {
+		dev_err(&pdev->dev, "Cannot request MMIO\n");
+		return PTR_ERR(ss->base);
+	}
+
+	ss->ssclk = devm_clk_get(&pdev->dev, "mod");
+	if (IS_ERR(ss->ssclk)) {
+		err = PTR_ERR(ss->ssclk);
+		dev_err(&pdev->dev, "Cannot get SS clock err=%d\n", err);
+		return err;
+	}
+	dev_dbg(&pdev->dev, "clock ss acquired\n");
+
+	ss->busclk = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(ss->busclk)) {
+		err = PTR_ERR(ss->busclk);
+		dev_err(&pdev->dev, "Cannot get AHB SS clock err=%d\n", err);
+		return err;
+	}
+	dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
+
+	ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
+	if (IS_ERR(ss->reset)) {
+		if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
+			return PTR_ERR(ss->reset);
+		dev_info(&pdev->dev, "no reset control found\n");
+		ss->reset = NULL;
+	}
+
+	/* Enable both clocks */
+	err = clk_prepare_enable(ss->busclk);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
+		return err;
+	}
+	err = clk_prepare_enable(ss->ssclk);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n");
+		goto error_ssclk;
+	}
+
+	/*
+	 * Check that the clocks have the rates given in the datasheet,
+	 * and try to set the module clock to the maximum allowed rate.
+	 */
+	err = clk_set_rate(ss->ssclk, cr_mod);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n");
+		goto error_clk;
+	}
+
+	/* Deassert reset if we have a reset control */
+	if (ss->reset) {
+		err = reset_control_deassert(ss->reset);
+		if (err) {
+			dev_err(&pdev->dev, "Cannot deassert reset control\n");
+			goto error_clk;
+		}
+	}
+
+	/*
+	 * Clocks below the required rate only hurt performance, so they are
+	 * not treated as errors; just warn, and also warn if the module
+	 * clock ends up overclocked.
+	 */
+	cr = clk_get_rate(ss->busclk);
+	if (cr >= cr_ahb)
+		dev_dbg(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
+			cr, cr / 1000000, cr_ahb);
+	else
+		dev_warn(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
+			 cr, cr / 1000000, cr_ahb);
+
+	cr = clk_get_rate(ss->ssclk);
+	if (cr <= cr_mod) {
+		if (cr < cr_mod)
+			dev_warn(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
+				 cr, cr / 1000000, cr_mod);
+		else
+			dev_dbg(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
+				cr, cr / 1000000, cr_mod);
+	} else {
+		dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n",
+			 cr, cr / 1000000, cr_mod);
+	}
+
+	/*
+	 * The datasheet names this field "Die Bonding ID"; it looks like a
+	 * sort of Security System revision number. Since the A80 seems to
+	 * have another version of the SS, this information could be useful.
+	 */
+	writel(SS_ENABLED, ss->base + SS_CTL);
+	v = readl(ss->base + SS_CTL);
+	v >>= 16;
+	v &= 0x07;
+	dev_info(&pdev->dev, "Die ID %d\n", v);
+	writel(0, ss->base + SS_CTL);
+
+	ss->dev = &pdev->dev;
+
+	spin_lock_init(&ss->slock);
+
+	for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+		ss_algs[i].ss = ss;
+		switch (ss_algs[i].type) {
+		case CRYPTO_ALG_TYPE_SKCIPHER:
+			err = crypto_register_skcipher(&ss_algs[i].alg.crypto);
+			if (err) {
+				dev_err(ss->dev, "Fail to register %s\n",
+					ss_algs[i].alg.crypto.base.cra_name);
+				goto error_alg;
+			}
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			err = crypto_register_ahash(&ss_algs[i].alg.hash);
+			if (err) {
+				dev_err(ss->dev, "Fail to register %s\n",
+					ss_algs[i].alg.hash.halg.base.cra_name);
+				goto error_alg;
+			}
+			break;
+		case CRYPTO_ALG_TYPE_RNG:
+			err = crypto_register_rng(&ss_algs[i].alg.rng);
+			if (err) {
+				dev_err(ss->dev, "Fail to register %s\n",
+					ss_algs[i].alg.rng.base.cra_name);
+			}
+			break;
+		}
+	}
+	platform_set_drvdata(pdev, ss);
+	return 0;
+error_alg:
+	i--;
+	for (; i >= 0; i--) {
+		switch (ss_algs[i].type) {
+		case CRYPTO_ALG_TYPE_SKCIPHER:
+			crypto_unregister_skcipher(&ss_algs[i].alg.crypto);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&ss_algs[i].alg.hash);
+			break;
+		case CRYPTO_ALG_TYPE_RNG:
+			crypto_unregister_rng(&ss_algs[i].alg.rng);
+			break;
+		}
+	}
+	if (ss->reset)
+		reset_control_assert(ss->reset);
+error_clk:
+	clk_disable_unprepare(ss->ssclk);
+error_ssclk:
+	clk_disable_unprepare(ss->busclk);
+	return err;
+}
+
+static int sun4i_ss_remove(struct platform_device *pdev)
+{
+	int i;
+	struct sun4i_ss_ctx *ss = platform_get_drvdata(pdev);
+
+	for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+		switch (ss_algs[i].type) {
+		case CRYPTO_ALG_TYPE_SKCIPHER:
+			crypto_unregister_skcipher(&ss_algs[i].alg.crypto);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&ss_algs[i].alg.hash);
+			break;
+		case CRYPTO_ALG_TYPE_RNG:
+			crypto_unregister_rng(&ss_algs[i].alg.rng);
+			break;
+		}
+	}
+
+	writel(0, ss->base + SS_CTL);
+	if (ss->reset)
+		reset_control_assert(ss->reset);
+	clk_disable_unprepare(ss->busclk);
+	clk_disable_unprepare(ss->ssclk);
+	return 0;
+}
+
+static const struct of_device_id a20ss_crypto_of_match_table[] = {
+	{ .compatible = "allwinner,sun4i-a10-crypto" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
+
+static struct platform_driver sun4i_ss_driver = {
+	.probe          = sun4i_ss_probe,
+	.remove         = sun4i_ss_remove,
+	.driver         = {
+		.name           = "sun4i-ss",
+		.of_match_table	= a20ss_crypto_of_match_table,
+	},
+};
+
+module_platform_driver(sun4i_ss_driver);
+
+MODULE_ALIAS("platform:sun4i-ss");
+MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
new file mode 100644
index 0000000..a4b5ff2
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -0,0 +1,523 @@
+/*
+ * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for MD5 and SHA1.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include "sun4i-ss.h"
+#include <linux/scatterlist.h>
+
+/* This is a totally arbitrary value */
+#define SS_TIMEOUT 100
+
+int sun4i_hash_crainit(struct crypto_tfm *tfm)
+{
+	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+	struct sun4i_ss_alg_template *algt;
+
+	memset(op, 0, sizeof(struct sun4i_tfm_ctx));
+
+	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
+	op->ss = algt->ss;
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct sun4i_req_ctx));
+	return 0;
+}
+
+/* sun4i_hash_init: initialize request context */
+int sun4i_hash_init(struct ahash_request *areq)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+	struct sun4i_ss_alg_template *algt;
+
+	memset(op, 0, sizeof(struct sun4i_req_ctx));
+
+	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
+	op->mode = algt->mode;
+
+	return 0;
+}
+
+int sun4i_hash_export_md5(struct ahash_request *areq, void *out)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct md5_state *octx = out;
+	int i;
+
+	octx->byte_count = op->byte_count + op->len;
+
+	memcpy(octx->block, op->buf, op->len);
+
+	if (op->byte_count) {
+		for (i = 0; i < 4; i++)
+			octx->hash[i] = op->hash[i];
+	} else {
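+		/*
+		 * No data has been hashed yet: export the standard MD5
+		 * initial values. They are numerically identical to
+		 * SHA1_H0..SHA1_H3 (0x67452301, 0xefcdab89, 0x98badcfe,
+		 * 0x10325476), which is why the SHA1_H* constants are
+		 * reused here.
+		 */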
+		octx->hash[0] = SHA1_H0;
+		octx->hash[1] = SHA1_H1;
+		octx->hash[2] = SHA1_H2;
+		octx->hash[3] = SHA1_H3;
+	}
+
+	return 0;
+}
+
+int sun4i_hash_import_md5(struct ahash_request *areq, const void *in)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	const struct md5_state *ictx = in;
+	int i;
+
+	sun4i_hash_init(areq);
+
+	op->byte_count = ictx->byte_count & ~0x3F;
+	op->len = ictx->byte_count & 0x3F;
+
+	memcpy(op->buf, ictx->block, op->len);
+
+	for (i = 0; i < 4; i++)
+		op->hash[i] = ictx->hash[i];
+
+	return 0;
+}
+
+int sun4i_hash_export_sha1(struct ahash_request *areq, void *out)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct sha1_state *octx = out;
+	int i;
+
+	octx->count = op->byte_count + op->len;
+
+	memcpy(octx->buffer, op->buf, op->len);
+
+	if (op->byte_count) {
+		for (i = 0; i < 5; i++)
+			octx->state[i] = op->hash[i];
+	} else {
+		octx->state[0] = SHA1_H0;
+		octx->state[1] = SHA1_H1;
+		octx->state[2] = SHA1_H2;
+		octx->state[3] = SHA1_H3;
+		octx->state[4] = SHA1_H4;
+	}
+
+	return 0;
+}
+
+int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	const struct sha1_state *ictx = in;
+	int i;
+
+	sun4i_hash_init(areq);
+
+	op->byte_count = ictx->count & ~0x3F;
+	op->len = ictx->count & 0x3F;
+
+	memcpy(op->buf, ictx->buffer, op->len);
+
+	for (i = 0; i < 5; i++)
+		op->hash[i] = ictx->state[i];
+
+	return 0;
+}
+
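+/*
+ * op->flags usage: update() sets SS_HASH_UPDATE, final() sets SS_HASH_FINAL
+ * and finup()/digest() set both, so that sun4i_hash() knows whether it has
+ * to feed data to the engine, emit the final padding, or both.
+ */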
+#define SS_HASH_UPDATE 1
+#define SS_HASH_FINAL 2
+
+/*
+ * sun4i_hash_update: update hash engine
+ *
+ * It can be used for both SHA1 and MD5.
+ * Data are written to the engine as 32-bit words through the RX FIFO.
+ *
+ * Since we cannot leave partial data and the hash state in the engine,
+ * we need to get the hash state back at the end of this function, and
+ * the hash state can only be retrieved on a 64-byte boundary.
+ *
+ * So the first task is to round the number of bytes to write to the SS
+ * down to a multiple of 64; the extra bytes go to a temporary wait
+ * buffer op->buf, which holds op->len bytes.
+ *
+ * At the beginning of update():
+ * if op->len + areq->nbytes < 64
+ * => all data go to the wait buffer (op->buf) and end = 0
+ * otherwise, write all data from op->buf to the device and set end so
+ * that a multiple of 64 bytes is written in total.
+ *
+ * example 1:
+ * update1 with 60 bytes => op->len = 60
+ * update2 with 60 bytes => one more word is needed to reach 64 bytes
+ * end = 4
+ * so write all data from op->buf plus one word from the SGs,
+ * then store the remaining data in op->buf
+ * final state: op->len = 56
+ */
+static int sun4i_hash(struct ahash_request *areq)
+{
+	/*
+	 * i is the total number of bytes read from the SGs, to be compared
+	 * to areq->nbytes; i matters because we cannot rely on the SG
+	 * lengths, since the sum of SG->length can be greater than
+	 * areq->nbytes
+	 *
+	 * end is the position at which we must stop writing to the device,
+	 * to be compared to i
+	 *
+	 * in_i is the current offset within the current SG
+	 */
+	unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo;
+	unsigned int in_i = 0;
+	u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, wb = 0, v, ivmode = 0;
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+	struct sun4i_ss_ctx *ss = tfmctx->ss;
+	struct scatterlist *in_sg = areq->src;
+	struct sg_mapping_iter mi;
+	int in_r, err = 0;
+	size_t copied = 0;
+
+	dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
+		__func__, crypto_tfm_alg_name(areq->base.tfm),
+		op->byte_count, areq->nbytes, op->mode,
+		op->len, op->hash[0]);
+
+	if (unlikely(!areq->nbytes) && !(op->flags & SS_HASH_FINAL))
+		return 0;
+
+	/* protect against overflow */
+	if (unlikely(areq->nbytes > UINT_MAX - op->len)) {
+		dev_err(ss->dev, "Cannot process too large request\n");
+		return -EINVAL;
+	}
+
+	if (op->len + areq->nbytes < 64 && !(op->flags & SS_HASH_FINAL)) {
+		/* linearize data to op->buf */
+		copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+					    op->buf + op->len, areq->nbytes, 0);
+		op->len += copied;
+		return 0;
+	}
+
+	spin_lock_bh(&ss->slock);
+
+	/*
+	 * if some data have been processed before,
+	 * we need to restore the partial hash state
+	 */
+	if (op->byte_count) {
+		ivmode = SS_IV_ARBITRARY;
+		for (i = 0; i < 5; i++)
+			writel(op->hash[i], ss->base + SS_IV0 + i * 4);
+	}
+	/* Enable the device */
+	writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+
+	if (!(op->flags & SS_HASH_UPDATE))
+		goto hash_final;
+
+	/* start of handling data */
+	if (!(op->flags & SS_HASH_FINAL)) {
+		end = ((areq->nbytes + op->len) / 64) * 64 - op->len;
+
+		if (end > areq->nbytes || areq->nbytes - end > 63) {
+			dev_err(ss->dev, "ERROR: Bound error %u %u\n",
+				end, areq->nbytes);
+			err = -EINVAL;
+			goto release_ss;
+		}
+	} else {
+		/* Since the final flag is set, we only need a multiple of 4 here */
+		end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
+	}
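+	/*
+	 * Example (update path): op->len = 60 and areq->nbytes = 20 give
+	 * end = ((20 + 60) / 64) * 64 - 60 = 4, so one 32-bit word is taken
+	 * from the SGs to complete a 64-byte block and the remaining
+	 * 16 bytes are stored in op->buf at the end of this function.
+	 */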
+
+	/* TODO if SGlen % 4 and !op->len then DMA */
+	i = 1;
+	while (in_sg && i == 1) {
+		if (in_sg->length % 4)
+			i = 0;
+		in_sg = sg_next(in_sg);
+	}
+	if (i == 1 && !op->len && areq->nbytes)
+		dev_dbg(ss->dev, "We can DMA\n");
+
+	i = 0;
+	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+	sg_miter_next(&mi);
+	in_i = 0;
+
+	do {
+		/*
+		 * we need to linearize in two cases:
+		 * - the buffer is already in use
+		 * - the SG does not have enough bytes remaining (< 4)
+		 */
+		if (op->len || (mi.length - in_i) < 4) {
+			/*
+			 * if we get here there are two reasons to stop:
+			 * - the buffer is full
+			 * - we have reached the end
+			 */
+			while (op->len < 64 && i < end) {
+				/* how many bytes we can read from current SG */
+				in_r = min3(mi.length - in_i, end - i,
+					    64 - op->len);
+				memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+				op->len += in_r;
+				i += in_r;
+				in_i += in_r;
+				if (in_i == mi.length) {
+					sg_miter_next(&mi);
+					in_i = 0;
+				}
+			}
+			if (op->len > 3 && !(op->len % 4)) {
+				/* write buf to the device */
+				writesl(ss->base + SS_RXFIFO, op->buf,
+					op->len / 4);
+				op->byte_count += op->len;
+				op->len = 0;
+			}
+		}
+		if (mi.length - in_i > 3 && i < end) {
+			/* how many bytes we can read from current SG */
+			in_r = min3(mi.length - in_i, areq->nbytes - i,
+				    ((mi.length - in_i) / 4) * 4);
+			/* how many bytes we can write in the device*/
+			todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
+			writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
+			op->byte_count += todo * 4;
+			i += todo * 4;
+			in_i += todo * 4;
+			rx_cnt -= todo;
+			if (!rx_cnt) {
+				spaces = readl(ss->base + SS_FCSR);
+				rx_cnt = SS_RXFIFO_SPACES(spaces);
+			}
+			if (in_i == mi.length) {
+				sg_miter_next(&mi);
+				in_i = 0;
+			}
+		}
+	} while (i < end);
+
+	/*
+	 * Now we have written to the device all that we can,
+	 * store the remaining bytes in op->buf
+	 */
+	if ((areq->nbytes - i) < 64) {
+		while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
+			/* how many bytes we can read from current SG */
+			in_r = min3(mi.length - in_i, areq->nbytes - i,
+				    64 - op->len);
+			memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+			op->len += in_r;
+			i += in_r;
+			in_i += in_r;
+			if (in_i == mi.length) {
+				sg_miter_next(&mi);
+				in_i = 0;
+			}
+		}
+	}
+
+	sg_miter_stop(&mi);
+
+	/*
+	 * End of data processing.
+	 * If the final flag is set, go to the finalize part,
+	 * otherwise store the partial hash.
+	 */
+	if (op->flags & SS_HASH_FINAL)
+		goto hash_final;
+
+	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
+	i = 0;
+	do {
+		v = readl(ss->base + SS_CTL);
+		i++;
+	} while (i < SS_TIMEOUT && (v & SS_DATA_END));
+	if (unlikely(i >= SS_TIMEOUT)) {
+		dev_err_ratelimited(ss->dev,
+				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
+				    i, SS_TIMEOUT, v, areq->nbytes);
+		err = -EIO;
+		goto release_ss;
+	}
+
+	/*
+	 * The datasheet isn't very clear about when to retrieve the digest. The
+	 * bit SS_DATA_END is cleared when the engine has processed the data and
+	 * when the digest is computed *but* it doesn't mean the digest is
+	 * available in the digest registers. Hence the delay to be sure we can
+	 * read it.
+	 */
+	ndelay(1);
+
+	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
+		op->hash[i] = readl(ss->base + SS_MD0 + i * 4);
+
+	goto release_ss;
+
+/*
+ * hash_final: finalize the hashing operation
+ *
+ * If some bytes remain, write them, then ask the SS to finalize the
+ * hashing operation.
+ *
+ * The RX FIFO size is not checked here because it holds 32 words after
+ * each enabling and this function never writes more than 32 words.
+ * If we come from the update part, at most 3 bytes remain to be
+ * written and the SS is fast enough not to care about it.
+ */
+
+hash_final:
+
+	/* write the remaining words of the wait buffer */
+	if (op->len) {
+		nwait = op->len / 4;
+		if (nwait) {
+			writesl(ss->base + SS_RXFIFO, op->buf, nwait);
+			op->byte_count += 4 * nwait;
+		}
+
+		nbw = op->len - 4 * nwait;
+		if (nbw) {
+			wb = *(u32 *)(op->buf + nwait * 4);
+			wb &= GENMASK((nbw * 8) - 1, 0);
+
+			op->byte_count += nbw;
+		}
+	}
+
+	/* append the mandatory padding bit (0x80) right after the last byte */
+	wb |= ((1 << 7) << (nbw * 8));
+	bf[j++] = wb;
+
+	/*
+	 * Compute the zero padding needed to reach 64 bytes, minus 8 bytes
+	 * for the length and minus 4 bytes for the word carrying the final
+	 * '1' bit. The operations are taken from other MD5/SHA1
+	 * implementations.
+	 */
+
+	/* space left in the last 64-byte block */
+	fill = 64 - (op->byte_count % 64);
+	min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
+
+	/* if the padding and length do not fit, use one more 64-byte block */
+	if (fill < min_fill)
+		fill += 64;
+
+	j += (fill - min_fill) / sizeof(u32);
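+	/*
+	 * Example: op->byte_count % 64 == 61 with nbw == 1 gives fill = 3 and
+	 * min_fill = 8, so fill becomes 67 and j advances by 14 zero words;
+	 * together with the padding word above and the two length words below,
+	 * the data fed to the engine ends on a 64-byte boundary.
+	 */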
+
+	/* write the message length, in bits */
+	if (op->mode == SS_OP_SHA1) {
+		__be64 bits = cpu_to_be64(op->byte_count << 3);
+		bf[j++] = lower_32_bits(bits);
+		bf[j++] = upper_32_bits(bits);
+	} else {
+		__le64 bits = op->byte_count << 3;
+		bf[j++] = lower_32_bits(bits);
+		bf[j++] = upper_32_bits(bits);
+	}
+	writesl(ss->base + SS_RXFIFO, bf, j);
+
+	/* Tell the SS to stop the hashing */
+	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
+
+	/*
+	 * Wait for SS to finish the hash.
+	 * The timeout could happen only in case of bad overclocking
+	 * or driver bug.
+	 */
+	i = 0;
+	do {
+		v = readl(ss->base + SS_CTL);
+		i++;
+	} while (i < SS_TIMEOUT && (v & SS_DATA_END));
+	if (unlikely(i >= SS_TIMEOUT)) {
+		dev_err_ratelimited(ss->dev,
+				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
+				    i, SS_TIMEOUT, v, areq->nbytes);
+		err = -EIO;
+		goto release_ss;
+	}
+
+	/*
+	 * The datasheet isn't very clear about when to retrieve the digest. The
+	 * bit SS_DATA_END is cleared when the engine has processed the data and
+	 * when the digest is computed *but* it doesn't mean the digest is
+	 * available in the digest registers. Hence the delay to be sure we can
+	 * read it.
+	 */
+	ndelay(1);
+
+	/* Get the hash from the device */
+	if (op->mode == SS_OP_SHA1) {
+		for (i = 0; i < 5; i++) {
+			v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
+			memcpy(areq->result + i * 4, &v, 4);
+		}
+	} else {
+		for (i = 0; i < 4; i++) {
+			v = readl(ss->base + SS_MD0 + i * 4);
+			memcpy(areq->result + i * 4, &v, 4);
+		}
+	}
+
+release_ss:
+	writel(0, ss->base + SS_CTL);
+	spin_unlock_bh(&ss->slock);
+	return err;
+}
+
+int sun4i_hash_final(struct ahash_request *areq)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+
+	op->flags = SS_HASH_FINAL;
+	return sun4i_hash(areq);
+}
+
+int sun4i_hash_update(struct ahash_request *areq)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+
+	op->flags = SS_HASH_UPDATE;
+	return sun4i_hash(areq);
+}
+
+/* sun4i_hash_finup: finalize hashing operation after an update */
+int sun4i_hash_finup(struct ahash_request *areq)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+
+	op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
+	return sun4i_hash(areq);
+}
+
+/* combo of init/update/final functions */
+int sun4i_hash_digest(struct ahash_request *areq)
+{
+	int err;
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+
+	err = sun4i_hash_init(areq);
+	if (err)
+		return err;
+
+	op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
+	return sun4i_hash(areq);
+}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
new file mode 100644
index 0000000..63d6364
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
@@ -0,0 +1,56 @@
+#include "sun4i-ss.h"
+
+int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+		       unsigned int slen)
+{
+	struct sun4i_ss_alg_template *algt;
+	struct rng_alg *alg = crypto_rng_alg(tfm);
+
+	algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
+	memcpy(algt->ss->seed, seed, slen);
+
+	return 0;
+}
+
+int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+			   unsigned int slen, u8 *dst, unsigned int dlen)
+{
+	struct sun4i_ss_alg_template *algt;
+	struct rng_alg *alg = crypto_rng_alg(tfm);
+	int i;
+	u32 v;
+	u32 *data = (u32 *)dst;
+	const u32 mode = SS_OP_PRNG | SS_PRNG_CONTINUE | SS_ENABLED;
+	size_t len;
+	struct sun4i_ss_ctx *ss;
+	unsigned int todo = (dlen / 4) * 4;
+
+	algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
+	ss = algt->ss;
+
+	spin_lock_bh(&ss->slock);
+
+	writel(mode, ss->base + SS_CTL);
+
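+	/*
+	 * Each loop iteration reloads the seed into the SS_KEYx registers,
+	 * reads up to SS_DATA_LEN / 8 = 20 bytes of random data from the TX
+	 * FIFO, then saves the updated seed back from the SS_KEYx registers.
+	 * Only whole 32-bit words of dlen are produced (todo is rounded down).
+	 */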
+	while (todo > 0) {
+		/* write the seed */
+		for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++)
+			writel(ss->seed[i], ss->base + SS_KEY0 + i * 4);
+
+		/* Read the random data */
+		len = min_t(size_t, SS_DATA_LEN / BITS_PER_BYTE, todo);
+		readsl(ss->base + SS_TXFIFO, data, len / 4);
+		data += len / 4;
+		todo -= len;
+
+		/* Update the seed */
+		for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) {
+			v = readl(ss->base + SS_KEY0 + i * 4);
+			ss->seed[i] = v;
+		}
+	}
+
+	writel(0, ss->base + SS_CTL);
+	spin_unlock_bh(&ss->slock);
+	return 0;
+}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
new file mode 100644
index 0000000..f3ac906
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -0,0 +1,214 @@
+/*
+ * sun4i-ss.h - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * Supports the AES cipher with 128, 192 and 256 bit keys.
+ * Supports the MD5 and SHA1 hash algorithms.
+ * Supports DES and 3DES.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <crypto/md5.h>
+#include <crypto/skcipher.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/internal/rng.h>
+#include <crypto/rng.h>
+
+#define SS_CTL            0x00
+#define SS_KEY0           0x04
+#define SS_KEY1           0x08
+#define SS_KEY2           0x0C
+#define SS_KEY3           0x10
+#define SS_KEY4           0x14
+#define SS_KEY5           0x18
+#define SS_KEY6           0x1C
+#define SS_KEY7           0x20
+
+#define SS_IV0            0x24
+#define SS_IV1            0x28
+#define SS_IV2            0x2C
+#define SS_IV3            0x30
+
+#define SS_FCSR           0x44
+
+#define SS_MD0            0x4C
+#define SS_MD1            0x50
+#define SS_MD2            0x54
+#define SS_MD3            0x58
+#define SS_MD4            0x5C
+
+#define SS_RXFIFO         0x200
+#define SS_TXFIFO         0x204
+
+/* SS_CTL configuration values */
+
+/* PRNG generator mode - bit 15 */
+#define SS_PRNG_ONESHOT		(0 << 15)
+#define SS_PRNG_CONTINUE	(1 << 15)
+
+/* IV mode for hash */
+#define SS_IV_ARBITRARY		(1 << 14)
+
+/* SS operation mode - bits 12-13 */
+#define SS_ECB			(0 << 12)
+#define SS_CBC			(1 << 12)
+#define SS_CTS			(3 << 12)
+
+/* Counter width for CNT mode - bits 10-11 */
+#define SS_CNT_16BITS		(0 << 10)
+#define SS_CNT_32BITS		(1 << 10)
+#define SS_CNT_64BITS		(2 << 10)
+
+/* Key size for AES - bits 8-9 */
+#define SS_AES_128BITS		(0 << 8)
+#define SS_AES_192BITS		(1 << 8)
+#define SS_AES_256BITS		(2 << 8)
+
+/* Operation direction - bit 7 */
+#define SS_ENCRYPTION		(0 << 7)
+#define SS_DECRYPTION		(1 << 7)
+
+/* SS Method - bits 4-6 */
+#define SS_OP_AES		(0 << 4)
+#define SS_OP_DES		(1 << 4)
+#define SS_OP_3DES		(2 << 4)
+#define SS_OP_SHA1		(3 << 4)
+#define SS_OP_MD5		(4 << 4)
+#define SS_OP_PRNG		(5 << 4)
+
+/* Data end bit - bit 2 */
+#define SS_DATA_END		(1 << 2)
+
+/* PRNG start bit - bit 1 */
+#define SS_PRNG_START		(1 << 1)
+
+/* SS Enable bit - bit 0 */
+#define SS_DISABLED		(0 << 0)
+#define SS_ENABLED		(1 << 0)
+
+/* SS_FCSR configuration values */
+/* RX FIFO status - bit 30 */
+#define SS_RXFIFO_FREE		(1 << 30)
+
+/* RX FIFO empty spaces - bits 24-29 */
+#define SS_RXFIFO_SPACES(val)	(((val) >> 24) & 0x3f)
+
+/* TX FIFO status - bit 22 */
+#define SS_TXFIFO_AVAILABLE	(1 << 22)
+
+/* TX FIFO available spaces - bits 16-21 */
+#define SS_TXFIFO_SPACES(val)	(((val) >> 16) & 0x3f)
+
+#define SS_RX_MAX	32
+#define SS_RX_DEFAULT	SS_RX_MAX
+#define SS_TX_MAX	33
+
+#define SS_RXFIFO_EMP_INT_PENDING	(1 << 10)
+#define SS_TXFIFO_AVA_INT_PENDING	(1 << 8)
+#define SS_RXFIFO_EMP_INT_ENABLE	(1 << 2)
+#define SS_TXFIFO_AVA_INT_ENABLE	(1 << 0)
+
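+/* PRNG seed and per-round output sizes, expressed in bits */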
+#define SS_SEED_LEN 192
+#define SS_DATA_LEN 160
+
+struct sun4i_ss_ctx {
+	void __iomem *base;
+	int irq;
+	struct clk *busclk;
+	struct clk *ssclk;
+	struct reset_control *reset;
+	struct device *dev;
+	struct resource *res;
+	spinlock_t slock; /* control the use of the device */
+#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
+	u32 seed[SS_SEED_LEN / BITS_PER_LONG];
+#endif
+};
+
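+/*
+ * One entry per algorithm registered by sun4i-ss-core.c: the union holds the
+ * crypto API descriptor, .ss points back to the device instance and, for the
+ * hash entries, .mode carries the SS_OP_* value programmed into SS_CTL.
+ */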
+struct sun4i_ss_alg_template {
+	u32 type;
+	u32 mode;
+	union {
+		struct skcipher_alg crypto;
+		struct ahash_alg hash;
+		struct rng_alg rng;
+	} alg;
+	struct sun4i_ss_ctx *ss;
+};
+
+struct sun4i_tfm_ctx {
+	u32 key[AES_MAX_KEY_SIZE / 4]; /* divided by sizeof(u32) */
+	u32 keylen;
+	u32 keymode;
+	struct sun4i_ss_ctx *ss;
+};
+
+struct sun4i_cipher_req_ctx {
+	u32 mode;
+};
+
+struct sun4i_req_ctx {
+	u32 mode;
+	u64 byte_count; /* number of bytes "uploaded" to the device */
+	u32 hash[5]; /* for storing SS_IVx register */
+	char buf[64];
+	unsigned int len;
+	int flags;
+};
+
+int sun4i_hash_crainit(struct crypto_tfm *tfm);
+int sun4i_hash_init(struct ahash_request *areq);
+int sun4i_hash_update(struct ahash_request *areq);
+int sun4i_hash_final(struct ahash_request *areq);
+int sun4i_hash_finup(struct ahash_request *areq);
+int sun4i_hash_digest(struct ahash_request *areq);
+int sun4i_hash_export_md5(struct ahash_request *areq, void *out);
+int sun4i_hash_import_md5(struct ahash_request *areq, const void *in);
+int sun4i_hash_export_sha1(struct ahash_request *areq, void *out);
+int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in);
+
+int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq);
+int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq);
+int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq);
+int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq);
+
+int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq);
+int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq);
+int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq);
+int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq);
+
+int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq);
+int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq);
+int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq);
+int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq);
+
+int sun4i_ss_cipher_init(struct crypto_tfm *tfm);
+int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			unsigned int keylen);
+int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			unsigned int keylen);
+int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			 unsigned int keylen);
+int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+			   unsigned int slen, u8 *dst, unsigned int dlen);
+int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
new file mode 100644
index 0000000..6988012
--- /dev/null
+++ b/drivers/crypto/talitos.c
@@ -0,0 +1,3517 @@
+/*
+ * talitos - Freescale Integrated Security Engine (SEC) device driver
+ *
+ * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Scatterlist Crypto API glue code copied from files with the following:
+ * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * Crypto algorithm registration code copied from hifn driver:
+ * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <linux/hw_random.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/internal/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+
+#include "talitos.h"
+
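+/*
+ * Fill a h/w descriptor pointer. SEC1 only carries a 32-bit address and a
+ * 16-bit length (len1); SEC2+ additionally stores the upper bits of the
+ * bus address in eptr (the channels are configured for 36-bit addressing
+ * in reset_channel()).
+ */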
+static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
+			   unsigned int len, bool is_sec1)
+{
+	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
+	if (is_sec1) {
+		ptr->len1 = cpu_to_be16(len);
+	} else {
+		ptr->len = cpu_to_be16(len);
+		ptr->eptr = upper_32_bits(dma_addr);
+	}
+}
+
+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
+			     struct talitos_ptr *src_ptr, bool is_sec1)
+{
+	dst_ptr->ptr = src_ptr->ptr;
+	if (is_sec1) {
+		dst_ptr->len1 = src_ptr->len1;
+	} else {
+		dst_ptr->len = src_ptr->len;
+		dst_ptr->eptr = src_ptr->eptr;
+	}
+}
+
+static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
+					   bool is_sec1)
+{
+	if (is_sec1)
+		return be16_to_cpu(ptr->len1);
+	else
+		return be16_to_cpu(ptr->len);
+}
+
+static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
+				   bool is_sec1)
+{
+	if (!is_sec1)
+		ptr->j_extent = val;
+}
+
+static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
+{
+	if (!is_sec1)
+		ptr->j_extent |= val;
+}
+
+/*
+ * map virtual single (contiguous) pointer to h/w descriptor pointer
+ */
+static void __map_single_talitos_ptr(struct device *dev,
+				     struct talitos_ptr *ptr,
+				     unsigned int len, void *data,
+				     enum dma_data_direction dir,
+				     unsigned long attrs)
+{
+	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	bool is_sec1 = has_ftr_sec1(priv);
+
+	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
+}
+
+static void map_single_talitos_ptr(struct device *dev,
+				   struct talitos_ptr *ptr,
+				   unsigned int len, void *data,
+				   enum dma_data_direction dir)
+{
+	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
+}
+
+static void map_single_talitos_ptr_nosync(struct device *dev,
+					  struct talitos_ptr *ptr,
+					  unsigned int len, void *data,
+					  enum dma_data_direction dir)
+{
+	__map_single_talitos_ptr(dev, ptr, len, data, dir,
+				 DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+/*
+ * unmap bus single (contiguous) h/w descriptor pointer
+ */
+static void unmap_single_talitos_ptr(struct device *dev,
+				     struct talitos_ptr *ptr,
+				     enum dma_data_direction dir)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	bool is_sec1 = has_ftr_sec1(priv);
+
+	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
+			 from_talitos_ptr_len(ptr, is_sec1), dir);
+}
+
+static int reset_channel(struct device *dev, int ch)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	unsigned int timeout = TALITOS_TIMEOUT;
+	bool is_sec1 = has_ftr_sec1(priv);
+
+	if (is_sec1) {
+		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
+			  TALITOS1_CCCR_LO_RESET);
+
+		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
+			TALITOS1_CCCR_LO_RESET) && --timeout)
+			cpu_relax();
+	} else {
+		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
+			  TALITOS2_CCCR_RESET);
+
+		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
+			TALITOS2_CCCR_RESET) && --timeout)
+			cpu_relax();
+	}
+
+	if (timeout == 0) {
+		dev_err(dev, "failed to reset channel %d\n", ch);
+		return -EIO;
+	}
+
+	/* set 36-bit addressing, done writeback enable and done IRQ enable */
+	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
+		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
+	/* enable chaining descriptors */
+	if (is_sec1)
+		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
+			  TALITOS_CCCR_LO_NE);
+
+	/* and ICCR writeback, if available */
+	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
+		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
+		          TALITOS_CCCR_LO_IWSE);
+
+	return 0;
+}
+
+static int reset_device(struct device *dev)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	unsigned int timeout = TALITOS_TIMEOUT;
+	bool is_sec1 = has_ftr_sec1(priv);
+	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
+
+	setbits32(priv->reg + TALITOS_MCR, mcr);
+
+	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
+	       && --timeout)
+		cpu_relax();
+
+	if (priv->irq[1]) {
+		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
+		setbits32(priv->reg + TALITOS_MCR, mcr);
+	}
+
+	if (timeout == 0) {
+		dev_err(dev, "failed to reset device\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Reset and initialize the device
+ */
+static int init_device(struct device *dev)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	int ch, err;
+	bool is_sec1 = has_ftr_sec1(priv);
+
+	/*
+	 * Master reset.
+	 * Per the errata documentation, certain SEC interrupts are not
+	 * fully cleared by writing the MCR:SWR bit once, so set the bit
+	 * twice to completely reset the device.
+	 */
+	err = reset_device(dev);
+	if (err)
+		return err;
+
+	err = reset_device(dev);
+	if (err)
+		return err;
+
+	/* reset channels */
+	for (ch = 0; ch < priv->num_channels; ch++) {
+		err = reset_channel(dev, ch);
+		if (err)
+			return err;
+	}
+
+	/* enable channel done and error interrupts */
+	if (is_sec1) {
+		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
+		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
+		/* disable parity error check in DEU (erroneous? test vect.) */
+		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
+	} else {
+		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
+		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
+	}
+
+	/* disable integrity check error interrupts (use writeback instead) */
+	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
+		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
+		          TALITOS_MDEUICR_LO_ICE);
+
+	return 0;
+}
+
+/**
+ * talitos_submit - submits a descriptor to the device for processing
+ * @dev:	the SEC device to be used
+ * @ch:		the SEC device channel to be used
+ * @desc:	the descriptor to be processed by the device
+ * @callback:	whom to call when processing is complete
+ * @context:	a handle for use by caller (optional)
+ *
+ * desc must contain valid dma-mapped (bus physical) address pointers.
+ * callback must check err and feedback in descriptor header
+ * for device processing status.
+ */
+int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
+		   void (*callback)(struct device *dev,
+				    struct talitos_desc *desc,
+				    void *context, int error),
+		   void *context)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	struct talitos_request *request;
+	unsigned long flags;
+	int head;
+	bool is_sec1 = has_ftr_sec1(priv);
+
+	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
+
+	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
+		/* h/w fifo is full */
+		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
+		return -EAGAIN;
+	}
+
+	head = priv->chan[ch].head;
+	request = &priv->chan[ch].fifo[head];
+
+	/* map descriptor and save caller data */
+	if (is_sec1) {
+		desc->hdr1 = desc->hdr;
+		request->dma_desc = dma_map_single(dev, &desc->hdr1,
+						   TALITOS_DESC_SIZE,
+						   DMA_BIDIRECTIONAL);
+	} else {
+		request->dma_desc = dma_map_single(dev, desc,
+						   TALITOS_DESC_SIZE,
+						   DMA_BIDIRECTIONAL);
+	}
+	request->callback = callback;
+	request->context = context;
+
+	/* increment fifo head */
+	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
+
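+	/*
+	 * Order the stores above (dma_desc, callback, context, head) before
+	 * request->desc is set: a non-NULL desc is what marks the fifo slot
+	 * as in use for flush_channel().
+	 */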
+	smp_wmb();
+	request->desc = desc;
+
+	/* GO! */
+	wmb();
+	out_be32(priv->chan[ch].reg + TALITOS_FF,
+		 upper_32_bits(request->dma_desc));
+	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
+		 lower_32_bits(request->dma_desc));
+
+	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
+
+	return -EINPROGRESS;
+}
+EXPORT_SYMBOL(talitos_submit);
+
+/*
+ * process descriptors that have completed; on a channel error, pass the
+ * error to the callbacks of descriptors that did not complete
+ */
+static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	struct talitos_request *request, saved_req;
+	unsigned long flags;
+	int tail, status;
+	bool is_sec1 = has_ftr_sec1(priv);
+
+	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
+
+	tail = priv->chan[ch].tail;
+	while (priv->chan[ch].fifo[tail].desc) {
+		__be32 hdr;
+
+		request = &priv->chan[ch].fifo[tail];
+
+		/* descriptors with their done bits set don't get the error */
+		rmb();
+		if (!is_sec1)
+			hdr = request->desc->hdr;
+		else if (request->desc->next_desc)
+			hdr = (request->desc + 1)->hdr1;
+		else
+			hdr = request->desc->hdr1;
+
+		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
+			status = 0;
+		else
+			if (!error)
+				break;
+			else
+				status = error;
+
+		dma_unmap_single(dev, request->dma_desc,
+				 TALITOS_DESC_SIZE,
+				 DMA_BIDIRECTIONAL);
+
+		/* copy entries so we can call callback outside lock */
+		saved_req.desc = request->desc;
+		saved_req.callback = request->callback;
+		saved_req.context = request->context;
+
+		/* release request entry in fifo */
+		smp_wmb();
+		request->desc = NULL;
+
+		/* increment fifo tail */
+		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
+
+		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
+
+		atomic_dec(&priv->chan[ch].submit_count);
+
+		saved_req.callback(dev, saved_req.desc, saved_req.context,
+				   status);
+		/* channel may resume processing in single desc error case */
+		if (error && !reset_ch && status == error)
+			return;
+		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
+		tail = priv->chan[ch].tail;
+	}
+
+	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
+}
+
+/*
+ * process completed requests for channels that have done status
+ */
+#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
+static void talitos1_done_##name(unsigned long data)			\
+{									\
+	struct device *dev = (struct device *)data;			\
+	struct talitos_private *priv = dev_get_drvdata(dev);		\
+	unsigned long flags;						\
+									\
+	if (ch_done_mask & 0x10000000)					\
+		flush_channel(dev, 0, 0, 0);			\
+	if (ch_done_mask & 0x40000000)					\
+		flush_channel(dev, 1, 0, 0);			\
+	if (ch_done_mask & 0x00010000)					\
+		flush_channel(dev, 2, 0, 0);			\
+	if (ch_done_mask & 0x00040000)					\
+		flush_channel(dev, 3, 0, 0);			\
+									\
+	/* At this point, all completed channels have been processed */	\
+	/* Unmask done interrupts for channels completed later on. */	\
+	spin_lock_irqsave(&priv->reg_lock, flags);			\
+	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
+	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
+	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
+}
+
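+/*
+ * Note the inverted IMR polarity: on SEC1 a cleared IMR bit unmasks a done
+ * interrupt (hence clrbits32 above), while on SEC2+ a set bit unmasks it
+ * (hence setbits32 in DEF_TALITOS2_DONE below); init_device() follows the
+ * same convention.
+ */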
+DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
+DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
+
+#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
+static void talitos2_done_##name(unsigned long data)			\
+{									\
+	struct device *dev = (struct device *)data;			\
+	struct talitos_private *priv = dev_get_drvdata(dev);		\
+	unsigned long flags;						\
+									\
+	if (ch_done_mask & 1)						\
+		flush_channel(dev, 0, 0, 0);				\
+	if (ch_done_mask & (1 << 2))					\
+		flush_channel(dev, 1, 0, 0);				\
+	if (ch_done_mask & (1 << 4))					\
+		flush_channel(dev, 2, 0, 0);				\
+	if (ch_done_mask & (1 << 6))					\
+		flush_channel(dev, 3, 0, 0);				\
+									\
+	/* At this point, all completed channels have been processed */	\
+	/* Unmask done interrupts for channels completed later on. */	\
+	spin_lock_irqsave(&priv->reg_lock, flags);			\
+	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
+	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
+	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
+}
+
+DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
+DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
+DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
+DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
+
+/*
+ * locate current (offending) descriptor
+ */
+static u32 current_desc_hdr(struct device *dev, int ch)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	int tail, iter;
+	dma_addr_t cur_desc;
+
+	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
+	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
+
+	if (!cur_desc) {
+		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
+		return 0;
+	}
+
+	tail = priv->chan[ch].tail;
+
+	iter = tail;
+	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
+	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
+		iter = (iter + 1) & (priv->fifo_len - 1);
+		if (iter == tail) {
+			dev_err(dev, "couldn't locate current descriptor\n");
+			return 0;
+		}
+	}
+
+	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
+		return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
+
+	return priv->chan[ch].fifo[iter].desc->hdr;
+}
+
+/*
+ * user diagnostics; report root cause of error based on execution unit status
+ */
+static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	int i;
+
+	if (!desc_hdr)
+		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
+
+	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
+	case DESC_HDR_SEL0_AFEU:
+		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
+			in_be32(priv->reg_afeu + TALITOS_EUISR),
+			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
+		break;
+	case DESC_HDR_SEL0_DEU:
+		dev_err(dev, "DEUISR 0x%08x_%08x\n",
+			in_be32(priv->reg_deu + TALITOS_EUISR),
+			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
+		break;
+	case DESC_HDR_SEL0_MDEUA:
+	case DESC_HDR_SEL0_MDEUB:
+		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
+			in_be32(priv->reg_mdeu + TALITOS_EUISR),
+			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
+		break;
+	case DESC_HDR_SEL0_RNG:
+		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
+			in_be32(priv->reg_rngu + TALITOS_ISR),
+			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
+		break;
+	case DESC_HDR_SEL0_PKEU:
+		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
+			in_be32(priv->reg_pkeu + TALITOS_EUISR),
+			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
+		break;
+	case DESC_HDR_SEL0_AESU:
+		dev_err(dev, "AESUISR 0x%08x_%08x\n",
+			in_be32(priv->reg_aesu + TALITOS_EUISR),
+			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
+		break;
+	case DESC_HDR_SEL0_CRCU:
+		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
+			in_be32(priv->reg_crcu + TALITOS_EUISR),
+			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
+		break;
+	case DESC_HDR_SEL0_KEU:
+		dev_err(dev, "KEUISR 0x%08x_%08x\n",
+			in_be32(priv->reg_pkeu + TALITOS_EUISR),
+			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
+		break;
+	}
+
+	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
+	case DESC_HDR_SEL1_MDEUA:
+	case DESC_HDR_SEL1_MDEUB:
+		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
+			in_be32(priv->reg_mdeu + TALITOS_EUISR),
+			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
+		break;
+	case DESC_HDR_SEL1_CRCU:
+		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
+			in_be32(priv->reg_crcu + TALITOS_EUISR),
+			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
+		break;
+	}
+
+	for (i = 0; i < 8; i++)
+		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
+			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
+			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
+}
+
+/*
+ * recover from error interrupts
+ */
+static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	unsigned int timeout = TALITOS_TIMEOUT;
+	int ch, error, reset_dev = 0;
+	u32 v_lo;
+	bool is_sec1 = has_ftr_sec1(priv);
+	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
+
+	for (ch = 0; ch < priv->num_channels; ch++) {
+		/* skip channels without errors */
+		if (is_sec1) {
+			/* bits 29, 31, 17, 19 */
+			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
+				continue;
+		} else {
+			if (!(isr & (1 << (ch * 2 + 1))))
+				continue;
+		}
+
+		error = -EINVAL;
+
+		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
+
+		if (v_lo & TALITOS_CCPSR_LO_DOF) {
+			dev_err(dev, "double fetch fifo overflow error\n");
+			error = -EAGAIN;
+			reset_ch = 1;
+		}
+		if (v_lo & TALITOS_CCPSR_LO_SOF) {
+			/* h/w dropped descriptor */
+			dev_err(dev, "single fetch fifo overflow error\n");
+			error = -EAGAIN;
+		}
+		if (v_lo & TALITOS_CCPSR_LO_MDTE)
+			dev_err(dev, "master data transfer error\n");
+		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
+			dev_err(dev, is_sec1 ? "pointer not complete error\n"
+					     : "s/g data length zero error\n");
+		if (v_lo & TALITOS_CCPSR_LO_FPZ)
+			dev_err(dev, is_sec1 ? "parity error\n"
+					     : "fetch pointer zero error\n");
+		if (v_lo & TALITOS_CCPSR_LO_IDH)
+			dev_err(dev, "illegal descriptor header error\n");
+		if (v_lo & TALITOS_CCPSR_LO_IEU)
+			dev_err(dev, is_sec1 ? "static assignment error\n"
+					     : "invalid exec unit error\n");
+		if (v_lo & TALITOS_CCPSR_LO_EU)
+			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
+		if (!is_sec1) {
+			if (v_lo & TALITOS_CCPSR_LO_GB)
+				dev_err(dev, "gather boundary error\n");
+			if (v_lo & TALITOS_CCPSR_LO_GRL)
+				dev_err(dev, "gather return/length error\n");
+			if (v_lo & TALITOS_CCPSR_LO_SB)
+				dev_err(dev, "scatter boundary error\n");
+			if (v_lo & TALITOS_CCPSR_LO_SRL)
+				dev_err(dev, "scatter return/length error\n");
+		}
+
+		flush_channel(dev, ch, error, reset_ch);
+
+		if (reset_ch) {
+			reset_channel(dev, ch);
+		} else {
+			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
+				  TALITOS2_CCCR_CONT);
+			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
+			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
+			       TALITOS2_CCCR_CONT) && --timeout)
+				cpu_relax();
+			if (timeout == 0) {
+				dev_err(dev, "failed to restart channel %d\n",
+					ch);
+				reset_dev = 1;
+			}
+		}
+	}
+	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
+	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
+		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
+			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
+				isr, isr_lo);
+		else
+			dev_err(dev, "done overflow, internal time out, or "
+				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
+
+		/* purge request queues */
+		for (ch = 0; ch < priv->num_channels; ch++)
+			flush_channel(dev, ch, -EIO, 1);
+
+		/* reset and reinitialize the device */
+		init_device(dev);
+	}
+}
+
+#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
+static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
+{									       \
+	struct device *dev = data;					       \
+	struct talitos_private *priv = dev_get_drvdata(dev);		       \
+	u32 isr, isr_lo;						       \
+	unsigned long flags;						       \
+									       \
+	spin_lock_irqsave(&priv->reg_lock, flags);			       \
+	isr = in_be32(priv->reg + TALITOS_ISR);				       \
+	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
+	/* Acknowledge interrupt */					       \
+	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
+	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
+									       \
+	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
+		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
+		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
+	}								       \
+	else {								       \
+		if (likely(isr & ch_done_mask)) {			       \
+			/* mask further done interrupts. */		       \
+			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
+			/* done_task will unmask done interrupts at exit */    \
+			tasklet_schedule(&priv->done_task[tlet]);	       \
+		}							       \
+		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
+	}								       \
+									       \
+	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
+								IRQ_NONE;      \
+}
+
+DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
+
+#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
+static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
+{									       \
+	struct device *dev = data;					       \
+	struct talitos_private *priv = dev_get_drvdata(dev);		       \
+	u32 isr, isr_lo;						       \
+	unsigned long flags;						       \
+									       \
+	spin_lock_irqsave(&priv->reg_lock, flags);			       \
+	isr = in_be32(priv->reg + TALITOS_ISR);				       \
+	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
+	/* Acknowledge interrupt */					       \
+	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
+	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
+									       \
+	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
+		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
+		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
+	}								       \
+	else {								       \
+		if (likely(isr & ch_done_mask)) {			       \
+			/* mask further done interrupts. */		       \
+			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
+			/* done_task will unmask done interrupts at exit */    \
+			tasklet_schedule(&priv->done_task[tlet]);	       \
+		}							       \
+		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
+	}								       \
+									       \
+	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
+								IRQ_NONE;      \
+}
+
+DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
+DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
+		       0)
+DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
+		       1)
+
+/*
+ * hwrng
+ */
+static int talitos_rng_data_present(struct hwrng *rng, int wait)
+{
+	struct device *dev = (struct device *)rng->priv;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	u32 ofl;
+	int i;
+
+	for (i = 0; i < 20; i++) {
+		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
+		      TALITOS_RNGUSR_LO_OFL;
+		if (ofl || !wait)
+			break;
+		udelay(10);
+	}
+
+	return !!ofl;
+}
+
+static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
+{
+	struct device *dev = (struct device *)rng->priv;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+
+	/* rng fifo requires 64-bit accesses; only the low word is returned */
+	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
+	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
+
+	return sizeof(u32);
+}
+
+static int talitos_rng_init(struct hwrng *rng)
+{
+	struct device *dev = (struct device *)rng->priv;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	unsigned int timeout = TALITOS_TIMEOUT;
+
+	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
+	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
+		 & TALITOS_RNGUSR_LO_RD)
+	       && --timeout)
+		cpu_relax();
+	if (timeout == 0) {
+		dev_err(dev, "failed to reset rng hw\n");
+		return -ENODEV;
+	}
+
+	/* start generating */
+	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
+
+	return 0;
+}
+
+static int talitos_register_rng(struct device *dev)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	int err;
+
+	priv->rng.name		= dev_driver_string(dev);
+	priv->rng.init		= talitos_rng_init;
+	priv->rng.data_present	= talitos_rng_data_present;
+	priv->rng.data_read	= talitos_rng_data_read;

+	priv->rng.priv		= (unsigned long)dev;
+
+	err = hwrng_register(&priv->rng);
+	if (!err)
+		priv->rng_registered = true;
+
+	return err;
+}
+
+static void talitos_unregister_rng(struct device *dev)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+
+	if (!priv->rng_registered)
+		return;
+
+	hwrng_unregister(&priv->rng);
+	priv->rng_registered = false;
+}
+
+/*
+ * crypto alg
+ */
+#define TALITOS_CRA_PRIORITY		3000
+/*
+ * Defines a priority for doing AEAD with descriptor type
+ * HMAC_SNOOP_NO_AFEU (HSNA) instead of type IPSEC_ESP
+ */
+#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
+#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
+#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+
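+/*
+ * per-tfm context: for AEAD the HMAC auth key is stored at the start of @key
+ * with the cipher key immediately following it; the whole buffer is DMA
+ * mapped once at setkey time (@dma_key)
+ */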
+struct talitos_ctx {
+	struct device *dev;
+	int ch;
+	__be32 desc_hdr_template;
+	u8 key[TALITOS_MAX_KEY_SIZE];
+	u8 iv[TALITOS_MAX_IV_LENGTH];
+	dma_addr_t dma_key;
+	unsigned int keylen;
+	unsigned int enckeylen;
+	unsigned int authkeylen;
+};
+
+#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
+#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
+
+struct talitos_ahash_req_ctx {
+	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
+	unsigned int hw_context_size;
+	u8 buf[2][HASH_MAX_BLOCK_SIZE];
+	int buf_idx;
+	unsigned int swinit;
+	unsigned int first;
+	unsigned int last;
+	unsigned int to_hash_later;
+	unsigned int nbuf;
+	struct scatterlist bufsl[2];
+	struct scatterlist *psrc;
+};
+
+struct talitos_export_state {
+	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
+	u8 buf[HASH_MAX_BLOCK_SIZE];
+	unsigned int swinit;
+	unsigned int first;
+	unsigned int last;
+	unsigned int to_hash_later;
+	unsigned int nbuf;
+};
+
+static int aead_setkey(struct crypto_aead *authenc,
+		       const u8 *key, unsigned int keylen)
+{
+	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+	struct device *dev = ctx->dev;
+	struct crypto_authenc_keys keys;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+		goto badkey;
+
+	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
+		goto badkey;
+
+	if (ctx->keylen)
+		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
+	memcpy(ctx->key, keys.authkey, keys.authkeylen);
+	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
+
+	ctx->keylen = keys.authkeylen + keys.enckeylen;
+	ctx->enckeylen = keys.enckeylen;
+	ctx->authkeylen = keys.authkeylen;
+	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
+				      DMA_TO_DEVICE);
+
+	memzero_explicit(&keys, sizeof(keys));
+	return 0;
+
+badkey:
+	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	memzero_explicit(&keys, sizeof(keys));
+	return -EINVAL;
+}
+
+/*
+ * talitos_edesc - s/w-extended descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @icv_ool: whether ICV is out-of-line
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @dma_len: length of dma mapped link_tbl space
+ * @dma_link_tbl: bus physical address of link_tbl/buf
+ * @desc: h/w descriptor
+ * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
+ * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
+ *
+ * if decrypting (with auth check), or if either src_nents or dst_nents
+ * is greater than 1, an integrity check value is concatenated to the end
+ * of the link_tbl data
+ */
+struct talitos_edesc {
+	int src_nents;
+	int dst_nents;
+	bool icv_ool;
+	dma_addr_t iv_dma;
+	int dma_len;
+	dma_addr_t dma_link_tbl;
+	struct talitos_desc desc;
+	union {
+		struct talitos_ptr link_tbl[0];
+		u8 buf[0];
+	};
+};
+
+static void talitos_sg_unmap(struct device *dev,
+			     struct talitos_edesc *edesc,
+			     struct scatterlist *src,
+			     struct scatterlist *dst,
+			     unsigned int len, unsigned int offset)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	bool is_sec1 = has_ftr_sec1(priv);
+	unsigned int src_nents = edesc->src_nents ? : 1;
+	unsigned int dst_nents = edesc->dst_nents ? : 1;
+
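+	/*
+	 * on SEC1, multi-segment dst data went through the bounce buffer,
+	 * so copy the results back into the destination scatterlist
+	 */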
+	if (is_sec1 && dst && dst_nents > 1) {
+		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
+					   len, DMA_FROM_DEVICE);
+		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
+				     offset);
+	}
+	if (src != dst) {
+		if (src_nents == 1 || !is_sec1)
+			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+
+		if (dst && (dst_nents == 1 || !is_sec1))
+			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+	} else if (src_nents == 1 || !is_sec1) {
+		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+	}
+}
+
+static void ipsec_esp_unmap(struct device *dev,
+			    struct talitos_edesc *edesc,
+			    struct aead_request *areq)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
+	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
+
+	if (is_ipsec_esp)
+		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
+					 DMA_FROM_DEVICE);
+	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
+
+	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
+			 areq->assoclen);
+
+	if (edesc->dma_len)
+		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+				 DMA_BIDIRECTIONAL);
+
+	if (!is_ipsec_esp) {
+		unsigned int dst_nents = edesc->dst_nents ? : 1;
+
+		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
+				   areq->assoclen + areq->cryptlen - ivsize);
+	}
+}
+
+/*
+ * ipsec_esp descriptor callbacks
+ */
+static void ipsec_esp_encrypt_done(struct device *dev,
+				   struct talitos_desc *desc, void *context,
+				   int err)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	bool is_sec1 = has_ftr_sec1(priv);
+	struct aead_request *areq = context;
+	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+	unsigned int authsize = crypto_aead_authsize(authenc);
+	unsigned int ivsize = crypto_aead_ivsize(authenc);
+	struct talitos_edesc *edesc;
+	struct scatterlist *sg;
+	void *icvdata;
+
+	edesc = container_of(desc, struct talitos_edesc, desc);
+
+	ipsec_esp_unmap(dev, edesc, areq);
+
+	/* copy the generated ICV to dst */
+	if (edesc->icv_ool) {
+		if (is_sec1)
+			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
+		else
+			icvdata = &edesc->link_tbl[edesc->src_nents +
+						   edesc->dst_nents + 2];
+		sg = sg_last(areq->dst, edesc->dst_nents);
+		memcpy((char *)sg_virt(sg) + sg->length - authsize,
+		       icvdata, authsize);
+	}
+
+	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
+
+	kfree(edesc);
+
+	aead_request_complete(areq, err);
+}
+
+static void ipsec_esp_decrypt_swauth_done(struct device *dev,
+					  struct talitos_desc *desc,
+					  void *context, int err)
+{
+	struct aead_request *req = context;
+	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	unsigned int authsize = crypto_aead_authsize(authenc);
+	struct talitos_edesc *edesc;
+	struct scatterlist *sg;
+	char *oicv, *icv;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	bool is_sec1 = has_ftr_sec1(priv);
+
+	edesc = container_of(desc, struct talitos_edesc, desc);
+
+	ipsec_esp_unmap(dev, edesc, req);
+
+	if (!err) {
+		/* auth check */
+		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
+		icv = (char *)sg_virt(sg) + sg->length - authsize;
+
+		if (edesc->dma_len) {
+			if (is_sec1)
+				oicv = (char *)&edesc->dma_link_tbl +
+					       req->assoclen + req->cryptlen;
+			else
+				oicv = (char *)
+				       &edesc->link_tbl[edesc->src_nents +
+							edesc->dst_nents + 2];
+			if (edesc->icv_ool)
+				icv = oicv + authsize;
+		} else
+			oicv = (char *)&edesc->link_tbl[0];
+
+		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
+	}
+
+	kfree(edesc);
+
+	aead_request_complete(req, err);
+}
+
+static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
+					  struct talitos_desc *desc,
+					  void *context, int err)
+{
+	struct aead_request *req = context;
+	struct talitos_edesc *edesc;
+
+	edesc = container_of(desc, struct talitos_edesc, desc);
+
+	ipsec_esp_unmap(dev, edesc, req);
+
+	/* check ICV auth status */
+	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
+		     DESC_HDR_LO_ICCR1_PASS))
+		err = -EBADMSG;
+
+	kfree(edesc);
+
+	aead_request_complete(req, err);
+}
+
+/*
+ * convert scatterlist to SEC h/w link table format
+ * stop at cryptlen bytes
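+ * (the first @offset bytes of the scatterlist are skipped, and the last
+ * entry written is tagged DESC_PTR_LNKTBL_RETURN to mark the table end)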
+ */
+static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+				 unsigned int offset, int cryptlen,
+				 struct talitos_ptr *link_tbl_ptr)
+{
+	int n_sg = sg_count;
+	int count = 0;
+
+	while (cryptlen && sg && n_sg--) {
+		unsigned int len = sg_dma_len(sg);
+
+		if (offset >= len) {
+			offset -= len;
+			goto next;
+		}
+
+		len -= offset;
+
+		if (len > cryptlen)
+			len = cryptlen;
+
+		to_talitos_ptr(link_tbl_ptr + count,
+			       sg_dma_address(sg) + offset, len, 0);
+		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
+		count++;
+		cryptlen -= len;
+		offset = 0;
+
+next:
+		sg = sg_next(sg);
+	}
+
+	/* tag end of link table */
+	if (count > 0)
+		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
+				       DESC_PTR_LNKTBL_RETURN, 0);
+
+	return count;
+}
+
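+/*
+ * map @src into h/w pointer @ptr, building a link table at @tbl_off when
+ * more than one segment is needed; @elen extra bytes (e.g. an appended ICV)
+ * are carried in the pointer's extent field
+ */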
+static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+			      unsigned int len, struct talitos_edesc *edesc,
+			      struct talitos_ptr *ptr, int sg_count,
+			      unsigned int offset, int tbl_off, int elen)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	bool is_sec1 = has_ftr_sec1(priv);
+
+	if (!src) {
+		to_talitos_ptr(ptr, 0, 0, is_sec1);
+		return 1;
+	}
+	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
+	if (sg_count == 1) {
+		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
+		return sg_count;
+	}
+	if (is_sec1) {
+		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
+		return sg_count;
+	}
+	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
+					 &edesc->link_tbl[tbl_off]);
+	if (sg_count == 1) {
+		/* Only one segment now, so no link tbl needed */
+		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
+		return sg_count;
+	}
+	to_talitos_ptr(ptr, edesc->dma_link_tbl +
+			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
+	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
+
+	return sg_count;
+}
+
+static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+			  unsigned int len, struct talitos_edesc *edesc,
+			  struct talitos_ptr *ptr, int sg_count,
+			  unsigned int offset, int tbl_off)
+{
+	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
+				  tbl_off, 0);
+}
+
+/*
+ * fill in and submit ipsec_esp descriptor
+ */
+static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+		     void (*callback)(struct device *dev,
+				      struct talitos_desc *desc,
+				      void *context, int error))
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	unsigned int authsize = crypto_aead_authsize(aead);
+	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *dev = ctx->dev;
+	struct talitos_desc *desc = &edesc->desc;
+	unsigned int cryptlen = areq->cryptlen;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	int tbl_off = 0;
+	int sg_count, ret;
+	int elen = 0;
+	bool sync_needed = false;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	bool is_sec1 = has_ftr_sec1(priv);
+	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
+	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
+	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
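+	/* IPSEC_ESP and HSNA descriptors swap the IV and cipher key slots */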
+
+	/* hmac key */
+	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
+
+	sg_count = edesc->src_nents ?: 1;
+	if (is_sec1 && sg_count > 1)
+		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
+				  areq->assoclen + cryptlen);
+	else
+		sg_count = dma_map_sg(dev, areq->src, sg_count,
+				      (areq->src == areq->dst) ?
+				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+	/* hmac data */
+	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
+			     &desc->ptr[1], sg_count, 0, tbl_off);
+
+	if (ret > 1) {
+		tbl_off += ret;
+		sync_needed = true;
+	}
+
+	/* cipher iv */
+	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
+
+	/* cipher key */
+	to_talitos_ptr(ckey_ptr, ctx->dma_key  + ctx->authkeylen,
+		       ctx->enckeylen, is_sec1);
+
+	/*
+	 * cipher in
+	 * map and adjust cipher len to aead request cryptlen.
+	 * extent is the number of HMAC bytes appended to the ciphertext,
+	 * typically 12 for ipsec
+	 */
+	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
+		elen = authsize;
+
+	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+				 sg_count, areq->assoclen, tbl_off, elen);
+
+	if (ret > 1) {
+		tbl_off += ret;
+		sync_needed = true;
+	}
+
+	/* cipher out */
+	if (areq->src != areq->dst) {
+		sg_count = edesc->dst_nents ? : 1;
+		if (!is_sec1 || sg_count == 1)
+			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
+	}
+
+	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
+			     sg_count, areq->assoclen, tbl_off);
+
+	if (is_ipsec_esp)
+		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
+
+	/* ICV data */
+	if (ret > 1) {
+		tbl_off += ret;
+		edesc->icv_ool = true;
+		sync_needed = true;
+
+		if (is_ipsec_esp) {
+			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
+				     sizeof(struct talitos_ptr) + authsize;
+
+			/* Add an entry to the link table for ICV data */
+			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
+			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
+					       is_sec1);
+
+			/* icv data follows link tables */
+			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
+				       authsize, is_sec1);
+		} else {
+			dma_addr_t addr = edesc->dma_link_tbl;
+
+			if (is_sec1)
+				addr += areq->assoclen + cryptlen;
+			else
+				addr += sizeof(struct talitos_ptr) * tbl_off;
+
+			to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
+		}
+	} else if (!is_ipsec_esp) {
+		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
+				     &desc->ptr[6], sg_count, areq->assoclen +
+							      cryptlen,
+				     tbl_off);
+		if (ret > 1) {
+			tbl_off += ret;
+			edesc->icv_ool = true;
+			sync_needed = true;
+		} else {
+			edesc->icv_ool = false;
+		}
+	} else {
+		edesc->icv_ool = false;
+	}
+
+	/* iv out */
+	if (is_ipsec_esp)
+		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
+				       DMA_FROM_DEVICE);
+
+	if (sync_needed)
+		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+					   edesc->dma_len,
+					   DMA_BIDIRECTIONAL);
+
+	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
+	if (ret != -EINPROGRESS) {
+		ipsec_esp_unmap(dev, edesc, areq);
+		kfree(edesc);
+	}
+	return ret;
+}
+
+/*
+ * allocate and map the extended descriptor
+ */
+static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+						 struct scatterlist *src,
+						 struct scatterlist *dst,
+						 u8 *iv,
+						 unsigned int assoclen,
+						 unsigned int cryptlen,
+						 unsigned int authsize,
+						 unsigned int ivsize,
+						 int icv_stashing,
+						 u32 cryptoflags,
+						 bool encrypt)
+{
+	struct talitos_edesc *edesc;
+	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
+	dma_addr_t iv_dma = 0;
+	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		      GFP_ATOMIC;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	bool is_sec1 = has_ftr_sec1(priv);
+	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
+	void *err;
+
+	if (cryptlen + authsize > max_len) {
+		dev_err(dev, "length exceeds h/w max limit\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (ivsize)
+		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+
+	if (!dst || dst == src) {
+		src_len = assoclen + cryptlen + authsize;
+		src_nents = sg_nents_for_len(src, src_len);
+		if (src_nents < 0) {
+			dev_err(dev, "Invalid number of src SG.\n");
+			err = ERR_PTR(-EINVAL);
+			goto error_sg;
+		}
+		src_nents = (src_nents == 1) ? 0 : src_nents;
+		dst_nents = dst ? src_nents : 0;
+		dst_len = 0;
+	} else { /* dst && dst != src */
+		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
+		src_nents = sg_nents_for_len(src, src_len);
+		if (src_nents < 0) {
+			dev_err(dev, "Invalid number of src SG.\n");
+			err = ERR_PTR(-EINVAL);
+			goto error_sg;
+		}
+		src_nents = (src_nents == 1) ? 0 : src_nents;
+		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
+		dst_nents = sg_nents_for_len(dst, dst_len);
+		if (dst_nents < 0) {
+			dev_err(dev, "Invalid number of dst SG.\n");
+			err = ERR_PTR(-EINVAL);
+			goto error_sg;
+		}
+		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+	}
+
+	/*
+	 * allocate space for base edesc plus the link tables,
+	 * allowing for two separate entries for AD and generated ICV (+ 2),
+	 * and space for two sets of ICVs (stashed and generated)
+	 */
+	alloc_len = sizeof(struct talitos_edesc);
+	if (src_nents || dst_nents) {
+		if (is_sec1)
+			dma_len = (src_nents ? src_len : 0) +
+				  (dst_nents ? dst_len : 0);
+		else
+			dma_len = (src_nents + dst_nents + 2) *
+				  sizeof(struct talitos_ptr) + authsize * 2;
+		alloc_len += dma_len;
+	} else {
+		dma_len = 0;
+		alloc_len += icv_stashing ? authsize : 0;
+	}
+
+	/* if it's an ahash, add space for a second desc next to the first one */
+	if (is_sec1 && !dst)
+		alloc_len += sizeof(struct talitos_desc);
+
+	edesc = kmalloc(alloc_len, GFP_DMA | flags);
+	if (!edesc) {
+		err = ERR_PTR(-ENOMEM);
+		goto error_sg;
+	}
+	memset(&edesc->desc, 0, sizeof(edesc->desc));
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->iv_dma = iv_dma;
+	edesc->dma_len = dma_len;
+	if (dma_len) {
+		void *addr = &edesc->link_tbl[0];
+
+		if (is_sec1 && !dst)
+			addr += sizeof(struct talitos_desc);
+		edesc->dma_link_tbl = dma_map_single(dev, addr,
+						     edesc->dma_len,
+						     DMA_BIDIRECTIONAL);
+	}
+	return edesc;
+error_sg:
+	if (iv_dma)
+		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+	return err;
+}
+
+static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
+					      int icv_stashing, bool encrypt)
+{
+	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+	unsigned int authsize = crypto_aead_authsize(authenc);
+	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+	unsigned int ivsize = crypto_aead_ivsize(authenc);
+
+	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+				   iv, areq->assoclen, areq->cryptlen,
+				   authsize, ivsize, icv_stashing,
+				   areq->base.flags, encrypt);
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+	struct talitos_edesc *edesc;
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, req->iv, 0, true);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* set encrypt */
+	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
+
+	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	unsigned int authsize = crypto_aead_authsize(authenc);
+	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
+	struct talitos_edesc *edesc;
+	struct scatterlist *sg;
+	void *icvdata;
+
+	req->cryptlen -= authsize;
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, req->iv, 1, false);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
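+	/*
+	 * let the h/w check the ICV itself when it supports auth check and the
+	 * ICV is reachable (no link tables needed, or link table lengths that
+	 * include the extent); otherwise fall back to the software compare below
+	 */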
+	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+	    ((!edesc->src_nents && !edesc->dst_nents) ||
+	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
+
+		/* decrypt and check the ICV */
+		edesc->desc.hdr = ctx->desc_hdr_template |
+				  DESC_HDR_DIR_INBOUND |
+				  DESC_HDR_MODE1_MDEU_CICV;
+
+		/* reset integrity check result bits */
+
+		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
+	}
+
+	/* Have to check the ICV with software */
+	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+
+	/* stash incoming ICV for later cmp with ICV generated by the h/w */
+	if (edesc->dma_len)
+		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
+						   edesc->dst_nents + 2];
+	else
+		icvdata = &edesc->link_tbl[0];
+
+	sg = sg_last(req->src, edesc->src_nents ? : 1);
+
+	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
+
+	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct device *dev = ctx->dev;
+	u32 tmp[DES_EXPKEY_WORDS];
+
+	if (keylen > TALITOS_MAX_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
+		     CRYPTO_TFM_REQ_WEAK_KEY) &&
+	    !des_ekey(tmp, key)) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
+		return -EINVAL;
+	}
+
+	if (ctx->keylen)
+		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
+	memcpy(&ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+
+	return 0;
+}
+
+static void common_nonsnoop_unmap(struct device *dev,
+				  struct talitos_edesc *edesc,
+				  struct ablkcipher_request *areq)
+{
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+
+	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
+
+	if (edesc->dma_len)
+		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+				 DMA_BIDIRECTIONAL);
+}
+
+static void ablkcipher_done(struct device *dev,
+			    struct talitos_desc *desc, void *context,
+			    int err)
+{
+	struct ablkcipher_request *areq = context;
+	struct talitos_edesc *edesc;
+
+	edesc = container_of(desc, struct talitos_edesc, desc);
+
+	common_nonsnoop_unmap(dev, edesc, areq);
+
+	kfree(edesc);
+
+	areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop(struct talitos_edesc *edesc,
+			   struct ablkcipher_request *areq,
+			   void (*callback) (struct device *dev,
+					     struct talitos_desc *desc,
+					     void *context, int error))
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct device *dev = ctx->dev;
+	struct talitos_desc *desc = &edesc->desc;
+	unsigned int cryptlen = areq->nbytes;
+	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
+	int sg_count, ret;
+	bool sync_needed = false;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	bool is_sec1 = has_ftr_sec1(priv);
+
+	/* first DWORD empty */
+
+	/* cipher iv */
+	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
+
+	/* cipher key */
+	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
+
+	sg_count = edesc->src_nents ?: 1;
+	if (is_sec1 && sg_count > 1)
+		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
+				  cryptlen);
+	else
+		sg_count = dma_map_sg(dev, areq->src, sg_count,
+				      (areq->src == areq->dst) ?
+				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	/*
+	 * cipher in
+	 */
+	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
+				  &desc->ptr[3], sg_count, 0, 0);
+	if (sg_count > 1)
+		sync_needed = true;
+
+	/* cipher out */
+	if (areq->src != areq->dst) {
+		sg_count = edesc->dst_nents ? : 1;
+		if (!is_sec1 || sg_count == 1)
+			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
+	}
+
+	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
+			     sg_count, 0, (edesc->src_nents + 1));
+	if (ret > 1)
+		sync_needed = true;
+
+	/* iv out */
+	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
+			       DMA_FROM_DEVICE);
+
+	/* last DWORD empty */
+
+	if (sync_needed)
+		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+					   edesc->dma_len, DMA_BIDIRECTIONAL);
+
+	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
+	if (ret != -EINPROGRESS) {
+		common_nonsnoop_unmap(dev, edesc, areq);
+		kfree(edesc);
+	}
+	return ret;
+}
+
+static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
+						    areq, bool encrypt)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
+
+	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
+				   areq->base.flags, encrypt);
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct talitos_edesc *edesc;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(areq, true);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* set encrypt */
+	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
+
+	return common_nonsnoop(edesc, areq, ablkcipher_done);
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct talitos_edesc *edesc;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(areq, false);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+
+	return common_nonsnoop(edesc, areq, ablkcipher_done);
+}
+
+static void common_nonsnoop_hash_unmap(struct device *dev,
+				       struct talitos_edesc *edesc,
+				       struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	bool is_sec1 = has_ftr_sec1(priv);
+	struct talitos_desc *desc = &edesc->desc;
+	struct talitos_desc *desc2 = desc + 1;
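+	/* desc2 is the chained SEC1 continuation descriptor, if one was used */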
+
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+	if (desc->next_desc &&
+	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
+		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
+
+	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
+
+	/* When using hashctx-in, must unmap it. */
+	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
+		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
+					 DMA_TO_DEVICE);
+	else if (desc->next_desc)
+		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
+					 DMA_TO_DEVICE);
+
+	if (is_sec1 && req_ctx->nbuf)
+		unmap_single_talitos_ptr(dev, &desc->ptr[3],
+					 DMA_TO_DEVICE);
+
+	if (edesc->dma_len)
+		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+				 DMA_BIDIRECTIONAL);
+
+	if (edesc->desc.next_desc)
+		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
+				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
+}
+
+static void ahash_done(struct device *dev,
+		       struct talitos_desc *desc, void *context,
+		       int err)
+{
+	struct ahash_request *areq = context;
+	struct talitos_edesc *edesc =
+		 container_of(desc, struct talitos_edesc, desc);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	if (!req_ctx->last && req_ctx->to_hash_later) {
+		/* Position any partial block for next update/final/finup */
+		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
+		req_ctx->nbuf = req_ctx->to_hash_later;
+	}
+	common_nonsnoop_hash_unmap(dev, edesc, areq);
+
+	kfree(edesc);
+
+	areq->base.complete(&areq->base, err);
+}
+
+/*
+ * SEC1 doesn't like hashing a zero-sized message, so we do the padding
+ * ourselves and submit a padded block
+ */
+static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
+			       struct talitos_edesc *edesc,
+			       struct talitos_ptr *ptr)
+{
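+	/* one full padding block for an empty message: 0x80 then zeroes */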
+	static u8 padded_hash[64] = {
+		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	};
+
+	pr_err_once("Bug in SEC1, padding ourself\n");
+	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
+	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
+			       (char *)padded_hash, DMA_TO_DEVICE);
+}
+
+static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+				struct ahash_request *areq, unsigned int length,
+				unsigned int offset,
+				void (*callback) (struct device *dev,
+						  struct talitos_desc *desc,
+						  void *context, int error))
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct device *dev = ctx->dev;
+	struct talitos_desc *desc = &edesc->desc;
+	int ret;
+	bool sync_needed = false;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	bool is_sec1 = has_ftr_sec1(priv);
+	int sg_count;
+
+	/* first DWORD empty */
+
+	/* hash context in */
+	if (!req_ctx->first || req_ctx->swinit) {
+		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
+					      req_ctx->hw_context_size,
+					      req_ctx->hw_context,
+					      DMA_TO_DEVICE);
+		req_ctx->swinit = 0;
+	}
+	/* Indicate next op is not the first. */
+	req_ctx->first = 0;
+
+	/* HMAC key */
+	if (ctx->keylen)
+		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
+			       is_sec1);
+
+	if (is_sec1 && req_ctx->nbuf)
+		length -= req_ctx->nbuf;
+
+	sg_count = edesc->src_nents ?: 1;
+	if (is_sec1 && sg_count > 1)
+		sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
+				   edesc->buf + sizeof(struct talitos_desc),
+				   length, req_ctx->nbuf);
+	else if (length)
+		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
+				      DMA_TO_DEVICE);
+	/*
+	 * data in
+	 */
+	if (is_sec1 && req_ctx->nbuf) {
+		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
+				       req_ctx->buf[req_ctx->buf_idx],
+				       DMA_TO_DEVICE);
+	} else {
+		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+					  &desc->ptr[3], sg_count, offset, 0);
+		if (sg_count > 1)
+			sync_needed = true;
+	}
+
+	/* fifth DWORD empty */
+
+	/* hash/HMAC out -or- hash context out */
+	if (req_ctx->last)
+		map_single_talitos_ptr(dev, &desc->ptr[5],
+				       crypto_ahash_digestsize(tfm),
+				       areq->result, DMA_FROM_DEVICE);
+	else
+		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
+					      req_ctx->hw_context_size,
+					      req_ctx->hw_context,
+					      DMA_FROM_DEVICE);
+
+	/* last DWORD empty */
+
+	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
+		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
+
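+	/*
+	 * SEC1 cannot hash the previously buffered bytes (ptr[3] above) and the
+	 * new scatterlist data in a single descriptor, so chain a second
+	 * descriptor that continues the hash over the remaining @length bytes.
+	 */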
+	if (is_sec1 && req_ctx->nbuf && length) {
+		struct talitos_desc *desc2 = desc + 1;
+		dma_addr_t next_desc;
+
+		memset(desc2, 0, sizeof(*desc2));
+		desc2->hdr = desc->hdr;
+		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
+		desc2->hdr1 = desc2->hdr;
+		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
+		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
+		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
+
+		if (desc->ptr[1].ptr)
+			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
+					 is_sec1);
+		else
+			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
+						      req_ctx->hw_context_size,
+						      req_ctx->hw_context,
+						      DMA_TO_DEVICE);
+		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
+		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+					  &desc2->ptr[3], sg_count, offset, 0);
+		if (sg_count > 1)
+			sync_needed = true;
+		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
+		if (req_ctx->last)
+			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
+						      req_ctx->hw_context_size,
+						      req_ctx->hw_context,
+						      DMA_FROM_DEVICE);
+
+		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
+					   DMA_BIDIRECTIONAL);
+		desc->next_desc = cpu_to_be32(next_desc);
+	}
+
+	if (sync_needed)
+		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+					   edesc->dma_len, DMA_BIDIRECTIONAL);
+
+	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
+	if (ret != -EINPROGRESS) {
+		common_nonsnoop_hash_unmap(dev, edesc, areq);
+		kfree(edesc);
+	}
+	return ret;
+}
+
+static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
+					       unsigned int nbytes)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
+	bool is_sec1 = has_ftr_sec1(priv);
+
+	if (is_sec1)
+		nbytes -= req_ctx->nbuf;
+
+	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
+				   nbytes, 0, 0, 0, areq->base.flags, false);
+}
+
+static int ahash_init(struct ahash_request *areq)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct device *dev = ctx->dev;
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	unsigned int size;
+	dma_addr_t dma;
+
+	/* Initialize the context */
+	req_ctx->buf_idx = 0;
+	req_ctx->nbuf = 0;
+	req_ctx->first = 1; /* first indicates h/w must init its context */
+	req_ctx->swinit = 0; /* assume h/w init of context */
+	size =	(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+	req_ctx->hw_context_size = size;
+
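+	/*
+	 * map and immediately unmap: presumably only for cache maintenance, so
+	 * a device read of hw_context through a later _nosync mapping sees any
+	 * CPU-written values (e.g. the sha224 swinit constants)
+	 */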
+	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+			     DMA_TO_DEVICE);
+	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
+
+	return 0;
+}
+
+/*
+ * on h/w without explicit sha224 support, we initialize h/w context
+ * manually with sha224 constants, and tell it to run sha256.
+ */
+static int ahash_init_sha224_swinit(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	req_ctx->hw_context[0] = SHA224_H0;
+	req_ctx->hw_context[1] = SHA224_H1;
+	req_ctx->hw_context[2] = SHA224_H2;
+	req_ctx->hw_context[3] = SHA224_H3;
+	req_ctx->hw_context[4] = SHA224_H4;
+	req_ctx->hw_context[5] = SHA224_H5;
+	req_ctx->hw_context[6] = SHA224_H6;
+	req_ctx->hw_context[7] = SHA224_H7;
+
+	/* init 64-bit count */
+	req_ctx->hw_context[8] = 0;
+	req_ctx->hw_context[9] = 0;
+
+	ahash_init(areq);
+	req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
+
+	return 0;
+}
+
+static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct talitos_edesc *edesc;
+	unsigned int blocksize =
+			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+	unsigned int nbytes_to_hash;
+	unsigned int to_hash_later;
+	unsigned int nsg;
+	int nents;
+	struct device *dev = ctx->dev;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	bool is_sec1 = has_ftr_sec1(priv);
+	int offset = 0;
+	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
+
+	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
+		/* Buffer up to one whole block */
+		nents = sg_nents_for_len(areq->src, nbytes);
+		if (nents < 0) {
+			dev_err(ctx->dev, "Invalid number of src SG.\n");
+			return nents;
+		}
+		sg_copy_to_buffer(areq->src, nents,
+				  ctx_buf + req_ctx->nbuf, nbytes);
+		req_ctx->nbuf += nbytes;
+		return 0;
+	}
+
+	/* At least (blocksize + 1) bytes are available to hash */
+	nbytes_to_hash = nbytes + req_ctx->nbuf;
+	to_hash_later = nbytes_to_hash & (blocksize - 1);
+
+	if (req_ctx->last)
+		to_hash_later = 0;
+	else if (to_hash_later)
+		/* There is a partial block. Hash the full block(s) now */
+		nbytes_to_hash -= to_hash_later;
+	else {
+		/* Keep one block buffered */
+		nbytes_to_hash -= blocksize;
+		to_hash_later = blocksize;
+	}
+
+	/* Chain in any previously buffered data */
+	if (!is_sec1 && req_ctx->nbuf) {
+		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
+		sg_init_table(req_ctx->bufsl, nsg);
+		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
+		if (nsg > 1)
+			sg_chain(req_ctx->bufsl, 2, areq->src);
+		req_ctx->psrc = req_ctx->bufsl;
+	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
+		if (nbytes_to_hash > blocksize)
+			offset = blocksize - req_ctx->nbuf;
+		else
+			offset = nbytes_to_hash - req_ctx->nbuf;
+		nents = sg_nents_for_len(areq->src, offset);
+		if (nents < 0) {
+			dev_err(ctx->dev, "Invalid number of src SG.\n");
+			return nents;
+		}
+		sg_copy_to_buffer(areq->src, nents,
+				  ctx_buf + req_ctx->nbuf, offset);
+		req_ctx->nbuf += offset;
+		req_ctx->psrc = areq->src;
+	} else
+		req_ctx->psrc = areq->src;
+
+	if (to_hash_later) {
+		nents = sg_nents_for_len(areq->src, nbytes);
+		if (nents < 0) {
+			dev_err(ctx->dev, "Invalid number of src SG.\n");
+			return nents;
+		}
+		sg_pcopy_to_buffer(areq->src, nents,
+				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
+				      to_hash_later,
+				      nbytes - to_hash_later);
+	}
+	req_ctx->to_hash_later = to_hash_later;
+
+	/* Allocate extended descriptor */
+	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	edesc->desc.hdr = ctx->desc_hdr_template;
+
+	/* On last one, request SEC to pad; otherwise continue */
+	if (req_ctx->last)
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
+	else
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
+
+	/* request SEC to INIT hash. */
+	if (req_ctx->first && !req_ctx->swinit)
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
+
+	/* When the tfm context has a keylen, it's an HMAC.
+	 * A first or last (i.e. not middle) descriptor must request HMAC.
+	 */
+	if (ctx->keylen && (req_ctx->first || req_ctx->last))
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
+
+	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
+				    ahash_done);
+}
+
+static int ahash_update(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	req_ctx->last = 0;
+
+	return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_final(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	req_ctx->last = 1;
+
+	return ahash_process_req(areq, 0);
+}
+
+static int ahash_finup(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	req_ctx->last = 1;
+
+	return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_digest(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+
+	ahash->init(areq);
+	req_ctx->last = 1;
+
+	return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_export(struct ahash_request *areq, void *out)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct talitos_export_state *export = out;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct device *dev = ctx->dev;
+	dma_addr_t dma;
+
+	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+			     DMA_FROM_DEVICE);
+	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
+
+	memcpy(export->hw_context, req_ctx->hw_context,
+	       req_ctx->hw_context_size);
+	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
+	export->swinit = req_ctx->swinit;
+	export->first = req_ctx->first;
+	export->last = req_ctx->last;
+	export->to_hash_later = req_ctx->to_hash_later;
+	export->nbuf = req_ctx->nbuf;
+
+	return 0;
+}
+
+static int ahash_import(struct ahash_request *areq, const void *in)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct device *dev = ctx->dev;
+	const struct talitos_export_state *export = in;
+	unsigned int size;
+	dma_addr_t dma;
+
+	memset(req_ctx, 0, sizeof(*req_ctx));
+	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+	req_ctx->hw_context_size = size;
+	memcpy(req_ctx->hw_context, export->hw_context, size);
+	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
+	req_ctx->swinit = export->swinit;
+	req_ctx->first = export->first;
+	req_ctx->last = export->last;
+	req_ctx->to_hash_later = export->to_hash_later;
+	req_ctx->nbuf = export->nbuf;
+
+	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+			     DMA_TO_DEVICE);
+	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
+
+	return 0;
+}
+
+static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
+		   u8 *hash)
+{
+	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+
+	struct scatterlist sg[1];
+	struct ahash_request *req;
+	struct crypto_wait wait;
+	int ret;
+
+	crypto_init_wait(&wait);
+
+	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	/* Keep tfm keylen == 0 during hash of the long key */
+	ctx->keylen = 0;
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   crypto_req_done, &wait);
+
+	sg_init_one(&sg[0], key, keylen);
+
+	ahash_request_set_crypt(req, sg, hash, keylen);
+	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+
+	ahash_request_free(req);
+
+	return ret;
+}
+
+static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+	struct device *dev = ctx->dev;
+	unsigned int blocksize =
+			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+	unsigned int digestsize = crypto_ahash_digestsize(tfm);
+	unsigned int keysize = keylen;
+	u8 hash[SHA512_DIGEST_SIZE];
+	int ret;
+
+	if (keylen <= blocksize)
+		memcpy(ctx->key, key, keysize);
+	else {
+		/* Must get the hash of the long key */
+		ret = keyhash(tfm, key, keylen, hash);
+
+		if (ret) {
+			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+			return -EINVAL;
+		}
+
+		keysize = digestsize;
+		memcpy(ctx->key, hash, digestsize);
+	}
+
+	if (ctx->keylen)
+		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
+	ctx->keylen = keysize;
+	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
+
+	return 0;
+}
+
+
+struct talitos_alg_template {
+	u32 type;
+	u32 priority;
+	union {
+		struct crypto_alg crypto;
+		struct ahash_alg hash;
+		struct aead_alg aead;
+	} alg;
+	__be32 desc_hdr_template;
+};
+
+static struct talitos_alg_template driver_algs[] = {
+	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+			             DESC_HDR_SEL0_AESU |
+		                     DESC_HDR_MODE0_AESU_CBC |
+		                     DESC_HDR_SEL1_MDEUA |
+		                     DESC_HDR_MODE1_MDEU_INIT |
+		                     DESC_HDR_MODE1_MDEU_PAD |
+		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_AESU |
+				     DESC_HDR_MODE0_AESU_CBC |
+				     DESC_HDR_SEL1_MDEUA |
+				     DESC_HDR_MODE1_MDEU_INIT |
+				     DESC_HDR_MODE1_MDEU_PAD |
+				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+			             DESC_HDR_SEL0_DEU |
+		                     DESC_HDR_MODE0_DEU_CBC |
+		                     DESC_HDR_MODE0_DEU_3DES |
+		                     DESC_HDR_SEL1_MDEUA |
+		                     DESC_HDR_MODE1_MDEU_INIT |
+		                     DESC_HDR_MODE1_MDEU_PAD |
+		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_DEU |
+				     DESC_HDR_MODE0_DEU_CBC |
+				     DESC_HDR_MODE0_DEU_3DES |
+				     DESC_HDR_SEL1_MDEUA |
+				     DESC_HDR_MODE1_MDEU_INIT |
+				     DESC_HDR_MODE1_MDEU_PAD |
+				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
+	},
+	{       .type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+				     DESC_HDR_SEL0_AESU |
+				     DESC_HDR_MODE0_AESU_CBC |
+				     DESC_HDR_SEL1_MDEUA |
+				     DESC_HDR_MODE1_MDEU_INIT |
+				     DESC_HDR_MODE1_MDEU_PAD |
+				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
+	},
+	{       .type = CRYPTO_ALG_TYPE_AEAD,
+		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_AESU |
+				     DESC_HDR_MODE0_AESU_CBC |
+				     DESC_HDR_SEL1_MDEUA |
+				     DESC_HDR_MODE1_MDEU_INIT |
+				     DESC_HDR_MODE1_MDEU_PAD |
+				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+			             DESC_HDR_SEL0_DEU |
+		                     DESC_HDR_MODE0_DEU_CBC |
+		                     DESC_HDR_MODE0_DEU_3DES |
+		                     DESC_HDR_SEL1_MDEUA |
+		                     DESC_HDR_MODE1_MDEU_INIT |
+		                     DESC_HDR_MODE1_MDEU_PAD |
+		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_DEU |
+				     DESC_HDR_MODE0_DEU_CBC |
+				     DESC_HDR_MODE0_DEU_3DES |
+				     DESC_HDR_SEL1_MDEUA |
+				     DESC_HDR_MODE1_MDEU_INIT |
+				     DESC_HDR_MODE1_MDEU_PAD |
+				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+			             DESC_HDR_SEL0_AESU |
+		                     DESC_HDR_MODE0_AESU_CBC |
+		                     DESC_HDR_SEL1_MDEUA |
+		                     DESC_HDR_MODE1_MDEU_INIT |
+		                     DESC_HDR_MODE1_MDEU_PAD |
+		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_AESU |
+				     DESC_HDR_MODE0_AESU_CBC |
+				     DESC_HDR_SEL1_MDEUA |
+				     DESC_HDR_MODE1_MDEU_INIT |
+				     DESC_HDR_MODE1_MDEU_PAD |
+				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+			             DESC_HDR_SEL0_DEU |
+		                     DESC_HDR_MODE0_DEU_CBC |
+		                     DESC_HDR_MODE0_DEU_3DES |
+		                     DESC_HDR_SEL1_MDEUA |
+		                     DESC_HDR_MODE1_MDEU_INIT |
+		                     DESC_HDR_MODE1_MDEU_PAD |
+		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_DEU |
+				     DESC_HDR_MODE0_DEU_CBC |
+				     DESC_HDR_MODE0_DEU_3DES |
+				     DESC_HDR_SEL1_MDEUA |
+				     DESC_HDR_MODE1_MDEU_INIT |
+				     DESC_HDR_MODE1_MDEU_PAD |
+				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+			             DESC_HDR_SEL0_AESU |
+		                     DESC_HDR_MODE0_AESU_CBC |
+		                     DESC_HDR_SEL1_MDEUB |
+		                     DESC_HDR_MODE1_MDEU_INIT |
+		                     DESC_HDR_MODE1_MDEU_PAD |
+		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+			             DESC_HDR_SEL0_DEU |
+		                     DESC_HDR_MODE0_DEU_CBC |
+		                     DESC_HDR_MODE0_DEU_3DES |
+		                     DESC_HDR_SEL1_MDEUB |
+		                     DESC_HDR_MODE1_MDEU_INIT |
+		                     DESC_HDR_MODE1_MDEU_PAD |
+		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+			             DESC_HDR_SEL0_AESU |
+		                     DESC_HDR_MODE0_AESU_CBC |
+		                     DESC_HDR_SEL1_MDEUB |
+		                     DESC_HDR_MODE1_MDEU_INIT |
+		                     DESC_HDR_MODE1_MDEU_PAD |
+		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+			             DESC_HDR_SEL0_DEU |
+		                     DESC_HDR_MODE0_DEU_CBC |
+		                     DESC_HDR_MODE0_DEU_3DES |
+		                     DESC_HDR_SEL1_MDEUB |
+		                     DESC_HDR_MODE1_MDEU_INIT |
+		                     DESC_HDR_MODE1_MDEU_PAD |
+		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+			             DESC_HDR_SEL0_AESU |
+		                     DESC_HDR_MODE0_AESU_CBC |
+		                     DESC_HDR_SEL1_MDEUA |
+		                     DESC_HDR_MODE1_MDEU_INIT |
+		                     DESC_HDR_MODE1_MDEU_PAD |
+		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_AESU |
+				     DESC_HDR_MODE0_AESU_CBC |
+				     DESC_HDR_SEL1_MDEUA |
+				     DESC_HDR_MODE1_MDEU_INIT |
+				     DESC_HDR_MODE1_MDEU_PAD |
+				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+			             DESC_HDR_SEL0_DEU |
+		                     DESC_HDR_MODE0_DEU_CBC |
+		                     DESC_HDR_MODE0_DEU_3DES |
+		                     DESC_HDR_SEL1_MDEUA |
+		                     DESC_HDR_MODE1_MDEU_INIT |
+		                     DESC_HDR_MODE1_MDEU_PAD |
+		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_DEU |
+				     DESC_HDR_MODE0_DEU_CBC |
+				     DESC_HDR_MODE0_DEU_3DES |
+				     DESC_HDR_SEL1_MDEUA |
+				     DESC_HDR_MODE1_MDEU_INIT |
+				     DESC_HDR_MODE1_MDEU_PAD |
+				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
+	},
+	/* ABLKCIPHER algorithms. */
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "ecb-aes-talitos",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_ablkcipher = {
+				.min_keysize = AES_MIN_KEY_SIZE,
+				.max_keysize = AES_MAX_KEY_SIZE,
+				.ivsize = AES_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_AESU,
+	},
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "cbc-aes-talitos",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+                                     CRYPTO_ALG_ASYNC,
+			.cra_ablkcipher = {
+				.min_keysize = AES_MIN_KEY_SIZE,
+				.max_keysize = AES_MAX_KEY_SIZE,
+				.ivsize = AES_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_AESU |
+				     DESC_HDR_MODE0_AESU_CBC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "ctr(aes)",
+			.cra_driver_name = "ctr-aes-talitos",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_ablkcipher = {
+				.min_keysize = AES_MIN_KEY_SIZE,
+				.max_keysize = AES_MAX_KEY_SIZE,
+				.ivsize = AES_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
+				     DESC_HDR_SEL0_AESU |
+				     DESC_HDR_MODE0_AESU_CTR,
+	},
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "ecb(des)",
+			.cra_driver_name = "ecb-des-talitos",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_ablkcipher = {
+				.min_keysize = DES_KEY_SIZE,
+				.max_keysize = DES_KEY_SIZE,
+				.ivsize = DES_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_DEU,
+	},
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "cbc(des)",
+			.cra_driver_name = "cbc-des-talitos",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_ablkcipher = {
+				.min_keysize = DES_KEY_SIZE,
+				.max_keysize = DES_KEY_SIZE,
+				.ivsize = DES_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_DEU |
+				     DESC_HDR_MODE0_DEU_CBC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "ecb(des3_ede)",
+			.cra_driver_name = "ecb-3des-talitos",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				     CRYPTO_ALG_ASYNC,
+			.cra_ablkcipher = {
+				.min_keysize = DES3_EDE_KEY_SIZE,
+				.max_keysize = DES3_EDE_KEY_SIZE,
+				.ivsize = DES3_EDE_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_DEU |
+				     DESC_HDR_MODE0_DEU_3DES,
+	},
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "cbc-3des-talitos",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+                                     CRYPTO_ALG_ASYNC,
+			.cra_ablkcipher = {
+				.min_keysize = DES3_EDE_KEY_SIZE,
+				.max_keysize = DES3_EDE_KEY_SIZE,
+				.ivsize = DES3_EDE_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+			             DESC_HDR_SEL0_DEU |
+		                     DESC_HDR_MODE0_DEU_CBC |
+		                     DESC_HDR_MODE0_DEU_3DES,
+	},
+	/* AHASH algorithms. */
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.halg.digestsize = MD5_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
+			.halg.base = {
+				.cra_name = "md5",
+				.cra_driver_name = "md5-talitos",
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_MD5,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
+			.halg.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "sha1-talitos",
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA1,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.halg.digestsize = SHA224_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
+			.halg.base = {
+				.cra_name = "sha224",
+				.cra_driver_name = "sha224-talitos",
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA224,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.halg.digestsize = SHA256_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
+			.halg.base = {
+				.cra_name = "sha256",
+				.cra_driver_name = "sha256-talitos",
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA256,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.halg.digestsize = SHA384_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
+			.halg.base = {
+				.cra_name = "sha384",
+				.cra_driver_name = "sha384-talitos",
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUB |
+				     DESC_HDR_MODE0_MDEUB_SHA384,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.halg.digestsize = SHA512_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
+			.halg.base = {
+				.cra_name = "sha512",
+				.cra_driver_name = "sha512-talitos",
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUB |
+				     DESC_HDR_MODE0_MDEUB_SHA512,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.halg.digestsize = MD5_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
+			.halg.base = {
+				.cra_name = "hmac(md5)",
+				.cra_driver_name = "hmac-md5-talitos",
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_MD5,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
+			.halg.base = {
+				.cra_name = "hmac(sha1)",
+				.cra_driver_name = "hmac-sha1-talitos",
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA1,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.halg.digestsize = SHA224_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
+			.halg.base = {
+				.cra_name = "hmac(sha224)",
+				.cra_driver_name = "hmac-sha224-talitos",
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA224,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.halg.digestsize = SHA256_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
+			.halg.base = {
+				.cra_name = "hmac(sha256)",
+				.cra_driver_name = "hmac-sha256-talitos",
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA256,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.halg.digestsize = SHA384_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
+			.halg.base = {
+				.cra_name = "hmac(sha384)",
+				.cra_driver_name = "hmac-sha384-talitos",
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUB |
+				     DESC_HDR_MODE0_MDEUB_SHA384,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.halg.digestsize = SHA512_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
+			.halg.base = {
+				.cra_name = "hmac(sha512)",
+				.cra_driver_name = "hmac-sha512-talitos",
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUB |
+				     DESC_HDR_MODE0_MDEUB_SHA512,
+	}
+};
+
+struct talitos_crypto_alg {
+	struct list_head entry;
+	struct device *dev;
+	struct talitos_alg_template algt;
+};
+
+static int talitos_init_common(struct talitos_ctx *ctx,
+			       struct talitos_crypto_alg *talitos_alg)
+{
+	struct talitos_private *priv;
+
+	/* update context with ptr to dev */
+	ctx->dev = talitos_alg->dev;
+
+	/* assign SEC channel to tfm in round-robin fashion */
+	priv = dev_get_drvdata(ctx->dev);
+	ctx->ch = atomic_inc_return(&priv->last_chan) &
+		  (priv->num_channels - 1);
+
+	/* copy descriptor header template value */
+	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
+
+	/* select done notification */
+	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
+
+	return 0;
+}
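+
+/*
+ * For illustration: talitos_probe() only accepts a power-of-two
+ * "fsl,num-channels", so the mask above is a cheap modulo.  With four
+ * channels, successive tfm initialisations get ctx->ch = 1, 2, 3, 0, 1, ...
+ * as the atomic counter advances, spreading requests across the SEC
+ * channels.
+ */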
+
+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct talitos_crypto_alg *talitos_alg;
+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+		talitos_alg = container_of(__crypto_ahash_alg(alg),
+					   struct talitos_crypto_alg,
+					   algt.alg.hash);
+	else
+		talitos_alg = container_of(alg, struct talitos_crypto_alg,
+					   algt.alg.crypto);
+
+	return talitos_init_common(ctx, talitos_alg);
+}
+
+static int talitos_cra_init_aead(struct crypto_aead *tfm)
+{
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct talitos_crypto_alg *talitos_alg;
+	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+
+	talitos_alg = container_of(alg, struct talitos_crypto_alg,
+				   algt.alg.aead);
+
+	return talitos_init_common(ctx, talitos_alg);
+}
+
+static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+{
+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	talitos_cra_init(tfm);
+
+	ctx->keylen = 0;
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct talitos_ahash_req_ctx));
+
+	return 0;
+}
+
+static void talitos_cra_exit(struct crypto_tfm *tfm)
+{
+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct device *dev = ctx->dev;
+
+	if (ctx->keylen)
+		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+}
+
+/*
+ * given the alg's descriptor header template, determine whether descriptor
+ * type and primary/secondary execution units required match the hw
+ * capabilities description provided in the device tree node.
+ */
+static int hw_supports(struct device *dev, __be32 desc_hdr_template)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	int ret;
+
+	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
+	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
+
+	if (SECONDARY_EU(desc_hdr_template))
+		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
+		              & priv->exec_units);
+
+	return ret;
+}
+
+static int talitos_remove(struct platform_device *ofdev)
+{
+	struct device *dev = &ofdev->dev;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	struct talitos_crypto_alg *t_alg, *n;
+	int i;
+
+	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
+		switch (t_alg->algt.type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			crypto_unregister_alg(&t_alg->algt.alg.crypto);
+			break;
+		case CRYPTO_ALG_TYPE_AEAD:
+			crypto_unregister_aead(&t_alg->algt.alg.aead);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&t_alg->algt.alg.hash);
+			break;
+		}
+		list_del(&t_alg->entry);
+	}
+
+	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
+		talitos_unregister_rng(dev);
+
+	for (i = 0; i < 2; i++)
+		if (priv->irq[i]) {
+			free_irq(priv->irq[i], dev);
+			irq_dispose_mapping(priv->irq[i]);
+		}
+
+	tasklet_kill(&priv->done_task[0]);
+	if (priv->irq[1])
+		tasklet_kill(&priv->done_task[1]);
+
+	return 0;
+}
+
+static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+						    struct talitos_alg_template
+						           *template)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	struct talitos_crypto_alg *t_alg;
+	struct crypto_alg *alg;
+
+	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
+			     GFP_KERNEL);
+	if (!t_alg)
+		return ERR_PTR(-ENOMEM);
+
+	t_alg->algt = *template;
+
+	switch (t_alg->algt.type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg = &t_alg->algt.alg.crypto;
+		alg->cra_init = talitos_cra_init;
+		alg->cra_exit = talitos_cra_exit;
+		alg->cra_type = &crypto_ablkcipher_type;
+		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
+		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
+		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
+		alg->cra_ablkcipher.geniv = "eseqiv";
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+		alg = &t_alg->algt.alg.aead.base;
+		alg->cra_exit = talitos_cra_exit;
+		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
+		t_alg->algt.alg.aead.setkey = aead_setkey;
+		t_alg->algt.alg.aead.encrypt = aead_encrypt;
+		t_alg->algt.alg.aead.decrypt = aead_decrypt;
+		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
+			devm_kfree(dev, t_alg);
+			return ERR_PTR(-ENOTSUPP);
+		}
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		alg = &t_alg->algt.alg.hash.halg.base;
+		alg->cra_init = talitos_cra_init_ahash;
+		alg->cra_exit = talitos_cra_exit;
+		t_alg->algt.alg.hash.init = ahash_init;
+		t_alg->algt.alg.hash.update = ahash_update;
+		t_alg->algt.alg.hash.final = ahash_final;
+		t_alg->algt.alg.hash.finup = ahash_finup;
+		t_alg->algt.alg.hash.digest = ahash_digest;
+		if (!strncmp(alg->cra_name, "hmac", 4))
+			t_alg->algt.alg.hash.setkey = ahash_setkey;
+		t_alg->algt.alg.hash.import = ahash_import;
+		t_alg->algt.alg.hash.export = ahash_export;
+
+		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
+		    !strncmp(alg->cra_name, "hmac", 4)) {
+			devm_kfree(dev, t_alg);
+			return ERR_PTR(-ENOTSUPP);
+		}
+		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+		    (!strcmp(alg->cra_name, "sha224") ||
+		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
+			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
+			t_alg->algt.desc_hdr_template =
+					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+					DESC_HDR_SEL0_MDEUA |
+					DESC_HDR_MODE0_MDEU_SHA256;
+		}
+		break;
+	default:
+		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
+		devm_kfree(dev, t_alg);
+		return ERR_PTR(-EINVAL);
+	}
+
+	alg->cra_module = THIS_MODULE;
+	if (t_alg->algt.priority)
+		alg->cra_priority = t_alg->algt.priority;
+	else
+		alg->cra_priority = TALITOS_CRA_PRIORITY;
+	alg->cra_alignmask = 0;
+	alg->cra_ctxsize = sizeof(struct talitos_ctx);
+	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+	t_alg->dev = dev;
+
+	return t_alg;
+}
+
+static int talitos_probe_irq(struct platform_device *ofdev)
+{
+	struct device *dev = &ofdev->dev;
+	struct device_node *np = ofdev->dev.of_node;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	int err;
+	bool is_sec1 = has_ftr_sec1(priv);
+
+	priv->irq[0] = irq_of_parse_and_map(np, 0);
+	if (!priv->irq[0]) {
+		dev_err(dev, "failed to map irq\n");
+		return -EINVAL;
+	}
+	if (is_sec1) {
+		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
+				  dev_driver_string(dev), dev);
+		goto primary_out;
+	}
+
+	priv->irq[1] = irq_of_parse_and_map(np, 1);
+
+	/* get the primary irq line */
+	if (!priv->irq[1]) {
+		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
+				  dev_driver_string(dev), dev);
+		goto primary_out;
+	}
+
+	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
+			  dev_driver_string(dev), dev);
+	if (err)
+		goto primary_out;
+
+	/* get the secondary irq line */
+	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
+			  dev_driver_string(dev), dev);
+	if (err) {
+		dev_err(dev, "failed to request secondary irq\n");
+		irq_dispose_mapping(priv->irq[1]);
+		priv->irq[1] = 0;
+	}
+
+	return err;
+
+primary_out:
+	if (err) {
+		dev_err(dev, "failed to request primary irq\n");
+		irq_dispose_mapping(priv->irq[0]);
+		priv->irq[0] = 0;
+	}
+
+	return err;
+}
+
+static int talitos_probe(struct platform_device *ofdev)
+{
+	struct device *dev = &ofdev->dev;
+	struct device_node *np = ofdev->dev.of_node;
+	struct talitos_private *priv;
+	int i, err;
+	int stride;
+	struct resource *res;
+
+	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&priv->alg_list);
+
+	dev_set_drvdata(dev, priv);
+
+	priv->ofdev = ofdev;
+
+	spin_lock_init(&priv->reg_lock);
+
+	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENXIO;
+	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
+	if (!priv->reg) {
+		dev_err(dev, "failed to of_iomap\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	/* get SEC version capabilities from device tree */
+	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
+	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
+	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
+	of_property_read_u32(np, "fsl,descriptor-types-mask",
+			     &priv->desc_types);
+
+	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
+	    !priv->exec_units || !priv->desc_types) {
+		dev_err(dev, "invalid property data in device tree node\n");
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	if (of_device_is_compatible(np, "fsl,sec3.0"))
+		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
+
+	if (of_device_is_compatible(np, "fsl,sec2.1"))
+		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
+				  TALITOS_FTR_SHA224_HWINIT |
+				  TALITOS_FTR_HMAC_OK;
+
+	if (of_device_is_compatible(np, "fsl,sec1.0"))
+		priv->features |= TALITOS_FTR_SEC1;
+
+	if (of_device_is_compatible(np, "fsl,sec1.2")) {
+		priv->reg_deu = priv->reg + TALITOS12_DEU;
+		priv->reg_aesu = priv->reg + TALITOS12_AESU;
+		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
+		stride = TALITOS1_CH_STRIDE;
+	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
+		priv->reg_deu = priv->reg + TALITOS10_DEU;
+		priv->reg_aesu = priv->reg + TALITOS10_AESU;
+		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
+		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
+		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
+		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
+		stride = TALITOS1_CH_STRIDE;
+	} else {
+		priv->reg_deu = priv->reg + TALITOS2_DEU;
+		priv->reg_aesu = priv->reg + TALITOS2_AESU;
+		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
+		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
+		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
+		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
+		priv->reg_keu = priv->reg + TALITOS2_KEU;
+		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
+		stride = TALITOS2_CH_STRIDE;
+	}
+
+	err = talitos_probe_irq(ofdev);
+	if (err)
+		goto err_out;
+
+	if (of_device_is_compatible(np, "fsl,sec1.0")) {
+		if (priv->num_channels == 1)
+			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
+				     (unsigned long)dev);
+		else
+			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
+				     (unsigned long)dev);
+	} else {
+		if (priv->irq[1]) {
+			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
+				     (unsigned long)dev);
+			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
+				     (unsigned long)dev);
+		} else if (priv->num_channels == 1) {
+			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
+				     (unsigned long)dev);
+		} else {
+			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
+				     (unsigned long)dev);
+		}
+	}
+
+	priv->chan = devm_kcalloc(dev,
+				  priv->num_channels,
+				  sizeof(struct talitos_channel),
+				  GFP_KERNEL);
+	if (!priv->chan) {
+		dev_err(dev, "failed to allocate channel management space\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
+
+	for (i = 0; i < priv->num_channels; i++) {
+		priv->chan[i].reg = priv->reg + stride * (i + 1);
+		if (!priv->irq[1] || !(i & 1))
+			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
+
+		spin_lock_init(&priv->chan[i].head_lock);
+		spin_lock_init(&priv->chan[i].tail_lock);
+
+		priv->chan[i].fifo = devm_kcalloc(dev,
+						priv->fifo_len,
+						sizeof(struct talitos_request),
+						GFP_KERNEL);
+		if (!priv->chan[i].fifo) {
+			dev_err(dev, "failed to allocate request fifo %d\n", i);
+			err = -ENOMEM;
+			goto err_out;
+		}
+
+		atomic_set(&priv->chan[i].submit_count,
+			   -(priv->chfifo_len - 1));
+	}
+
+	dma_set_mask(dev, DMA_BIT_MASK(36));
+
+	/* reset and initialize the h/w */
+	err = init_device(dev);
+	if (err) {
+		dev_err(dev, "failed to initialize device\n");
+		goto err_out;
+	}
+
+	/* register the RNG, if available */
+	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
+		err = talitos_register_rng(dev);
+		if (err) {
+			dev_err(dev, "failed to register hwrng: %d\n", err);
+			goto err_out;
+		} else
+			dev_info(dev, "hwrng\n");
+	}
+
+	/* register crypto algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
+			struct talitos_crypto_alg *t_alg;
+			struct crypto_alg *alg = NULL;
+
+			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
+			if (IS_ERR(t_alg)) {
+				err = PTR_ERR(t_alg);
+				if (err == -ENOTSUPP)
+					continue;
+				goto err_out;
+			}
+
+			switch (t_alg->algt.type) {
+			case CRYPTO_ALG_TYPE_ABLKCIPHER:
+				err = crypto_register_alg(
+						&t_alg->algt.alg.crypto);
+				alg = &t_alg->algt.alg.crypto;
+				break;
+
+			case CRYPTO_ALG_TYPE_AEAD:
+				err = crypto_register_aead(
+					&t_alg->algt.alg.aead);
+				alg = &t_alg->algt.alg.aead.base;
+				break;
+
+			case CRYPTO_ALG_TYPE_AHASH:
+				err = crypto_register_ahash(
+						&t_alg->algt.alg.hash);
+				alg = &t_alg->algt.alg.hash.halg.base;
+				break;
+			}
+			if (err) {
+				dev_err(dev, "%s alg registration failed\n",
+					alg->cra_driver_name);
+				devm_kfree(dev, t_alg);
+			} else
+				list_add_tail(&t_alg->entry, &priv->alg_list);
+		}
+	}
+	if (!list_empty(&priv->alg_list))
+		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
+			 (char *)of_get_property(np, "compatible", NULL));
+
+	return 0;
+
+err_out:
+	talitos_remove(ofdev);
+
+	return err;
+}
+
+static const struct of_device_id talitos_match[] = {
+#ifdef CONFIG_CRYPTO_DEV_TALITOS1
+	{
+		.compatible = "fsl,sec1.0",
+	},
+#endif
+#ifdef CONFIG_CRYPTO_DEV_TALITOS2
+	{
+		.compatible = "fsl,sec2.0",
+	},
+#endif
+	{},
+};
+MODULE_DEVICE_TABLE(of, talitos_match);
+
+static struct platform_driver talitos_driver = {
+	.driver = {
+		.name = "talitos",
+		.of_match_table = talitos_match,
+	},
+	.probe = talitos_probe,
+	.remove = talitos_remove,
+};
+
+module_platform_driver(talitos_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
+MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
new file mode 100644
index 0000000..a65a63e
--- /dev/null
+++ b/drivers/crypto/talitos.h
@@ -0,0 +1,416 @@
+/*
+ * Freescale SEC (talitos) device register and descriptor header defines
+ *
+ * Copyright (c) 2006-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define TALITOS_TIMEOUT 100000
+#define TALITOS1_MAX_DATA_LEN 32768
+#define TALITOS2_MAX_DATA_LEN 65535
+
+#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
+#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
+#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
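+
+/*
+ * For illustration: a template such as DESC_HDR_TYPE_IPSEC_ESP |
+ * DESC_HDR_SEL0_AESU | DESC_HDR_SEL1_MDEUA decodes as DESC_TYPE() == 1,
+ * PRIMARY_EU() == 6 and SECONDARY_EU() == 3; hw_supports() checks those
+ * bit positions against the desc_types and exec_units masks read from the
+ * device tree.
+ */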
+
+/* descriptor pointer entry */
+struct talitos_ptr {
+	union {
+		struct {		/* SEC2 format */
+			__be16 len;     /* length */
+			u8 j_extent;    /* jump to sg link table and/or extent */
+			u8 eptr;        /* extended address */
+		};
+		struct {			/* SEC1 format */
+			__be16 res;
+			__be16 len1;	/* length */
+		};
+	};
+	__be32 ptr;     /* address */
+};
+
+/* descriptor */
+struct talitos_desc {
+	__be32 hdr;                     /* header high bits */
+	union {
+		__be32 hdr_lo;		/* header low bits */
+		__be32 hdr1;		/* header for SEC1 */
+	};
+	struct talitos_ptr ptr[7];      /* ptr/len pair array */
+	__be32 next_desc;		/* next descriptor (SEC1) */
+};
+
+#define TALITOS_DESC_SIZE	(sizeof(struct talitos_desc) - sizeof(__be32))
+
+/**
+ * talitos_request - descriptor submission request
+ * @desc: descriptor pointer (kernel virtual)
+ * @dma_desc: descriptor's physical bus address
+ * @callback: whom to call when descriptor processing is done
+ * @context: caller context (optional)
+ */
+struct talitos_request {
+	struct talitos_desc *desc;
+	dma_addr_t dma_desc;
+	void (*callback) (struct device *dev, struct talitos_desc *desc,
+			  void *context, int error);
+	void *context;
+};
+
+/* per-channel fifo management */
+struct talitos_channel {
+	void __iomem *reg;
+
+	/* request fifo */
+	struct talitos_request *fifo;
+
+	/* number of requests pending in channel h/w fifo */
+	atomic_t submit_count ____cacheline_aligned;
+
+	/* request submission (head) lock */
+	spinlock_t head_lock ____cacheline_aligned;
+	/* index to next free descriptor request */
+	int head;
+
+	/* request release (tail) lock */
+	spinlock_t tail_lock ____cacheline_aligned;
+	/* index to next in-progress/done descriptor request */
+	int tail;
+};
+
+struct talitos_private {
+	struct device *dev;
+	struct platform_device *ofdev;
+	void __iomem *reg;
+	void __iomem *reg_deu;
+	void __iomem *reg_aesu;
+	void __iomem *reg_mdeu;
+	void __iomem *reg_afeu;
+	void __iomem *reg_rngu;
+	void __iomem *reg_pkeu;
+	void __iomem *reg_keu;
+	void __iomem *reg_crcu;
+	int irq[2];
+
+	/* SEC global registers lock  */
+	spinlock_t reg_lock ____cacheline_aligned;
+
+	/* SEC version geometry (from device tree node) */
+	unsigned int num_channels;
+	unsigned int chfifo_len;
+	unsigned int exec_units;
+	unsigned int desc_types;
+
+	/* SEC Compatibility info */
+	unsigned long features;
+
+	/*
+	 * length of the request fifo
+	 * fifo_len is chfifo_len rounded up to next power of 2
+	 * so we can use bitwise ops to wrap
+	 */
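+	/*
+	 * For illustration: a chfifo_len of 24 would give a fifo_len of 32,
+	 * so a head or tail index can wrap with "(idx + 1) & (fifo_len - 1)"
+	 * instead of a modulo.
+	 */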
+	unsigned int fifo_len;
+
+	struct talitos_channel *chan;
+
+	/* next channel to be assigned next incoming descriptor */
+	atomic_t last_chan ____cacheline_aligned;
+
+	/* request callback tasklet */
+	struct tasklet_struct done_task[2];
+
+	/* list of registered algorithms */
+	struct list_head alg_list;
+
+	/* hwrng device */
+	struct hwrng rng;
+	bool rng_registered;
+};
+
+extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
+			  void (*callback)(struct device *dev,
+					   struct talitos_desc *desc,
+					   void *context, int error),
+			  void *context);
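+
+/*
+ * For illustration (hypothetical caller, names not from the driver): a
+ * client fills in a talitos_desc and queues it on its assigned channel with
+ *
+ *	err = talitos_submit(dev, ctx->ch, desc, my_done_callback, req);
+ *
+ * and my_done_callback() is later invoked from the channel's done tasklet
+ * with the descriptor's completion status in "error".
+ */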
+
+/* .features flag */
+#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
+#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
+#define TALITOS_FTR_SHA224_HWINIT 0x00000004
+#define TALITOS_FTR_HMAC_OK 0x00000008
+#define TALITOS_FTR_SEC1 0x00000010
+
+/*
+ * If both CONFIG_CRYPTO_DEV_TALITOS1 and CONFIG_CRYPTO_DEV_TALITOS2 are
+ * defined, we check the features which are set according to the device tree.
+ * Otherwise, we answer true or false directly
+ */
+static inline bool has_ftr_sec1(struct talitos_private *priv)
+{
+#if defined(CONFIG_CRYPTO_DEV_TALITOS1) && defined(CONFIG_CRYPTO_DEV_TALITOS2)
+	return priv->features & TALITOS_FTR_SEC1 ? true : false;
+#elif defined(CONFIG_CRYPTO_DEV_TALITOS1)
+	return true;
+#else
+	return false;
+#endif
+}
+
+/*
+ * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register
+ */
+
+#define ISR1_FORMAT(x)			(((x) << 28) | ((x) << 16))
+#define ISR2_FORMAT(x)			(((x) << 4) | (x))
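+
+/*
+ * For illustration: ISR1_FORMAT(0x5) expands to 0x50050000 and
+ * ISR2_FORMAT(0x5) to 0x00000055, matching the per-channel bit layout of
+ * the SEC1 and SEC2 interrupt registers respectively.
+ */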
+
+/* global register offset addresses */
+#define TALITOS_MCR			0x1030  /* master control register */
+#define   TALITOS_MCR_RCA0		(1 << 15) /* remap channel 0 */
+#define   TALITOS_MCR_RCA1		(1 << 14) /* remap channel 1 */
+#define   TALITOS_MCR_RCA2		(1 << 13) /* remap channel 2 */
+#define   TALITOS_MCR_RCA3		(1 << 12) /* remap channel 3 */
+#define   TALITOS1_MCR_SWR		0x1000000     /* s/w reset */
+#define   TALITOS2_MCR_SWR		0x1     /* s/w reset */
+#define TALITOS_MCR_LO			0x1034
+#define TALITOS_IMR			0x1008  /* interrupt mask register */
+/* enable channel IRQs */
+#define   TALITOS1_IMR_INIT		ISR1_FORMAT(0xf)
+#define   TALITOS1_IMR_DONE		ISR1_FORMAT(0x5) /* done IRQs */
+/* enable channel IRQs */
+#define   TALITOS2_IMR_INIT		(ISR2_FORMAT(0xf) | 0x10000)
+#define   TALITOS2_IMR_DONE		ISR1_FORMAT(0x5) /* done IRQs */
+#define TALITOS_IMR_LO			0x100C
+#define   TALITOS1_IMR_LO_INIT		0x2000000 /* allow RNGU error IRQs */
+#define   TALITOS2_IMR_LO_INIT		0x20000 /* allow RNGU error IRQs */
+#define TALITOS_ISR			0x1010  /* interrupt status register */
+#define   TALITOS1_ISR_4CHERR		ISR1_FORMAT(0xa) /* 4 ch errors mask */
+#define   TALITOS1_ISR_4CHDONE		ISR1_FORMAT(0x5) /* 4 ch done mask */
+#define   TALITOS1_ISR_CH_0_ERR		(2 << 28) /* ch 0 errors mask */
+#define   TALITOS1_ISR_CH_0_DONE	(1 << 28) /* ch 0 done mask */
+#define   TALITOS1_ISR_TEA_ERR		0x00000040
+#define   TALITOS2_ISR_4CHERR		ISR2_FORMAT(0xa) /* 4 ch errors mask */
+#define   TALITOS2_ISR_4CHDONE		ISR2_FORMAT(0x5) /* 4 ch done mask */
+#define   TALITOS2_ISR_CH_0_ERR		2 /* ch 0 errors mask */
+#define   TALITOS2_ISR_CH_0_DONE	1 /* ch 0 done mask */
+#define   TALITOS2_ISR_CH_0_2_ERR	ISR2_FORMAT(0x2) /* ch 0, 2 err mask */
+#define   TALITOS2_ISR_CH_0_2_DONE	ISR2_FORMAT(0x1) /* ch 0, 2 done mask */
+#define   TALITOS2_ISR_CH_1_3_ERR	ISR2_FORMAT(0x8) /* ch 1, 3 err mask */
+#define   TALITOS2_ISR_CH_1_3_DONE	ISR2_FORMAT(0x4) /* ch 1, 3 done mask */
+#define TALITOS_ISR_LO			0x1014
+#define TALITOS_ICR			0x1018  /* interrupt clear register */
+#define TALITOS_ICR_LO			0x101C
+
+/* channel register address stride */
+#define TALITOS_CH_BASE_OFFSET		0x1000	/* default channel map base */
+#define TALITOS1_CH_STRIDE		0x1000
+#define TALITOS2_CH_STRIDE		0x100
+
+/* channel configuration register  */
+#define TALITOS_CCCR			0x8
+#define   TALITOS2_CCCR_CONT		0x2    /* channel continue on SEC2 */
+#define   TALITOS2_CCCR_RESET		0x1    /* channel reset on SEC2 */
+#define TALITOS_CCCR_LO			0xc
+#define   TALITOS_CCCR_LO_IWSE		0x80   /* chan. ICCR writeback enab. */
+#define   TALITOS_CCCR_LO_EAE		0x20   /* extended address enable */
+#define   TALITOS_CCCR_LO_CDWE		0x10   /* chan. done writeback enab. */
+#define   TALITOS_CCCR_LO_NE		0x8    /* fetch next descriptor enab. */
+#define   TALITOS_CCCR_LO_NT		0x4    /* notification type */
+#define   TALITOS_CCCR_LO_CDIE		0x2    /* channel done IRQ enable */
+#define   TALITOS1_CCCR_LO_RESET	0x1    /* channel reset on SEC1 */
+
+/* CCPSR: channel pointer status register */
+#define TALITOS_CCPSR			0x10
+#define TALITOS_CCPSR_LO		0x14
+#define   TALITOS_CCPSR_LO_DOF		0x8000 /* double FF write oflow error */
+#define   TALITOS_CCPSR_LO_SOF		0x4000 /* single FF write oflow error */
+#define   TALITOS_CCPSR_LO_MDTE		0x2000 /* master data transfer error */
+#define   TALITOS_CCPSR_LO_SGDLZ	0x1000 /* s/g data len zero error */
+#define   TALITOS_CCPSR_LO_FPZ		0x0800 /* fetch ptr zero error */
+#define   TALITOS_CCPSR_LO_IDH		0x0400 /* illegal desc hdr error */
+#define   TALITOS_CCPSR_LO_IEU		0x0200 /* invalid EU error */
+#define   TALITOS_CCPSR_LO_EU		0x0100 /* EU error detected */
+#define   TALITOS_CCPSR_LO_GB		0x0080 /* gather boundary error */
+#define   TALITOS_CCPSR_LO_GRL		0x0040 /* gather return/length error */
+#define   TALITOS_CCPSR_LO_SB		0x0020 /* scatter boundary error */
+#define   TALITOS_CCPSR_LO_SRL		0x0010 /* scatter return/length error */
+
+/* channel fetch fifo register */
+#define TALITOS_FF			0x48
+#define TALITOS_FF_LO			0x4c
+
+/* current descriptor pointer register */
+#define TALITOS_CDPR			0x40
+#define TALITOS_CDPR_LO			0x44
+
+/* descriptor buffer register */
+#define TALITOS_DESCBUF			0x80
+#define TALITOS_DESCBUF_LO		0x84
+
+/* gather link table */
+#define TALITOS_GATHER			0xc0
+#define TALITOS_GATHER_LO		0xc4
+
+/* scatter link table */
+#define TALITOS_SCATTER			0xe0
+#define TALITOS_SCATTER_LO		0xe4
+
+/* execution unit registers base */
+#define TALITOS2_DEU			0x2000
+#define TALITOS2_AESU			0x4000
+#define TALITOS2_MDEU			0x6000
+#define TALITOS2_AFEU			0x8000
+#define TALITOS2_RNGU			0xa000
+#define TALITOS2_PKEU			0xc000
+#define TALITOS2_KEU			0xe000
+#define TALITOS2_CRCU			0xf000
+
+#define TALITOS12_AESU			0x4000
+#define TALITOS12_DEU			0x5000
+#define TALITOS12_MDEU			0x6000
+
+#define TALITOS10_AFEU			0x8000
+#define TALITOS10_DEU			0xa000
+#define TALITOS10_MDEU			0xc000
+#define TALITOS10_RNGU			0xe000
+#define TALITOS10_PKEU			0x10000
+#define TALITOS10_AESU			0x12000
+
+/* execution unit interrupt status registers */
+#define TALITOS_EUDSR			0x10	/* data size */
+#define TALITOS_EUDSR_LO		0x14
+#define TALITOS_EURCR			0x18 /* reset control*/
+#define TALITOS_EURCR_LO		0x1c
+#define TALITOS_EUSR			0x28 /* rng status */
+#define TALITOS_EUSR_LO			0x2c
+#define TALITOS_EUISR			0x30
+#define TALITOS_EUISR_LO		0x34
+#define TALITOS_EUICR			0x38 /* int. control */
+#define TALITOS_EUICR_LO		0x3c
+#define TALITOS_EU_FIFO			0x800 /* output FIFO */
+#define TALITOS_EU_FIFO_LO		0x804 /* output FIFO */
+/* DES unit */
+#define   TALITOS1_DEUICR_KPE		0x00200000 /* Key Parity Error */
+/* message digest unit */
+#define   TALITOS_MDEUICR_LO_ICE	0x4000 /* integrity check IRQ enable */
+/* random number unit */
+#define   TALITOS_RNGUSR_LO_RD		0x1	/* reset done */
+#define   TALITOS_RNGUSR_LO_OFL		0xff0000/* output FIFO length */
+#define   TALITOS_RNGURCR_LO_SR		0x1	/* software reset */
+
+#define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256	0x28
+#define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512		0x48
+
+/*
+ * talitos descriptor header (hdr) bits
+ */
+
+/* written back when done */
+#define DESC_HDR_DONE			cpu_to_be32(0xff000000)
+#define DESC_HDR_LO_ICCR1_MASK		cpu_to_be32(0x00180000)
+#define DESC_HDR_LO_ICCR1_PASS		cpu_to_be32(0x00080000)
+#define DESC_HDR_LO_ICCR1_FAIL		cpu_to_be32(0x00100000)
+
+/* primary execution unit select */
+#define	DESC_HDR_SEL0_MASK		cpu_to_be32(0xf0000000)
+#define	DESC_HDR_SEL0_AFEU		cpu_to_be32(0x10000000)
+#define	DESC_HDR_SEL0_DEU		cpu_to_be32(0x20000000)
+#define	DESC_HDR_SEL0_MDEUA		cpu_to_be32(0x30000000)
+#define	DESC_HDR_SEL0_MDEUB		cpu_to_be32(0xb0000000)
+#define	DESC_HDR_SEL0_RNG		cpu_to_be32(0x40000000)
+#define	DESC_HDR_SEL0_PKEU		cpu_to_be32(0x50000000)
+#define	DESC_HDR_SEL0_AESU		cpu_to_be32(0x60000000)
+#define	DESC_HDR_SEL0_KEU		cpu_to_be32(0x70000000)
+#define	DESC_HDR_SEL0_CRCU		cpu_to_be32(0x80000000)
+
+/* primary execution unit mode (MODE0) and derivatives */
+#define	DESC_HDR_MODE0_ENCRYPT		cpu_to_be32(0x00100000)
+#define	DESC_HDR_MODE0_AESU_CBC		cpu_to_be32(0x00200000)
+#define	DESC_HDR_MODE0_AESU_CTR		cpu_to_be32(0x00600000)
+#define	DESC_HDR_MODE0_DEU_CBC		cpu_to_be32(0x00400000)
+#define	DESC_HDR_MODE0_DEU_3DES		cpu_to_be32(0x00200000)
+#define	DESC_HDR_MODE0_MDEU_CONT	cpu_to_be32(0x08000000)
+#define	DESC_HDR_MODE0_MDEU_INIT	cpu_to_be32(0x01000000)
+#define	DESC_HDR_MODE0_MDEU_HMAC	cpu_to_be32(0x00800000)
+#define	DESC_HDR_MODE0_MDEU_PAD		cpu_to_be32(0x00400000)
+#define	DESC_HDR_MODE0_MDEU_SHA224	cpu_to_be32(0x00300000)
+#define	DESC_HDR_MODE0_MDEU_MD5		cpu_to_be32(0x00200000)
+#define	DESC_HDR_MODE0_MDEU_SHA256	cpu_to_be32(0x00100000)
+#define	DESC_HDR_MODE0_MDEU_SHA1	cpu_to_be32(0x00000000)
+#define	DESC_HDR_MODE0_MDEUB_SHA384	cpu_to_be32(0x00000000)
+#define	DESC_HDR_MODE0_MDEUB_SHA512	cpu_to_be32(0x00200000)
+#define	DESC_HDR_MODE0_MDEU_MD5_HMAC	(DESC_HDR_MODE0_MDEU_MD5 | \
+					 DESC_HDR_MODE0_MDEU_HMAC)
+#define	DESC_HDR_MODE0_MDEU_SHA256_HMAC	(DESC_HDR_MODE0_MDEU_SHA256 | \
+					 DESC_HDR_MODE0_MDEU_HMAC)
+#define	DESC_HDR_MODE0_MDEU_SHA1_HMAC	(DESC_HDR_MODE0_MDEU_SHA1 | \
+					 DESC_HDR_MODE0_MDEU_HMAC)
+
+/* secondary execution unit select (SEL1) */
+#define	DESC_HDR_SEL1_MASK		cpu_to_be32(0x000f0000)
+#define	DESC_HDR_SEL1_MDEUA		cpu_to_be32(0x00030000)
+#define	DESC_HDR_SEL1_MDEUB		cpu_to_be32(0x000b0000)
+#define	DESC_HDR_SEL1_CRCU		cpu_to_be32(0x00080000)
+
+/* secondary execution unit mode (MODE1) and derivatives */
+#define	DESC_HDR_MODE1_MDEU_CICV	cpu_to_be32(0x00004000)
+#define	DESC_HDR_MODE1_MDEU_INIT	cpu_to_be32(0x00001000)
+#define	DESC_HDR_MODE1_MDEU_HMAC	cpu_to_be32(0x00000800)
+#define	DESC_HDR_MODE1_MDEU_PAD		cpu_to_be32(0x00000400)
+#define	DESC_HDR_MODE1_MDEU_SHA224	cpu_to_be32(0x00000300)
+#define	DESC_HDR_MODE1_MDEU_MD5		cpu_to_be32(0x00000200)
+#define	DESC_HDR_MODE1_MDEU_SHA256	cpu_to_be32(0x00000100)
+#define	DESC_HDR_MODE1_MDEU_SHA1	cpu_to_be32(0x00000000)
+#define	DESC_HDR_MODE1_MDEUB_SHA384	cpu_to_be32(0x00000000)
+#define	DESC_HDR_MODE1_MDEUB_SHA512	cpu_to_be32(0x00000200)
+#define	DESC_HDR_MODE1_MDEU_MD5_HMAC	(DESC_HDR_MODE1_MDEU_MD5 | \
+					 DESC_HDR_MODE1_MDEU_HMAC)
+#define	DESC_HDR_MODE1_MDEU_SHA256_HMAC	(DESC_HDR_MODE1_MDEU_SHA256 | \
+					 DESC_HDR_MODE1_MDEU_HMAC)
+#define	DESC_HDR_MODE1_MDEU_SHA1_HMAC	(DESC_HDR_MODE1_MDEU_SHA1 | \
+					 DESC_HDR_MODE1_MDEU_HMAC)
+#define DESC_HDR_MODE1_MDEU_SHA224_HMAC	(DESC_HDR_MODE1_MDEU_SHA224 | \
+					 DESC_HDR_MODE1_MDEU_HMAC)
+#define DESC_HDR_MODE1_MDEUB_SHA384_HMAC	(DESC_HDR_MODE1_MDEUB_SHA384 | \
+						 DESC_HDR_MODE1_MDEU_HMAC)
+#define DESC_HDR_MODE1_MDEUB_SHA512_HMAC	(DESC_HDR_MODE1_MDEUB_SHA512 | \
+						 DESC_HDR_MODE1_MDEU_HMAC)
+
+/* direction of overall data flow (DIR) */
+#define	DESC_HDR_DIR_INBOUND		cpu_to_be32(0x00000002)
+
+/* request done notification (DN) */
+#define	DESC_HDR_DONE_NOTIFY		cpu_to_be32(0x00000001)
+
+/* descriptor types */
+#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP		cpu_to_be32(0 << 3)
+#define DESC_HDR_TYPE_IPSEC_ESP			cpu_to_be32(1 << 3)
+#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU	cpu_to_be32(2 << 3)
+#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU	cpu_to_be32(4 << 3)
+
+/* link table extent field bits */
+#define DESC_PTR_LNKTBL_JUMP			0x80
+#define DESC_PTR_LNKTBL_RETURN			0x02
+#define DESC_PTR_LNKTBL_NEXT			0x01
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
new file mode 100644
index 0000000..0e338bf
--- /dev/null
+++ b/drivers/crypto/ux500/Kconfig
@@ -0,0 +1,32 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+config CRYPTO_DEV_UX500_CRYP
+	tristate "UX500 crypto driver for CRYP block"
+	depends on CRYPTO_DEV_UX500
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_DES
+	help
+	  This selects the crypto driver for the UX500_CRYP hardware. It
+	  supports AES-ECB, CBC and CTR with key sizes of 128, 192 and
+	  256 bits.
+
+config CRYPTO_DEV_UX500_HASH
+	tristate "UX500 crypto driver for HASH block"
+	depends on CRYPTO_DEV_UX500
+	select CRYPTO_HASH
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	help
+	  This selects the hash driver for the UX500_HASH hardware.
+	  Depends on UX500/STM DMA if running in DMA mode.
+
+config CRYPTO_DEV_UX500_DEBUG
+	bool "Activate ux500 platform debug-mode for crypto and hash block"
+	depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH
+	help
+	  Say Y if you want to add debug prints to ux500_hash and
+	  ux500_cryp devices.
diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile
new file mode 100644
index 0000000..b9a365b
--- /dev/null
+++ b/drivers/crypto/ux500/Makefile
@@ -0,0 +1,8 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/
+obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += cryp/
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
new file mode 100644
index 0000000..b497ae3
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -0,0 +1,13 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: shujuan.chen@stericsson.com for ST-Ericsson.
+# License terms: GNU General Public License (GPL) version 2
+#
+
+ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
+CFLAGS_cryp_core.o := -DDEBUG
+CFLAGS_cryp.o := -DDEBUG
+CFLAGS_cryp_irq.o := -DDEBUG
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
+ux500_cryp-objs :=  cryp.o cryp_irq.o cryp_core.o
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
new file mode 100644
index 0000000..00a16ab
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -0,0 +1,387 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "cryp_p.h"
+#include "cryp.h"
+
+/**
+ * cryp_wait_until_done - wait until the device logic is not busy
+ */
+void cryp_wait_until_done(struct cryp_device_data *device_data)
+{
+	while (cryp_is_logic_busy(device_data))
+		cpu_relax();
+}
+
+/**
+ * cryp_check - This routine checks Peripheral and PCell Id
+ * @device_data: Pointer to the device data struct for base address.
+ */
+int cryp_check(struct cryp_device_data *device_data)
+{
+	int peripheralid2 = 0;
+
+	if (NULL == device_data)
+		return -EINVAL;
+
+	peripheralid2 = readl_relaxed(&device_data->base->periphId2);
+
+	if (peripheralid2 != CRYP_PERIPHERAL_ID2_DB8500)
+		return -EPERM;
+
+	/* Check Peripheral and Pcell Id Register for CRYP */
+	if ((CRYP_PERIPHERAL_ID0 ==
+		readl_relaxed(&device_data->base->periphId0))
+	    && (CRYP_PERIPHERAL_ID1 ==
+		    readl_relaxed(&device_data->base->periphId1))
+	    && (CRYP_PERIPHERAL_ID3 ==
+		    readl_relaxed(&device_data->base->periphId3))
+	    && (CRYP_PCELL_ID0 ==
+		    readl_relaxed(&device_data->base->pcellId0))
+	    && (CRYP_PCELL_ID1 ==
+		    readl_relaxed(&device_data->base->pcellId1))
+	    && (CRYP_PCELL_ID2 ==
+		    readl_relaxed(&device_data->base->pcellId2))
+	    && (CRYP_PCELL_ID3 ==
+		    readl_relaxed(&device_data->base->pcellId3))) {
+		return 0;
+	}
+
+	return -EPERM;
+}
+
+/**
+ * cryp_activity - This routine enables/disables the cryptography function.
+ * @device_data: Pointer to the device data struct for base address.
+ * @cryp_crypen: Enable/Disable functionality
+ */
+void cryp_activity(struct cryp_device_data *device_data,
+		   enum cryp_crypen cryp_crypen)
+{
+	CRYP_PUT_BITS(&device_data->base->cr,
+		      cryp_crypen,
+		      CRYP_CR_CRYPEN_POS,
+		      CRYP_CR_CRYPEN_MASK);
+}
+
+/**
+ * cryp_flush_inoutfifo - Resets both the input and the output FIFOs
+ * @device_data: Pointer to the device data struct for base address.
+ */
+void cryp_flush_inoutfifo(struct cryp_device_data *device_data)
+{
+	/*
+	 * We always need to disable the hardware before trying to flush the
+	 * FIFO. This is something that isn't written in the design
+	 * specification, but we have been informed by the hardware designers
+	 * that this must be done.
+	 */
+	cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
+	cryp_wait_until_done(device_data);
+
+	CRYP_SET_BITS(&device_data->base->cr, CRYP_CR_FFLUSH_MASK);
+	/*
+	 * CRYP_SR_INFIFO_READY_MASK is the expected value on the status
+	 * register when starting a new calculation, which means the input
+	 * FIFO is not full and is empty.
+	 */
+	while (readl_relaxed(&device_data->base->sr) !=
+	       CRYP_SR_INFIFO_READY_MASK)
+		cpu_relax();
+}
+
+/**
+ * cryp_set_configuration - This routine sets the control register (cr) of the CRYP IP
+ * @device_data: Pointer to the device data struct for base address.
+ * @cryp_config: Pointer to the configuration parameter
+ * @control_register: The control register to be written later on.
+ */
+int cryp_set_configuration(struct cryp_device_data *device_data,
+			   struct cryp_config *cryp_config,
+			   u32 *control_register)
+{
+	u32 cr_for_kse;
+
+	if (NULL == device_data || NULL == cryp_config)
+		return -EINVAL;
+
+	*control_register |= (cryp_config->keysize << CRYP_CR_KEYSIZE_POS);
+
+	/* Prepare key for decryption in AES_ECB and AES_CBC mode. */
+	if ((CRYP_ALGORITHM_DECRYPT == cryp_config->algodir) &&
+	    ((CRYP_ALGO_AES_ECB == cryp_config->algomode) ||
+	     (CRYP_ALGO_AES_CBC == cryp_config->algomode))) {
+		cr_for_kse = *control_register;
+		/*
+		 * This seems a bit odd, but it is indeed needed to set this to
+		 * encrypt even though it is a decryption that we are doing. It
+		 * is also mentioned in the design spec that you need to do
+		 * this. After the key preparation for decryption is done you
+		 * should set algodir back to decryption, which is done outside
+		 * this if statement.
+		 *
+		 * According to the design specification we should set mode ECB
+		 * during key preparation even though we might be running CBC
+		 * when entering this function.
+		 *
+		 * Writing to KSE_ENABLED will drop CRYPEN when key preparation
+		 * is done. Therefore we need to set CRYPEN again outside this
+		 * if statement when running decryption.
+		 */
+		cr_for_kse |= ((CRYP_ALGORITHM_ENCRYPT << CRYP_CR_ALGODIR_POS) |
+			       (CRYP_ALGO_AES_ECB << CRYP_CR_ALGOMODE_POS) |
+			       (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS) |
+			       (KSE_ENABLED << CRYP_CR_KSE_POS));
+
+		writel_relaxed(cr_for_kse, &device_data->base->cr);
+		cryp_wait_until_done(device_data);
+	}
+
+	*control_register |=
+		((cryp_config->algomode << CRYP_CR_ALGOMODE_POS) |
+		 (cryp_config->algodir << CRYP_CR_ALGODIR_POS));
+
+	return 0;
+}
+
+/**
+ * cryp_configure_protection - set the protection bits in the CRYP logic.
+ * @device_data: Pointer to the device data struct for base address.
+ * @p_protect_config:	Pointer to the protection mode and
+ *			secure mode configuration
+ */
+int cryp_configure_protection(struct cryp_device_data *device_data,
+			      struct cryp_protection_config *p_protect_config)
+{
+	if (NULL == p_protect_config)
+		return -EINVAL;
+
+	CRYP_WRITE_BIT(&device_data->base->cr,
+		       (u32) p_protect_config->secure_access,
+		       CRYP_CR_SECURE_MASK);
+	CRYP_PUT_BITS(&device_data->base->cr,
+		      p_protect_config->privilege_access,
+		      CRYP_CR_PRLG_POS,
+		      CRYP_CR_PRLG_MASK);
+
+	return 0;
+}
+
+/**
+ * cryp_is_logic_busy - returns the busy status of the CRYP logic
+ * @device_data: Pointer to the device data struct for base address.
+ */
+int cryp_is_logic_busy(struct cryp_device_data *device_data)
+{
+	return CRYP_TEST_BITS(&device_data->base->sr,
+			      CRYP_SR_BUSY_MASK);
+}
+
+/**
+ * cryp_configure_for_dma - configures the CRYP IP for DMA operation
+ * @device_data: Pointer to the device data struct for base address.
+ * @dma_req: Specifies the DMA request type value.
+ */
+void cryp_configure_for_dma(struct cryp_device_data *device_data,
+			    enum cryp_dma_req_type dma_req)
+{
+	CRYP_SET_BITS(&device_data->base->dmacr,
+		      (u32) dma_req);
+}
+
+/**
+ * cryp_configure_key_values - configures the key values for CRYP operations
+ * @device_data: Pointer to the device data struct for base address.
+ * @key_reg_index: Key value index register
+ * @key_value: The key value struct
+ */
+int cryp_configure_key_values(struct cryp_device_data *device_data,
+			      enum cryp_key_reg_index key_reg_index,
+			      struct cryp_key_value key_value)
+{
+	while (cryp_is_logic_busy(device_data))
+		cpu_relax();
+
+	switch (key_reg_index) {
+	case CRYP_KEY_REG_1:
+		writel_relaxed(key_value.key_value_left,
+				&device_data->base->key_1_l);
+		writel_relaxed(key_value.key_value_right,
+				&device_data->base->key_1_r);
+		break;
+	case CRYP_KEY_REG_2:
+		writel_relaxed(key_value.key_value_left,
+				&device_data->base->key_2_l);
+		writel_relaxed(key_value.key_value_right,
+				&device_data->base->key_2_r);
+		break;
+	case CRYP_KEY_REG_3:
+		writel_relaxed(key_value.key_value_left,
+				&device_data->base->key_3_l);
+		writel_relaxed(key_value.key_value_right,
+				&device_data->base->key_3_r);
+		break;
+	case CRYP_KEY_REG_4:
+		writel_relaxed(key_value.key_value_left,
+				&device_data->base->key_4_l);
+		writel_relaxed(key_value.key_value_right,
+				&device_data->base->key_4_r);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * cryp_configure_init_vector - configures the initialization vector register
+ * @device_data: Pointer to the device data struct for base address.
+ * @init_vector_index: Specifies the index of the init vector.
+ * @init_vector_value: Specifies the value for the init vector.
+ */
+int cryp_configure_init_vector(struct cryp_device_data *device_data,
+			       enum cryp_init_vector_index
+			       init_vector_index,
+			       struct cryp_init_vector_value
+			       init_vector_value)
+{
+	while (cryp_is_logic_busy(device_data))
+		cpu_relax();
+
+	switch (init_vector_index) {
+	case CRYP_INIT_VECTOR_INDEX_0:
+		writel_relaxed(init_vector_value.init_value_left,
+		       &device_data->base->init_vect_0_l);
+		writel_relaxed(init_vector_value.init_value_right,
+		       &device_data->base->init_vect_0_r);
+		break;
+	case CRYP_INIT_VECTOR_INDEX_1:
+		writel_relaxed(init_vector_value.init_value_left,
+		       &device_data->base->init_vect_1_l);
+		writel_relaxed(init_vector_value.init_value_right,
+		       &device_data->base->init_vect_1_r);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * cryp_save_device_context -	Store hardware registers and
+ *				other device context parameters
+ * @device_data: Pointer to the device data struct for base address.
+ * @ctx: Crypto device context
+ */
+void cryp_save_device_context(struct cryp_device_data *device_data,
+			      struct cryp_device_context *ctx,
+			      int cryp_mode)
+{
+	enum cryp_algo_mode algomode;
+	struct cryp_register __iomem *src_reg = device_data->base;
+	struct cryp_config *config =
+		(struct cryp_config *)device_data->current_ctx;
+
+	/*
+	 * Always start by disabling the hardware and wait for it to finish
+	 * the ongoing calculations before trying to reprogram it.
+	 */
+	cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
+	cryp_wait_until_done(device_data);
+
+	if (cryp_mode == CRYP_MODE_DMA)
+		cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH);
+
+	if (CRYP_TEST_BITS(&src_reg->sr, CRYP_SR_IFEM_MASK) == 0)
+		ctx->din = readl_relaxed(&src_reg->din);
+
+	ctx->cr = readl_relaxed(&src_reg->cr) & CRYP_CR_CONTEXT_SAVE_MASK;
+
+	switch (config->keysize) {
+	case CRYP_KEY_SIZE_256:
+		ctx->key_4_l = readl_relaxed(&src_reg->key_4_l);
+		ctx->key_4_r = readl_relaxed(&src_reg->key_4_r);
+
+	case CRYP_KEY_SIZE_192:
+		ctx->key_3_l = readl_relaxed(&src_reg->key_3_l);
+		ctx->key_3_r = readl_relaxed(&src_reg->key_3_r);
+
+	case CRYP_KEY_SIZE_128:
+		ctx->key_2_l = readl_relaxed(&src_reg->key_2_l);
+		ctx->key_2_r = readl_relaxed(&src_reg->key_2_r);
+
+	default:
+		ctx->key_1_l = readl_relaxed(&src_reg->key_1_l);
+		ctx->key_1_r = readl_relaxed(&src_reg->key_1_r);
+	}
+
+	/* Save IV for CBC mode for both AES and DES. */
+	algomode = ((ctx->cr & CRYP_CR_ALGOMODE_MASK) >> CRYP_CR_ALGOMODE_POS);
+	if (algomode == CRYP_ALGO_TDES_CBC ||
+	    algomode == CRYP_ALGO_DES_CBC ||
+	    algomode == CRYP_ALGO_AES_CBC) {
+		ctx->init_vect_0_l = readl_relaxed(&src_reg->init_vect_0_l);
+		ctx->init_vect_0_r = readl_relaxed(&src_reg->init_vect_0_r);
+		ctx->init_vect_1_l = readl_relaxed(&src_reg->init_vect_1_l);
+		ctx->init_vect_1_r = readl_relaxed(&src_reg->init_vect_1_r);
+	}
+}
+
+/**
+ * cryp_restore_device_context -	Restore hardware registers and
+ *					other device context parameters
+ * @device_data: Pointer to the device data struct for base address.
+ * @ctx: Crypto device context
+ */
+void cryp_restore_device_context(struct cryp_device_data *device_data,
+				 struct cryp_device_context *ctx)
+{
+	struct cryp_register __iomem *reg = device_data->base;
+	struct cryp_config *config =
+		(struct cryp_config *)device_data->current_ctx;
+
+	/*
+	 * Fall through for all items in switch statement. DES is captured in
+	 * the default.
+	 */
+	switch (config->keysize) {
+	case CRYP_KEY_SIZE_256:
+		writel_relaxed(ctx->key_4_l, &reg->key_4_l);
+		writel_relaxed(ctx->key_4_r, &reg->key_4_r);
+
+	case CRYP_KEY_SIZE_192:
+		writel_relaxed(ctx->key_3_l, &reg->key_3_l);
+		writel_relaxed(ctx->key_3_r, &reg->key_3_r);
+
+	case CRYP_KEY_SIZE_128:
+		writel_relaxed(ctx->key_2_l, &reg->key_2_l);
+		writel_relaxed(ctx->key_2_r, &reg->key_2_r);
+
+	default:
+		writel_relaxed(ctx->key_1_l, &reg->key_1_l);
+		writel_relaxed(ctx->key_1_r, &reg->key_1_r);
+	}
+
+	/* Restore IV for CBC mode for AES and DES. */
+	if (config->algomode == CRYP_ALGO_TDES_CBC ||
+	    config->algomode == CRYP_ALGO_DES_CBC ||
+	    config->algomode == CRYP_ALGO_AES_CBC) {
+		writel_relaxed(ctx->init_vect_0_l, &reg->init_vect_0_l);
+		writel_relaxed(ctx->init_vect_0_r, &reg->init_vect_0_r);
+		writel_relaxed(ctx->init_vect_1_l, &reg->init_vect_1_l);
+		writel_relaxed(ctx->init_vect_1_r, &reg->init_vect_1_r);
+	}
+}
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
new file mode 100644
index 0000000..d1d6606
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -0,0 +1,313 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_H_
+#define _CRYP_H_
+
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/klist.h>
+#include <linux/mutex.h>
+
+#define DEV_DBG_NAME "crypX crypX:"
+
+/* CRYP enable/disable */
+enum cryp_crypen {
+	CRYP_CRYPEN_DISABLE = 0,
+	CRYP_CRYPEN_ENABLE = 1
+};
+
+/* CRYP Start Computation enable/disable */
+enum cryp_start {
+	CRYP_START_DISABLE = 0,
+	CRYP_START_ENABLE = 1
+};
+
+/* CRYP Init Signal enable/disable */
+enum cryp_init {
+	CRYP_INIT_DISABLE = 0,
+	CRYP_INIT_ENABLE = 1
+};
+
+/* Cryp State enable/disable */
+enum cryp_state {
+	CRYP_STATE_DISABLE = 0,
+	CRYP_STATE_ENABLE = 1
+};
+
+/* Key preparation bit enable */
+enum cryp_key_prep {
+	KSE_DISABLED = 0,
+	KSE_ENABLED = 1
+};
+
+/* Key size for AES */
+#define	CRYP_KEY_SIZE_128 (0)
+#define	CRYP_KEY_SIZE_192 (1)
+#define	CRYP_KEY_SIZE_256 (2)
+
+/* AES modes */
+enum cryp_algo_mode {
+	CRYP_ALGO_TDES_ECB,
+	CRYP_ALGO_TDES_CBC,
+	CRYP_ALGO_DES_ECB,
+	CRYP_ALGO_DES_CBC,
+	CRYP_ALGO_AES_ECB,
+	CRYP_ALGO_AES_CBC,
+	CRYP_ALGO_AES_CTR,
+	CRYP_ALGO_AES_XTS
+};
+
+/* Cryp Encryption or Decryption */
+enum cryp_algorithm_dir {
+	CRYP_ALGORITHM_ENCRYPT,
+	CRYP_ALGORITHM_DECRYPT
+};
+
+/* Hardware access method */
+enum cryp_mode {
+	CRYP_MODE_POLLING,
+	CRYP_MODE_INTERRUPT,
+	CRYP_MODE_DMA
+};
+
+/**
+ * struct cryp_config -
+ * @keysize: Key size for AES
+ * @algomode: AES modes
+ * @algodir: Cryp Encryption or Decryption
+ *
+ * CRYP configuration structure to be passed to set configuration
+ */
+struct cryp_config {
+	int keysize;
+	enum cryp_algo_mode algomode;
+	enum cryp_algorithm_dir algodir;
+};
+
+/**
+ * struct cryp_protection_config -
+ * @privilege_access: Privileged cryp state enable/disable
+ * @secure_access: Secure cryp state enable/disable
+ *
+ * Protection configuration structure for setting privilege access
+ */
+struct cryp_protection_config {
+	enum cryp_state privilege_access;
+	enum cryp_state secure_access;
+};
+
+/* Cryp status */
+enum cryp_status_id {
+	CRYP_STATUS_BUSY = 0x10,
+	CRYP_STATUS_OUTPUT_FIFO_FULL = 0x08,
+	CRYP_STATUS_OUTPUT_FIFO_NOT_EMPTY = 0x04,
+	CRYP_STATUS_INPUT_FIFO_NOT_FULL = 0x02,
+	CRYP_STATUS_INPUT_FIFO_EMPTY = 0x01
+};
+
+/* Cryp DMA interface */
+#define CRYP_DMA_TX_FIFO	0x08
+#define CRYP_DMA_RX_FIFO	0x10
+
+enum cryp_dma_req_type {
+	CRYP_DMA_DISABLE_BOTH,
+	CRYP_DMA_ENABLE_IN_DATA,
+	CRYP_DMA_ENABLE_OUT_DATA,
+	CRYP_DMA_ENABLE_BOTH_DIRECTIONS
+};
+
+enum cryp_dma_channel {
+	CRYP_DMA_RX = 0,
+	CRYP_DMA_TX
+};
+
+/* Key registers */
+enum cryp_key_reg_index {
+	CRYP_KEY_REG_1,
+	CRYP_KEY_REG_2,
+	CRYP_KEY_REG_3,
+	CRYP_KEY_REG_4
+};
+
+/* Key register left and right */
+struct cryp_key_value {
+	u32 key_value_left;
+	u32 key_value_right;
+};
+
+/* Cryp Initialization structure */
+enum cryp_init_vector_index {
+	CRYP_INIT_VECTOR_INDEX_0,
+	CRYP_INIT_VECTOR_INDEX_1
+};
+
+/**
+ * struct cryp_init_vector_value - init vector left and right word
+ * @init_value_left: Left word of the init vector
+ * @init_value_right: Right word of the init vector
+ */
+struct cryp_init_vector_value {
+	u32 init_value_left;
+	u32 init_value_right;
+};
+
+/**
+ * struct cryp_device_context - structure for a cryp context.
+ * @cr: control register
+ * @dmacr: DMA control register
+ * @imsc: Interrupt mask set/clear register
+ * @key_1_l: Key 1l register
+ * @key_1_r: Key 1r register
+ * @key_2_l: Key 2l register
+ * @key_2_r: Key 2r register
+ * @key_3_l: Key 3l register
+ * @key_3_r: Key 3r register
+ * @key_4_l: Key 4l register
+ * @key_4_r: Key 4r register
+ * @init_vect_0_l: Initialization vector 0l register
+ * @init_vect_0_r: Initialization vector 0r register
+ * @init_vect_1_l: Initialization vector 1l register
+ * @init_vect_1_r: Initialization vector 1r register
+ * @din: Data in register
+ * @dout: Data out register
+ *
+ * CRYP power management specific structure.
+ */
+struct cryp_device_context {
+	u32 cr;
+	u32 dmacr;
+	u32 imsc;
+
+	u32 key_1_l;
+	u32 key_1_r;
+	u32 key_2_l;
+	u32 key_2_r;
+	u32 key_3_l;
+	u32 key_3_r;
+	u32 key_4_l;
+	u32 key_4_r;
+
+	u32 init_vect_0_l;
+	u32 init_vect_0_r;
+	u32 init_vect_1_l;
+	u32 init_vect_1_r;
+
+	u32 din;
+	u32 dout;
+};
+
+struct cryp_dma {
+	dma_cap_mask_t mask;
+	struct completion cryp_dma_complete;
+	struct dma_chan *chan_cryp2mem;
+	struct dma_chan *chan_mem2cryp;
+	struct stedma40_chan_cfg *cfg_cryp2mem;
+	struct stedma40_chan_cfg *cfg_mem2cryp;
+	int sg_src_len;
+	int sg_dst_len;
+	struct scatterlist *sg_src;
+	struct scatterlist *sg_dst;
+	int nents_src;
+	int nents_dst;
+};
+
+/**
+ * struct cryp_device_data - structure for a cryp device.
+ * @base: Pointer to virtual base address of the cryp device.
+ * @phybase: Pointer to physical memory location of the cryp device.
+ * @dev: Pointer to the device's dev structure.
+ * @clk: Pointer to the device's clock control.
+ * @pwr_regulator: Pointer to the device's power control.
+ * @power_status: Current status of the power.
+ * @ctx_lock: Lock for current_ctx.
+ * @current_ctx: Pointer to the currently allocated context.
+ * @list_node: For inclusion into a klist.
+ * @dma: The dma structure holding channel configuration.
+ * @power_state: TRUE = power state on, FALSE = power state off.
+ * @power_state_spinlock: Spinlock for power_state.
+ * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx.
+ */
+struct cryp_device_data {
+	struct cryp_register __iomem *base;
+	phys_addr_t phybase;
+	struct device *dev;
+	struct clk *clk;
+	struct regulator *pwr_regulator;
+	int power_status;
+	struct spinlock ctx_lock;
+	struct cryp_ctx *current_ctx;
+	struct klist_node list_node;
+	struct cryp_dma dma;
+	bool power_state;
+	struct spinlock power_state_spinlock;
+	bool restore_dev_ctx;
+};
+
+void cryp_wait_until_done(struct cryp_device_data *device_data);
+
+/* Initialization functions */
+
+int cryp_check(struct cryp_device_data *device_data);
+
+void cryp_activity(struct cryp_device_data *device_data,
+		   enum cryp_crypen cryp_crypen);
+
+void cryp_flush_inoutfifo(struct cryp_device_data *device_data);
+
+int cryp_set_configuration(struct cryp_device_data *device_data,
+			   struct cryp_config *cryp_config,
+			   u32 *control_register);
+
+void cryp_configure_for_dma(struct cryp_device_data *device_data,
+			    enum cryp_dma_req_type dma_req);
+
+int cryp_configure_key_values(struct cryp_device_data *device_data,
+			      enum cryp_key_reg_index key_reg_index,
+			      struct cryp_key_value key_value);
+
+int cryp_configure_init_vector(struct cryp_device_data *device_data,
+			       enum cryp_init_vector_index
+			       init_vector_index,
+			       struct cryp_init_vector_value
+			       init_vector_value);
+
+int cryp_configure_protection(struct cryp_device_data *device_data,
+			      struct cryp_protection_config *p_protect_config);
+
+/* Power management functions */
+void cryp_save_device_context(struct cryp_device_data *device_data,
+			      struct cryp_device_context *ctx,
+			      int cryp_mode);
+
+void cryp_restore_device_context(struct cryp_device_data *device_data,
+				 struct cryp_device_context *ctx);
+
+/* Data transfer and status bits. */
+int cryp_is_logic_busy(struct cryp_device_data *device_data);
+
+int cryp_get_status(struct cryp_device_data *device_data);
+
+/**
+ * cryp_write_indata - This routine writes 32-bit data into the data input
+ *		       register of the cryptography IP.
+ * @device_data: Pointer to the device data struct for base address.
+ * @write_data: Data to write.
+ */
+int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data);
+
+/**
+ * cryp_read_outdata - This routine reads the data from the data output
+ *		       register of the CRYP logic
+ * @device_data: Pointer to the device data struct for base address.
+ * @read_data: Read the data from the output FIFO.
+ */
+int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data);
+
+#endif /* _CRYP_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
new file mode 100644
index 0000000..d2663a4
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -0,0 +1,1762 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/crypto.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/klist.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/semaphore.h>
+#include <linux/platform_data/dma-ste-dma40.h>
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+
+#include <linux/platform_data/crypto-ux500.h>
+
+#include "cryp_p.h"
+#include "cryp.h"
+
+#define CRYP_MAX_KEY_SIZE	32
+#define BYTES_PER_WORD		4
+
+static int cryp_mode;
+static atomic_t session_id;
+
+static struct stedma40_chan_cfg *mem_to_engine;
+static struct stedma40_chan_cfg *engine_to_mem;
+
+/**
+ * struct cryp_driver_data - data specific to the driver.
+ *
+ * @device_list: A list of registered devices to choose from.
+ * @device_allocation: A semaphore initialized with number of devices.
+ */
+struct cryp_driver_data {
+	struct klist device_list;
+	struct semaphore device_allocation;
+};
+
+/**
+ * struct cryp_ctx - Crypto context
+ * @config: Crypto configuration.
+ * @key: Key buffer (up to CRYP_MAX_KEY_SIZE bytes).
+ * @keylen: Length of key.
+ * @iv: Pointer to initialization vector.
+ * @indata: Pointer to indata.
+ * @outdata: Pointer to outdata.
+ * @datalen: Length of indata.
+ * @outlen: Length of outdata.
+ * @blocksize: Size of blocks.
+ * @updated: Updated flag.
+ * @dev_ctx: Device dependent context.
+ * @device: Pointer to the device.
+ * @session_id: Session id used to detect whether the hardware still holds
+ *              this context's state or must be reprogrammed.
+ */
+struct cryp_ctx {
+	struct cryp_config config;
+	u8 key[CRYP_MAX_KEY_SIZE];
+	u32 keylen;
+	u8 *iv;
+	const u8 *indata;
+	u8 *outdata;
+	u32 datalen;
+	u32 outlen;
+	u32 blocksize;
+	u8 updated;
+	struct cryp_device_context dev_ctx;
+	struct cryp_device_data *device;
+	u32 session_id;
+};
+
+static struct cryp_driver_data driver_data;
+
+/**
+ * uint8p_to_uint32_be - 4*uint8 to uint32 big endian
+ * @in: Data to convert.
+ */
+static inline u32 uint8p_to_uint32_be(u8 *in)
+{
+	u32 *data = (u32 *)in;
+
+	return cpu_to_be32p(data);
+}
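+
+/*
+ * Illustrative example (not part of the original driver): for the byte
+ * sequence {0x01, 0x02, 0x03, 0x04}, uint8p_to_uint32_be() returns
+ * 0x01020304 regardless of host endianness.
+ */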
+
+/**
+ * swap_bits_in_byte - mirror the bits in a byte
+ * @b: the byte to be mirrored
+ *
+ * The bits are swapped the following way:
+ *  Byte b includes bits 0-7, nibble 1 (n1) includes bits 0-3 and
+ *  nibble 2 (n2) bits 4-7.
+ *
+ *  Nibble 1 (n1):
+ *  (The "old" (moved) bit is replaced with a zero)
+ *  1. Move bit 6 and 7, 4 positions to the right.
+ *  2. Move bit 3 and 5, 2 positions to the right.
+ *  3. Move bit 1-4, 1 position to the right.
+ *
+ *  Nibble 2 (n2):
+ *  1. Move bit 0 and 1, 4 positions to the left.
+ *  2. Move bit 2 and 4, 2 positions to the left.
+ *  3. Move bit 3-6, 1 position to the left.
+ *
+ *  Combine the two nibbles to a complete and swapped byte.
+ */
+
+static inline u8 swap_bits_in_byte(u8 b)
+{
+#define R_SHIFT_4_MASK  0xc0 /* Bits 6 and 7, right shift 4 */
+#define R_SHIFT_2_MASK  0x28 /* (After right shift 4) Bits 3 and 5,
+				  right shift 2 */
+#define R_SHIFT_1_MASK  0x1e /* (After right shift 2) Bits 1-4,
+				  right shift 1 */
+#define L_SHIFT_4_MASK  0x03 /* Bits 0 and 1, left shift 4 */
+#define L_SHIFT_2_MASK  0x14 /* (After left shift 4) Bits 2 and 4,
+				  left shift 2 */
+#define L_SHIFT_1_MASK  0x78 /* (After left shift 2) Bits 3-6,
+				  left shift 1 */
+
+	u8 n1;
+	u8 n2;
+
+	/* Swap most significant nibble */
+	/* Right shift 4, bits 6 and 7 */
+	n1 = ((b  & R_SHIFT_4_MASK) >> 4) | (b  & ~(R_SHIFT_4_MASK >> 4));
+	/* Right shift 2, bits 3 and 5 */
+	n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2));
+	/* Right shift 1, bits 1-4 */
+	n1 = (n1  & R_SHIFT_1_MASK) >> 1;
+
+	/* Swap least significant nibble */
+	/* Left shift 4, bits 0 and 1 */
+	n2 = ((b  & L_SHIFT_4_MASK) << 4) | (b  & ~(L_SHIFT_4_MASK << 4));
+	/* Left shift 2, bits 2 and 4 */
+	n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2));
+	/* Left shift 1, bits 3-6 */
+	n2 = (n2  & L_SHIFT_1_MASK) << 1;
+
+	return n1 | n2;
+}
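+
+/*
+ * Illustrative example (not part of the original driver): passing 0xb5
+ * (1011 0101) through swap_bits_in_byte() yields 0xad (1010 1101), i.e.
+ * the bit order within the byte is mirrored.
+ */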
+
+static inline void swap_words_in_key_and_bits_in_byte(const u8 *in,
+						      u8 *out, u32 len)
+{
+	unsigned int i = 0;
+	int j;
+	int index = 0;
+
+	j = len - BYTES_PER_WORD;
+	while (j >= 0) {
+		for (i = 0; i < BYTES_PER_WORD; i++) {
+			index = len - j - BYTES_PER_WORD + i;
+			out[j + i] =
+				swap_bits_in_byte(in[index]);
+		}
+		j -= BYTES_PER_WORD;
+	}
+}
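+
+/*
+ * Illustrative example (not part of the original driver): for an 8 byte key
+ * the two 32-bit words swap places, the byte order within each word is kept
+ * and every byte is bit-mirrored, i.e. out[0..3] = swap_bits(in[4..7]) and
+ * out[4..7] = swap_bits(in[0..3]).
+ */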
+
+static void add_session_id(struct cryp_ctx *ctx)
+{
+	/*
+	 * We never want 0 to be a valid value, since this is the default value
+	 * for the software context.
+	 */
+	if (unlikely(atomic_inc_and_test(&session_id)))
+		atomic_inc(&session_id);
+
+	ctx->session_id = atomic_read(&session_id);
+}
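+
+/*
+ * Illustrative note (not part of the original driver): atomic_inc_and_test()
+ * returns true exactly when the incremented counter becomes 0, so the extra
+ * atomic_inc() skips over 0 and a context with the default session_id of 0
+ * can never be mistaken for the current hardware owner.
+ */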
+
+static irqreturn_t cryp_interrupt_handler(int irq, void *param)
+{
+	struct cryp_ctx *ctx;
+	int count;
+	struct cryp_device_data *device_data;
+
+	if (param == NULL) {
+		BUG_ON(!param);
+		return IRQ_HANDLED;
+	}
+
+	/* The device is coming from the one found in hw_crypt_noxts. */
+	device_data = (struct cryp_device_data *)param;
+
+	ctx = device_data->current_ctx;
+
+	if (ctx == NULL) {
+		BUG_ON(!ctx);
+		return IRQ_HANDLED;
+	}
+
+	dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen,
+		cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ?
+		"out" : "in");
+
+	if (cryp_pending_irq_src(device_data,
+				 CRYP_IRQ_SRC_OUTPUT_FIFO)) {
+		if (ctx->outlen / ctx->blocksize > 0) {
+			count = ctx->blocksize / 4;
+
+			readsl(&device_data->base->dout, ctx->outdata, count);
+			ctx->outdata += count;
+			ctx->outlen -= count;
+
+			if (ctx->outlen == 0) {
+				cryp_disable_irq_src(device_data,
+						     CRYP_IRQ_SRC_OUTPUT_FIFO);
+			}
+		}
+	} else if (cryp_pending_irq_src(device_data,
+					CRYP_IRQ_SRC_INPUT_FIFO)) {
+		if (ctx->datalen / ctx->blocksize > 0) {
+			count = ctx->blocksize / 4;
+
+			writesl(&device_data->base->din, ctx->indata, count);
+
+			ctx->indata += count;
+			ctx->datalen -= count;
+
+			if (ctx->datalen == 0)
+				cryp_disable_irq_src(device_data,
+						   CRYP_IRQ_SRC_INPUT_FIFO);
+
+			if (ctx->config.algomode == CRYP_ALGO_AES_XTS) {
+				CRYP_PUT_BITS(&device_data->base->cr,
+					      CRYP_START_ENABLE,
+					      CRYP_CR_START_POS,
+					      CRYP_CR_START_MASK);
+
+				cryp_wait_until_done(device_data);
+			}
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int mode_is_aes(enum cryp_algo_mode mode)
+{
+	return	CRYP_ALGO_AES_ECB == mode ||
+		CRYP_ALGO_AES_CBC == mode ||
+		CRYP_ALGO_AES_CTR == mode ||
+		CRYP_ALGO_AES_XTS == mode;
+}
+
+static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right,
+		  enum cryp_init_vector_index index)
+{
+	struct cryp_init_vector_value vector_value;
+
+	dev_dbg(device_data->dev, "[%s]", __func__);
+
+	vector_value.init_value_left = left;
+	vector_value.init_value_right = right;
+
+	return cryp_configure_init_vector(device_data,
+					  index,
+					  vector_value);
+}
+
+static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
+{
+	int i;
+	int status = 0;
+	int num_of_regs = ctx->blocksize / 8;
+	u32 iv[AES_BLOCK_SIZE / 4];
+
+	dev_dbg(device_data->dev, "[%s]", __func__);
+
+	/*
+	 * Since we loop on num_of_regs we need to have a check in case
+	 * someone provides an incorrect blocksize which would force calling
+	 * cfg_iv with i greater than 2 which is an error.
+	 */
+	if (num_of_regs > 2) {
+		dev_err(device_data->dev, "[%s] Incorrect blocksize %d",
+			__func__, ctx->blocksize);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ctx->blocksize / 4; i++)
+		iv[i] = uint8p_to_uint32_be(ctx->iv + i*4);
+
+	for (i = 0; i < num_of_regs; i++) {
+		status = cfg_iv(device_data, iv[i*2], iv[i*2+1],
+				(enum cryp_init_vector_index) i);
+		if (status != 0)
+			return status;
+	}
+	return status;
+}
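+
+/*
+ * Illustrative example (not part of the original driver): for AES-CBC the
+ * blocksize is 16, so num_of_regs = 2 and IV bytes 0-7 end up in init
+ * vector register pair 0 while bytes 8-15 end up in pair 1, each 4-byte
+ * half converted with uint8p_to_uint32_be(). For DES/3DES-CBC only pair 0
+ * is used.
+ */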
+
+static int set_key(struct cryp_device_data *device_data,
+		   u32 left_key,
+		   u32 right_key,
+		   enum cryp_key_reg_index index)
+{
+	struct cryp_key_value key_value;
+	int cryp_error;
+
+	dev_dbg(device_data->dev, "[%s]", __func__);
+
+	key_value.key_value_left = left_key;
+	key_value.key_value_right = right_key;
+
+	cryp_error = cryp_configure_key_values(device_data,
+					       index,
+					       key_value);
+	if (cryp_error != 0)
+		dev_err(device_data->dev, "[%s]: "
+			"cryp_configure_key_values() failed!", __func__);
+
+	return cryp_error;
+}
+
+static int cfg_keys(struct cryp_ctx *ctx)
+{
+	int i;
+	int num_of_regs = ctx->keylen / 8;
+	u32 swapped_key[CRYP_MAX_KEY_SIZE / 4];
+	int cryp_error = 0;
+
+	dev_dbg(ctx->device->dev, "[%s]", __func__);
+
+	if (mode_is_aes(ctx->config.algomode)) {
+		swap_words_in_key_and_bits_in_byte((u8 *)ctx->key,
+						   (u8 *)swapped_key,
+						   ctx->keylen);
+	} else {
+		for (i = 0; i < ctx->keylen / 4; i++)
+			swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4);
+	}
+
+	for (i = 0; i < num_of_regs; i++) {
+		cryp_error = set_key(ctx->device,
+				     *(((u32 *)swapped_key)+i*2),
+				     *(((u32 *)swapped_key)+i*2+1),
+				     (enum cryp_key_reg_index) i);
+
+		if (cryp_error != 0) {
+			dev_err(ctx->device->dev, "[%s]: set_key() failed!",
+					__func__);
+			return cryp_error;
+		}
+	}
+	return cryp_error;
+}
+
+static int cryp_setup_context(struct cryp_ctx *ctx,
+			      struct cryp_device_data *device_data)
+{
+	u32 control_register = CRYP_CR_DEFAULT;
+
+	switch (cryp_mode) {
+	case CRYP_MODE_INTERRUPT:
+		writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc);
+		break;
+
+	case CRYP_MODE_DMA:
+		writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr);
+		break;
+
+	default:
+		break;
+	}
+
+	if (ctx->updated == 0) {
+		cryp_flush_inoutfifo(device_data);
+		if (cfg_keys(ctx) != 0) {
+			dev_err(ctx->device->dev, "[%s]: cfg_keys failed!",
+				__func__);
+			return -EINVAL;
+		}
+
+		if (ctx->iv &&
+		    CRYP_ALGO_AES_ECB != ctx->config.algomode &&
+		    CRYP_ALGO_DES_ECB != ctx->config.algomode &&
+		    CRYP_ALGO_TDES_ECB != ctx->config.algomode) {
+			if (cfg_ivs(device_data, ctx) != 0)
+				return -EPERM;
+		}
+
+		cryp_set_configuration(device_data, &ctx->config,
+				       &control_register);
+		add_session_id(ctx);
+	} else if (ctx->updated == 1 &&
+		   ctx->session_id != atomic_read(&session_id)) {
+		cryp_flush_inoutfifo(device_data);
+		cryp_restore_device_context(device_data, &ctx->dev_ctx);
+
+		add_session_id(ctx);
+		control_register = ctx->dev_ctx.cr;
+	} else
+		control_register = ctx->dev_ctx.cr;
+
+	writel(control_register |
+	       (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS),
+	       &device_data->base->cr);
+
+	return 0;
+}
+
+static int cryp_get_device_data(struct cryp_ctx *ctx,
+				struct cryp_device_data **device_data)
+{
+	int ret;
+	struct klist_iter device_iterator;
+	struct klist_node *device_node;
+	struct cryp_device_data *local_device_data = NULL;
+	pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+	/* Wait until a device is available */
+	ret = down_interruptible(&driver_data.device_allocation);
+	if (ret)
+		return ret;  /* Interrupted */
+
+	/* Select a device */
+	klist_iter_init(&driver_data.device_list, &device_iterator);
+
+	device_node = klist_next(&device_iterator);
+	while (device_node) {
+		local_device_data = container_of(device_node,
+					   struct cryp_device_data, list_node);
+		spin_lock(&local_device_data->ctx_lock);
+		/* current_ctx allocates a device, NULL = unallocated */
+		if (local_device_data->current_ctx) {
+			device_node = klist_next(&device_iterator);
+		} else {
+			local_device_data->current_ctx = ctx;
+			ctx->device = local_device_data;
+			spin_unlock(&local_device_data->ctx_lock);
+			break;
+		}
+		spin_unlock(&local_device_data->ctx_lock);
+	}
+	klist_iter_exit(&device_iterator);
+
+	if (!device_node) {
+		/**
+		 * No free device found.
+		 * Since we allocated a device with down_interruptible, this
+		 * should not be able to happen.
+		 * Number of available devices, which are contained in
+		 * device_allocation, is therefore decremented by not doing
+		 * an up(device_allocation).
+		 */
+		return -EBUSY;
+	}
+
+	*device_data = local_device_data;
+
+	return 0;
+}
+
+static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
+				   struct device *dev)
+{
+	struct dma_slave_config mem2cryp = {
+		.direction = DMA_MEM_TO_DEV,
+		.dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO,
+		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+		.dst_maxburst = 4,
+	};
+	struct dma_slave_config cryp2mem = {
+		.direction = DMA_DEV_TO_MEM,
+		.src_addr = device_data->phybase + CRYP_DMA_RX_FIFO,
+		.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+		.src_maxburst = 4,
+	};
+
+	dma_cap_zero(device_data->dma.mask);
+	dma_cap_set(DMA_SLAVE, device_data->dma.mask);
+
+	device_data->dma.cfg_mem2cryp = mem_to_engine;
+	device_data->dma.chan_mem2cryp =
+		dma_request_channel(device_data->dma.mask,
+				    stedma40_filter,
+				    device_data->dma.cfg_mem2cryp);
+
+	device_data->dma.cfg_cryp2mem = engine_to_mem;
+	device_data->dma.chan_cryp2mem =
+		dma_request_channel(device_data->dma.mask,
+				    stedma40_filter,
+				    device_data->dma.cfg_cryp2mem);
+
+	dmaengine_slave_config(device_data->dma.chan_mem2cryp, &mem2cryp);
+	dmaengine_slave_config(device_data->dma.chan_cryp2mem, &cryp2mem);
+
+	init_completion(&device_data->dma.cryp_dma_complete);
+}
+
+static void cryp_dma_out_callback(void *data)
+{
+	struct cryp_ctx *ctx = (struct cryp_ctx *) data;
+	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+	complete(&ctx->device->dma.cryp_dma_complete);
+}
+
+static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
+				 struct scatterlist *sg,
+				 int len,
+				 enum dma_data_direction direction)
+{
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *channel = NULL;
+	dma_cookie_t cookie;
+
+	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+	if (unlikely(!IS_ALIGNED((u32)sg, 4))) {
+		dev_err(ctx->device->dev, "[%s]: Data in sg list isn't "
+			"aligned! Addr: 0x%08x", __func__, (u32)sg);
+		return -EFAULT;
+	}
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		channel = ctx->device->dma.chan_mem2cryp;
+		ctx->device->dma.sg_src = sg;
+		ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev,
+						 ctx->device->dma.sg_src,
+						 ctx->device->dma.nents_src,
+						 direction);
+
+		if (!ctx->device->dma.sg_src_len) {
+			dev_dbg(ctx->device->dev,
+				"[%s]: Could not map the sg list (TO_DEVICE)",
+				__func__);
+			return -EFAULT;
+		}
+
+		dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+			"(TO_DEVICE)", __func__);
+
+		desc = dmaengine_prep_slave_sg(channel,
+				ctx->device->dma.sg_src,
+				ctx->device->dma.sg_src_len,
+				direction, DMA_CTRL_ACK);
+		break;
+
+	case DMA_FROM_DEVICE:
+		channel = ctx->device->dma.chan_cryp2mem;
+		ctx->device->dma.sg_dst = sg;
+		ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev,
+						 ctx->device->dma.sg_dst,
+						 ctx->device->dma.nents_dst,
+						 direction);
+
+		if (!ctx->device->dma.sg_dst_len) {
+			dev_dbg(ctx->device->dev,
+				"[%s]: Could not map the sg list (FROM_DEVICE)",
+				__func__);
+			return -EFAULT;
+		}
+
+		dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+			"(FROM_DEVICE)", __func__);
+
+		desc = dmaengine_prep_slave_sg(channel,
+				ctx->device->dma.sg_dst,
+				ctx->device->dma.sg_dst_len,
+				direction,
+				DMA_CTRL_ACK |
+				DMA_PREP_INTERRUPT);
+
+		desc->callback = cryp_dma_out_callback;
+		desc->callback_param = ctx;
+		break;
+
+	default:
+		dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction",
+			__func__);
+		return -EFAULT;
+	}
+
+	cookie = dmaengine_submit(desc);
+	dma_async_issue_pending(channel);
+
+	return 0;
+}
+
+static void cryp_dma_done(struct cryp_ctx *ctx)
+{
+	struct dma_chan *chan;
+
+	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+	chan = ctx->device->dma.chan_mem2cryp;
+	dmaengine_terminate_all(chan);
+	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
+		     ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
+
+	chan = ctx->device->dma.chan_cryp2mem;
+	dmaengine_terminate_all(chan);
+	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
+		     ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
+}
+
+static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
+			  int len)
+{
+	int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
+	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+	if (error) {
+		dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
+			"failed", __func__);
+		return error;
+	}
+
+	return len;
+}
+
+static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len)
+{
+	int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE);
+	if (error) {
+		dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
+			"failed", __func__);
+		return error;
+	}
+
+	return len;
+}
+
+static void cryp_polling_mode(struct cryp_ctx *ctx,
+			      struct cryp_device_data *device_data)
+{
+	int len = ctx->blocksize / BYTES_PER_WORD;
+	int remaining_length = ctx->datalen;
+	u32 *indata = (u32 *)ctx->indata;
+	u32 *outdata = (u32 *)ctx->outdata;
+
+	while (remaining_length > 0) {
+		writesl(&device_data->base->din, indata, len);
+		indata += len;
+		remaining_length -= (len * BYTES_PER_WORD);
+		cryp_wait_until_done(device_data);
+
+		readsl(&device_data->base->dout, outdata, len);
+		outdata += len;
+		cryp_wait_until_done(device_data);
+	}
+}
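+
+/*
+ * Illustrative note (not part of the original driver): each iteration of the
+ * loop above pushes one block (blocksize / 4 words) into the DIN FIFO, waits
+ * for the engine to finish and then pulls one block back from DOUT.
+ */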
+
+static int cryp_disable_power(struct device *dev,
+			      struct cryp_device_data *device_data,
+			      bool save_device_context)
+{
+	int ret = 0;
+
+	dev_dbg(dev, "[%s]", __func__);
+
+	spin_lock(&device_data->power_state_spinlock);
+	if (!device_data->power_state)
+		goto out;
+
+	spin_lock(&device_data->ctx_lock);
+	if (save_device_context && device_data->current_ctx) {
+		cryp_save_device_context(device_data,
+				&device_data->current_ctx->dev_ctx,
+				cryp_mode);
+		device_data->restore_dev_ctx = true;
+	}
+	spin_unlock(&device_data->ctx_lock);
+
+	clk_disable(device_data->clk);
+	ret = regulator_disable(device_data->pwr_regulator);
+	if (ret)
+		dev_err(dev, "[%s]: "
+				"regulator_disable() failed!",
+				__func__);
+
+	device_data->power_state = false;
+
+out:
+	spin_unlock(&device_data->power_state_spinlock);
+
+	return ret;
+}
+
+static int cryp_enable_power(
+		struct device *dev,
+		struct cryp_device_data *device_data,
+		bool restore_device_context)
+{
+	int ret = 0;
+
+	dev_dbg(dev, "[%s]", __func__);
+
+	spin_lock(&device_data->power_state_spinlock);
+	if (!device_data->power_state) {
+		ret = regulator_enable(device_data->pwr_regulator);
+		if (ret) {
+			dev_err(dev, "[%s]: regulator_enable() failed!",
+					__func__);
+			goto out;
+		}
+
+		ret = clk_enable(device_data->clk);
+		if (ret) {
+			dev_err(dev, "[%s]: clk_enable() failed!",
+					__func__);
+			regulator_disable(device_data->pwr_regulator);
+			goto out;
+		}
+		device_data->power_state = true;
+	}
+
+	if (device_data->restore_dev_ctx) {
+		spin_lock(&device_data->ctx_lock);
+		if (restore_device_context && device_data->current_ctx) {
+			device_data->restore_dev_ctx = false;
+			cryp_restore_device_context(device_data,
+					&device_data->current_ctx->dev_ctx);
+		}
+		spin_unlock(&device_data->ctx_lock);
+	}
+out:
+	spin_unlock(&device_data->power_state_spinlock);
+
+	return ret;
+}
+
+static int hw_crypt_noxts(struct cryp_ctx *ctx,
+			  struct cryp_device_data *device_data)
+{
+	int ret = 0;
+
+	const u8 *indata = ctx->indata;
+	u8 *outdata = ctx->outdata;
+	u32 datalen = ctx->datalen;
+	u32 outlen = datalen;
+
+	pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+	ctx->outlen = ctx->datalen;
+
+	if (unlikely(!IS_ALIGNED((u32)indata, 4))) {
+		pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: "
+			 "0x%08x", __func__, (u32)indata);
+		return -EINVAL;
+	}
+
+	ret = cryp_setup_context(ctx, device_data);
+
+	if (ret)
+		goto out;
+
+	if (cryp_mode == CRYP_MODE_INTERRUPT) {
+		cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO |
+				    CRYP_IRQ_SRC_OUTPUT_FIFO);
+
+		/*
+		 * ctx->outlen is decremented in the cryp_interrupt_handler
+		 * function. We had to add cpu_relax() (barrier) to make sure
+		 * that gcc didn't optimize away this variable.
+		 */
+		while (ctx->outlen > 0)
+			cpu_relax();
+	} else if (cryp_mode == CRYP_MODE_POLLING ||
+		   cryp_mode == CRYP_MODE_DMA) {
+		/*
+		 * The reason DMA ends up in this branch is that when running
+		 * cryp_mode == CRYP_MODE_DMA we only use the separate DMA
+		 * routines for cipher-/plaintext larger than the blocksize;
+		 * for the normal CRYPTO_ALG_TYPE_CIPHER path we still use
+		 * polling mode, since the overhead of setting up DMA would
+		 * eat up its benefits.
+		 */
+		cryp_polling_mode(ctx, device_data);
+	} else {
+		dev_err(ctx->device->dev, "[%s]: Invalid operation mode!",
+			__func__);
+		ret = -EPERM;
+		goto out;
+	}
+
+	cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
+	ctx->updated = 1;
+
+out:
+	ctx->indata = indata;
+	ctx->outdata = outdata;
+	ctx->datalen = datalen;
+	ctx->outlen = outlen;
+
+	return ret;
+}
+
+static int get_nents(struct scatterlist *sg, int nbytes)
+{
+	int nents = 0;
+
+	while (nbytes > 0) {
+		nbytes -= sg->length;
+		sg = sg_next(sg);
+		nents++;
+	}
+
+	return nents;
+}
+
+static int ablk_dma_crypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct cryp_device_data *device_data;
+
+	int bytes_written = 0;
+	int bytes_read = 0;
+	int ret;
+
+	pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+	ctx->datalen = areq->nbytes;
+	ctx->outlen = areq->nbytes;
+
+	ret = cryp_get_device_data(ctx, &device_data);
+	if (ret)
+		return ret;
+
+	ret = cryp_setup_context(ctx, device_data);
+	if (ret)
+		goto out;
+
+	/* We have the device now, so store the nents in the dma struct. */
+	ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen);
+	ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen);
+
+	/* Enable DMA in- and output. */
+	cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS);
+
+	bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen);
+	bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written);
+
+	wait_for_completion(&ctx->device->dma.cryp_dma_complete);
+	cryp_dma_done(ctx);
+
+	cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
+	ctx->updated = 1;
+
+out:
+	spin_lock(&device_data->ctx_lock);
+	device_data->current_ctx = NULL;
+	ctx->device = NULL;
+	spin_unlock(&device_data->ctx_lock);
+
+	/*
+	 * The down_interruptible part for this semaphore is called in
+	 * cryp_get_device_data.
+	 */
+	up(&driver_data.device_allocation);
+
+	if (unlikely(bytes_written != bytes_read))
+		return -EPERM;
+
+	return 0;
+}
+
+static int ablk_crypt(struct ablkcipher_request *areq)
+{
+	struct ablkcipher_walk walk;
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct cryp_device_data *device_data;
+	unsigned long src_paddr;
+	unsigned long dst_paddr;
+	int ret;
+	int nbytes;
+
+	pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+	ret = cryp_get_device_data(ctx, &device_data);
+	if (ret)
+		goto out;
+
+	ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes);
+	ret = ablkcipher_walk_phys(areq, &walk);
+
+	if (ret) {
+		pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!",
+			__func__);
+		goto out;
+	}
+
+	while ((nbytes = walk.nbytes) > 0) {
+		ctx->iv = walk.iv;
+		src_paddr = (page_to_phys(walk.src.page) + walk.src.offset);
+		ctx->indata = phys_to_virt(src_paddr);
+
+		dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset);
+		ctx->outdata = phys_to_virt(dst_paddr);
+
+		ctx->datalen = nbytes - (nbytes % ctx->blocksize);
+
+		ret = hw_crypt_noxts(ctx, device_data);
+		if (ret)
+			goto out;
+
+		nbytes -= ctx->datalen;
+		ret = ablkcipher_walk_done(areq, &walk, nbytes);
+		if (ret)
+			goto out;
+	}
+	ablkcipher_walk_complete(&walk);
+
+out:
+	/* Release the device */
+	spin_lock(&device_data->ctx_lock);
+	device_data->current_ctx = NULL;
+	ctx->device = NULL;
+	spin_unlock(&device_data->ctx_lock);
+
+	/*
+	 * The down_interruptible part for this semaphore is called in
+	 * cryp_get_device_data.
+	 */
+	up(&driver_data.device_allocation);
+
+	return ret;
+}
+
+static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+				 const u8 *key, unsigned int keylen)
+{
+	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	u32 *flags = &cipher->base.crt_flags;
+
+	pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		ctx->config.keysize = CRYP_KEY_SIZE_128;
+		break;
+
+	case AES_KEYSIZE_192:
+		ctx->config.keysize = CRYP_KEY_SIZE_192;
+		break;
+
+	case AES_KEYSIZE_256:
+		ctx->config.keysize = CRYP_KEY_SIZE_256;
+		break;
+
+	default:
+		pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	ctx->updated = 0;
+
+	return 0;
+}
+
+static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+				 const u8 *key, unsigned int keylen)
+{
+	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	u32 *flags = &cipher->base.crt_flags;
+	u32 tmp[DES_EXPKEY_WORDS];
+	int ret;
+
+	pr_debug(DEV_DBG_NAME " [%s]", __func__);
+	if (keylen != DES_KEY_SIZE) {
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+				__func__);
+		return -EINVAL;
+	}
+
+	ret = des_ekey(tmp, key);
+	if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+				__func__);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	ctx->updated = 0;
+	return 0;
+}
+
+static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+				  const u8 *key, unsigned int keylen)
+{
+	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	u32 *flags = &cipher->base.crt_flags;
+	const u32 *K = (const u32 *)key;
+	u32 tmp[DES3_EDE_EXPKEY_WORDS];
+	int i, ret;
+
+	pr_debug(DEV_DBG_NAME " [%s]", __func__);
+	if (keylen != DES3_EDE_KEY_SIZE) {
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+				__func__);
+		return -EINVAL;
+	}
+
+	/* Checking key interdependency for weak key detection. */
+	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+				!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+			(*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+				__func__);
+		return -EINVAL;
+	}
+	for (i = 0; i < 3; i++) {
+		ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
+		if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
+			pr_debug(DEV_DBG_NAME " [%s]: "
+					"CRYPTO_TFM_REQ_WEAK_KEY", __func__);
+			return -EINVAL;
+		}
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	ctx->updated = 0;
+	return 0;
+}
+
+static int cryp_blk_encrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+	pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+	ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+
+	/* DMA does not work for DES due to a hw bug */
+	if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
+		return ablk_dma_crypt(areq);
+
+	/* For everything except DMA, we run the non DMA version. */
+	return ablk_crypt(areq);
+}
+
+static int cryp_blk_decrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+	pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+	ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+
+	/* DMA does not work for DES due to a hw bug */
+	if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
+		return ablk_dma_crypt(areq);
+
+	/* For everything except DMA, we run the non DMA version. */
+	return ablk_crypt(areq);
+}
+
+struct cryp_algo_template {
+	enum cryp_algo_mode algomode;
+	struct crypto_alg crypto;
+};
+
+static int cryp_cra_init(struct crypto_tfm *tfm)
+{
+	struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct cryp_algo_template *cryp_alg = container_of(alg,
+			struct cryp_algo_template,
+			crypto);
+
+	ctx->config.algomode = cryp_alg->algomode;
+	ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+	return 0;
+}
+
+static struct cryp_algo_template cryp_algs[] = {
+	{
+		.algomode = CRYP_ALGO_AES_ECB,
+		.crypto = {
+			.cra_name = "aes",
+			.cra_driver_name = "aes-ux500",
+			.cra_priority =	300,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct cryp_ctx),
+			.cra_alignmask = 3,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_init = cryp_cra_init,
+			.cra_module = THIS_MODULE,
+			.cra_u = {
+				.ablkcipher = {
+					.min_keysize = AES_MIN_KEY_SIZE,
+					.max_keysize = AES_MAX_KEY_SIZE,
+					.setkey = aes_ablkcipher_setkey,
+					.encrypt = cryp_blk_encrypt,
+					.decrypt = cryp_blk_decrypt
+				}
+			}
+		}
+	},
+	{
+		.algomode = CRYP_ALGO_AES_ECB,
+		.crypto = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "ecb-aes-ux500",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct cryp_ctx),
+			.cra_alignmask = 3,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_init = cryp_cra_init,
+			.cra_module = THIS_MODULE,
+			.cra_u = {
+				.ablkcipher = {
+					.min_keysize = AES_MIN_KEY_SIZE,
+					.max_keysize = AES_MAX_KEY_SIZE,
+					.setkey = aes_ablkcipher_setkey,
+					.encrypt = cryp_blk_encrypt,
+					.decrypt = cryp_blk_decrypt,
+				}
+			}
+		}
+	},
+	{
+		.algomode = CRYP_ALGO_AES_CBC,
+		.crypto = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "cbc-aes-ux500",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct cryp_ctx),
+			.cra_alignmask = 3,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_init = cryp_cra_init,
+			.cra_module = THIS_MODULE,
+			.cra_u = {
+				.ablkcipher = {
+					.min_keysize = AES_MIN_KEY_SIZE,
+					.max_keysize = AES_MAX_KEY_SIZE,
+					.setkey = aes_ablkcipher_setkey,
+					.encrypt = cryp_blk_encrypt,
+					.decrypt = cryp_blk_decrypt,
+					.ivsize = AES_BLOCK_SIZE,
+				}
+			}
+		}
+	},
+	{
+		.algomode = CRYP_ALGO_AES_CTR,
+		.crypto = {
+			.cra_name = "ctr(aes)",
+			.cra_driver_name = "ctr-aes-ux500",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+						CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct cryp_ctx),
+			.cra_alignmask = 3,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_init = cryp_cra_init,
+			.cra_module = THIS_MODULE,
+			.cra_u = {
+				.ablkcipher = {
+					.min_keysize = AES_MIN_KEY_SIZE,
+					.max_keysize = AES_MAX_KEY_SIZE,
+					.setkey = aes_ablkcipher_setkey,
+					.encrypt = cryp_blk_encrypt,
+					.decrypt = cryp_blk_decrypt,
+					.ivsize = AES_BLOCK_SIZE,
+				}
+			}
+		}
+	},
+	{
+		.algomode = CRYP_ALGO_DES_ECB,
+		.crypto = {
+			.cra_name = "des",
+			.cra_driver_name = "des-ux500",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+						CRYPTO_ALG_ASYNC,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct cryp_ctx),
+			.cra_alignmask = 3,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_init = cryp_cra_init,
+			.cra_module = THIS_MODULE,
+			.cra_u = {
+				.ablkcipher = {
+					.min_keysize = DES_KEY_SIZE,
+					.max_keysize = DES_KEY_SIZE,
+					.setkey = des_ablkcipher_setkey,
+					.encrypt = cryp_blk_encrypt,
+					.decrypt = cryp_blk_decrypt
+				}
+			}
+		}
+
+	},
+	{
+		.algomode = CRYP_ALGO_TDES_ECB,
+		.crypto = {
+			.cra_name = "des3_ede",
+			.cra_driver_name = "des3_ede-ux500",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+						CRYPTO_ALG_ASYNC,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct cryp_ctx),
+			.cra_alignmask = 3,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_init = cryp_cra_init,
+			.cra_module = THIS_MODULE,
+			.cra_u = {
+				.ablkcipher = {
+					.min_keysize = DES3_EDE_KEY_SIZE,
+					.max_keysize = DES3_EDE_KEY_SIZE,
+					.setkey = des_ablkcipher_setkey,
+					.encrypt = cryp_blk_encrypt,
+					.decrypt = cryp_blk_decrypt
+				}
+			}
+		}
+	},
+	{
+		.algomode = CRYP_ALGO_DES_ECB,
+		.crypto = {
+			.cra_name = "ecb(des)",
+			.cra_driver_name = "ecb-des-ux500",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct cryp_ctx),
+			.cra_alignmask = 3,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_init = cryp_cra_init,
+			.cra_module = THIS_MODULE,
+			.cra_u = {
+				.ablkcipher = {
+					.min_keysize = DES_KEY_SIZE,
+					.max_keysize = DES_KEY_SIZE,
+					.setkey = des_ablkcipher_setkey,
+					.encrypt = cryp_blk_encrypt,
+					.decrypt = cryp_blk_decrypt,
+				}
+			}
+		}
+	},
+	{
+		.algomode = CRYP_ALGO_TDES_ECB,
+		.crypto = {
+			.cra_name = "ecb(des3_ede)",
+			.cra_driver_name = "ecb-des3_ede-ux500",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct cryp_ctx),
+			.cra_alignmask = 3,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_init = cryp_cra_init,
+			.cra_module = THIS_MODULE,
+			.cra_u = {
+				.ablkcipher = {
+					.min_keysize = DES3_EDE_KEY_SIZE,
+					.max_keysize = DES3_EDE_KEY_SIZE,
+					.setkey = des3_ablkcipher_setkey,
+					.encrypt = cryp_blk_encrypt,
+					.decrypt = cryp_blk_decrypt,
+				}
+			}
+		}
+	},
+	{
+		.algomode = CRYP_ALGO_DES_CBC,
+		.crypto = {
+			.cra_name = "cbc(des)",
+			.cra_driver_name = "cbc-des-ux500",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct cryp_ctx),
+			.cra_alignmask = 3,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_init = cryp_cra_init,
+			.cra_module = THIS_MODULE,
+			.cra_u = {
+				.ablkcipher = {
+					.min_keysize = DES_KEY_SIZE,
+					.max_keysize = DES_KEY_SIZE,
+					.setkey = des_ablkcipher_setkey,
+					.encrypt = cryp_blk_encrypt,
+					.decrypt = cryp_blk_decrypt,
+				}
+			}
+		}
+	},
+	{
+		.algomode = CRYP_ALGO_TDES_CBC,
+		.crypto = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "cbc-des3_ede-ux500",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct cryp_ctx),
+			.cra_alignmask = 3,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_init = cryp_cra_init,
+			.cra_module = THIS_MODULE,
+			.cra_u = {
+				.ablkcipher = {
+					.min_keysize = DES3_EDE_KEY_SIZE,
+					.max_keysize = DES3_EDE_KEY_SIZE,
+					.setkey = des3_ablkcipher_setkey,
+					.encrypt = cryp_blk_encrypt,
+					.decrypt = cryp_blk_decrypt,
+					.ivsize = DES3_EDE_BLOCK_SIZE,
+				}
+			}
+		}
+	}
+};
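+
+/*
+ * Illustrative sketch (not part of the original driver): once registered,
+ * these transforms show up in /proc/crypto and can be used from userspace
+ * via the AF_ALG socket interface by cra_name, e.g. for "cbc(aes)":
+ *
+ *	int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
+ *	struct sockaddr_alg sa = {
+ *		.salg_family = AF_ALG,
+ *		.salg_type   = "skcipher",
+ *		.salg_name   = "cbc(aes)",
+ *	};
+ *	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
+ *	setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, 16);
+ *	int op = accept(tfm, NULL, 0);
+ *
+ * followed by sendmsg() with ALG_SET_OP/ALG_SET_IV control messages and a
+ * read() of the result. Which implementation gets picked depends on
+ * cra_priority, so the ux500 driver is only exercised when it is the
+ * highest-priority "cbc(aes)" provider on the system.
+ */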
+
+/**
+ * cryp_algs_register_all - register all supported CRYP algorithms
+ */
+static int cryp_algs_register_all(void)
+{
+	int ret;
+	int i;
+	int count;
+
+	pr_debug("[%s]", __func__);
+
+	for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) {
+		ret = crypto_register_alg(&cryp_algs[i].crypto);
+		if (ret) {
+			count = i;
+			pr_err("[%s] alg registration failed",
+					cryp_algs[i].crypto.cra_driver_name);
+			goto unreg;
+		}
+	}
+	return 0;
+unreg:
+	for (i = 0; i < count; i++)
+		crypto_unregister_alg(&cryp_algs[i].crypto);
+	return ret;
+}
+
+/**
+ * cryp_algs_unregister_all - unregister all supported CRYP algorithms
+ */
+static void cryp_algs_unregister_all(void)
+{
+	int i;
+
+	pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+	for (i = 0; i < ARRAY_SIZE(cryp_algs); i++)
+		crypto_unregister_alg(&cryp_algs[i].crypto);
+}
+
+static int ux500_cryp_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct resource *res;
+	struct resource *res_irq;
+	struct cryp_device_data *device_data;
+	struct cryp_protection_config prot = {
+		.privilege_access = CRYP_STATE_ENABLE
+	};
+	struct device *dev = &pdev->dev;
+
+	dev_dbg(dev, "[%s]", __func__);
+	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
+	if (!device_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	device_data->dev = dev;
+	device_data->current_ctx = NULL;
+
+	/* Grab the DMA configuration from platform data. */
+	mem_to_engine = &((struct cryp_platform_data *)
+			 dev->platform_data)->mem_to_engine;
+	engine_to_mem = &((struct cryp_platform_data *)
+			 dev->platform_data)->engine_to_mem;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "[%s]: platform_get_resource() failed",
+				__func__);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	device_data->phybase = res->start;
+	device_data->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(device_data->base)) {
+		dev_err(dev, "[%s]: ioremap failed!", __func__);
+		ret = PTR_ERR(device_data->base);
+		goto out;
+	}
+
+	spin_lock_init(&device_data->ctx_lock);
+	spin_lock_init(&device_data->power_state_spinlock);
+
+	/* Enable power for CRYP hardware block */
+	device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape");
+	if (IS_ERR(device_data->pwr_regulator)) {
+		dev_err(dev, "[%s]: could not get cryp regulator", __func__);
+		ret = PTR_ERR(device_data->pwr_regulator);
+		device_data->pwr_regulator = NULL;
+		goto out;
+	}
+
+	/* Enable the clk for CRYP hardware block */
+	device_data->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(device_data->clk)) {
+		dev_err(dev, "[%s]: clk_get() failed!", __func__);
+		ret = PTR_ERR(device_data->clk);
+		goto out_regulator;
+	}
+
+	ret = clk_prepare(device_data->clk);
+	if (ret) {
+		dev_err(dev, "[%s]: clk_prepare() failed!", __func__);
+		goto out_regulator;
+	}
+
+	/* Enable device power (and clock) */
+	ret = cryp_enable_power(device_data->dev, device_data, false);
+	if (ret) {
+		dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
+		goto out_clk_unprepare;
+	}
+
+	if (cryp_check(device_data)) {
+		dev_err(dev, "[%s]: cryp_check() failed!", __func__);
+		ret = -EINVAL;
+		goto out_power;
+	}
+
+	if (cryp_configure_protection(device_data, &prot)) {
+		dev_err(dev, "[%s]: cryp_configure_protection() failed!",
+			__func__);
+		ret = -EINVAL;
+		goto out_power;
+	}
+
+	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res_irq) {
+		dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable",
+			__func__);
+		ret = -ENODEV;
+		goto out_power;
+	}
+
+	ret = devm_request_irq(&pdev->dev, res_irq->start,
+			       cryp_interrupt_handler, 0, "cryp1", device_data);
+	if (ret) {
+		dev_err(dev, "[%s]: Unable to request IRQ", __func__);
+		goto out_power;
+	}
+
+	if (cryp_mode == CRYP_MODE_DMA)
+		cryp_dma_setup_channel(device_data, dev);
+
+	platform_set_drvdata(pdev, device_data);
+
+	/* Put the new device into the device list... */
+	klist_add_tail(&device_data->list_node, &driver_data.device_list);
+
+	/* ... and signal that a new device is available. */
+	up(&driver_data.device_allocation);
+
+	atomic_set(&session_id, 1);
+
+	ret = cryp_algs_register_all();
+	if (ret) {
+		dev_err(dev, "[%s]: cryp_algs_register_all() failed!",
+			__func__);
+		goto out_power;
+	}
+
+	dev_info(dev, "successfully registered\n");
+
+	return 0;
+
+out_power:
+	cryp_disable_power(device_data->dev, device_data, false);
+
+out_clk_unprepare:
+	clk_unprepare(device_data->clk);
+
+out_regulator:
+	regulator_put(device_data->pwr_regulator);
+
+out:
+	return ret;
+}
+
+static int ux500_cryp_remove(struct platform_device *pdev)
+{
+	struct cryp_device_data *device_data;
+
+	dev_dbg(&pdev->dev, "[%s]", __func__);
+	device_data = platform_get_drvdata(pdev);
+	if (!device_data) {
+		dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+			__func__);
+		return -ENOMEM;
+	}
+
+	/* Try to decrease the number of available devices. */
+	if (down_trylock(&driver_data.device_allocation))
+		return -EBUSY;
+
+	/* Check that the device is free */
+	spin_lock(&device_data->ctx_lock);
+	/* current_ctx allocates a device, NULL = unallocated */
+	if (device_data->current_ctx) {
+		/* The device is busy */
+		spin_unlock(&device_data->ctx_lock);
+		/* Return the device to the pool. */
+		up(&driver_data.device_allocation);
+		return -EBUSY;
+	}
+
+	spin_unlock(&device_data->ctx_lock);
+
+	/* Remove the device from the list */
+	if (klist_node_attached(&device_data->list_node))
+		klist_remove(&device_data->list_node);
+
+	/* If this was the last device, remove the services */
+	if (list_empty(&driver_data.device_list.k_list))
+		cryp_algs_unregister_all();
+
+	if (cryp_disable_power(&pdev->dev, device_data, false))
+		dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
+			__func__);
+
+	clk_unprepare(device_data->clk);
+	regulator_put(device_data->pwr_regulator);
+
+	return 0;
+}
+
+static void ux500_cryp_shutdown(struct platform_device *pdev)
+{
+	struct cryp_device_data *device_data;
+
+	dev_dbg(&pdev->dev, "[%s]", __func__);
+
+	device_data = platform_get_drvdata(pdev);
+	if (!device_data) {
+		dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+			__func__);
+		return;
+	}
+
+	/* Check that the device is free */
+	spin_lock(&device_data->ctx_lock);
+	/* current_ctx allocates a device, NULL = unallocated */
+	if (!device_data->current_ctx) {
+		if (down_trylock(&driver_data.device_allocation))
+			dev_dbg(&pdev->dev, "[%s]: Cryp still in use! "
+				"Shutting down anyway...", __func__);
+		/*
+		 * (Allocate the device)
+		 * Need to set this to a non-NULL (dummy) value to prevent
+		 * the device from being handed out while we are shutting
+		 * down.
+		 */
+		device_data->current_ctx++;
+	}
+	spin_unlock(&device_data->ctx_lock);
+
+	/* Remove the device from the list */
+	if (klist_node_attached(&device_data->list_node))
+		klist_remove(&device_data->list_node);
+
+	/* If this was the last device, remove the services */
+	if (list_empty(&driver_data.device_list.k_list))
+		cryp_algs_unregister_all();
+
+	if (cryp_disable_power(&pdev->dev, device_data, false))
+		dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
+			__func__);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ux500_cryp_suspend(struct device *dev)
+{
+	int ret;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct cryp_device_data *device_data;
+	struct resource *res_irq;
+	struct cryp_ctx *temp_ctx = NULL;
+
+	dev_dbg(dev, "[%s]", __func__);
+
+	/* Handle state? */
+	device_data = platform_get_drvdata(pdev);
+	if (!device_data) {
+		dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
+		return -ENOMEM;
+	}
+
+	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res_irq)
+		dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable", __func__);
+	else
+		disable_irq(res_irq->start);
+
+	spin_lock(&device_data->ctx_lock);
+	if (!device_data->current_ctx)
+		device_data->current_ctx++;
+	spin_unlock(&device_data->ctx_lock);
+
+	if (device_data->current_ctx == ++temp_ctx) {
+		if (down_interruptible(&driver_data.device_allocation))
+			dev_dbg(dev, "[%s]: down_interruptible() failed",
+				__func__);
+		ret = cryp_disable_power(dev, device_data, false);
+
+	} else
+		ret = cryp_disable_power(dev, device_data, true);
+
+	if (ret)
+		dev_err(dev, "[%s]: cryp_disable_power()", __func__);
+
+	return ret;
+}
+
+static int ux500_cryp_resume(struct device *dev)
+{
+	int ret = 0;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct cryp_device_data *device_data;
+	struct resource *res_irq;
+	struct cryp_ctx *temp_ctx = NULL;
+
+	dev_dbg(dev, "[%s]", __func__);
+
+	device_data = platform_get_drvdata(pdev);
+	if (!device_data) {
+		dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
+		return -ENOMEM;
+	}
+
+	spin_lock(&device_data->ctx_lock);
+	if (device_data->current_ctx == ++temp_ctx)
+		device_data->current_ctx = NULL;
+	spin_unlock(&device_data->ctx_lock);
+
+	if (!device_data->current_ctx)
+		up(&driver_data.device_allocation);
+	else
+		ret = cryp_enable_power(dev, device_data, true);
+
+	if (ret)
+		dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
+	else {
+		res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+		if (res_irq)
+			enable_irq(res_irq->start);
+	}
+
+	return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
+
+static const struct of_device_id ux500_cryp_match[] = {
+	{ .compatible = "stericsson,ux500-cryp" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ux500_cryp_match);
+
+static struct platform_driver cryp_driver = {
+	.probe  = ux500_cryp_probe,
+	.remove = ux500_cryp_remove,
+	.shutdown = ux500_cryp_shutdown,
+	.driver = {
+		.name  = "cryp1",
+		.of_match_table = ux500_cryp_match,
+		.pm    = &ux500_cryp_pm,
+	}
+};
+
+static int __init ux500_cryp_mod_init(void)
+{
+	pr_debug("[%s] is called!", __func__);
+	klist_init(&driver_data.device_list, NULL, NULL);
+	/* Initialize the semaphore to 0 devices (locked state) */
+	sema_init(&driver_data.device_allocation, 0);
+	return platform_driver_register(&cryp_driver);
+}
+
+static void __exit ux500_cryp_mod_fini(void)
+{
+	pr_debug("[%s] is called!", __func__);
+	platform_driver_unregister(&cryp_driver);
+}
+
+module_init(ux500_cryp_mod_init);
+module_exit(ux500_cryp_mod_fini);
+
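+/*
+ * cryp_mode selects the hardware access method. The values follow
+ * enum cryp_mode in cryp.h: 0 = polling (default), 1 = interrupt, 2 = DMA.
+ */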
+module_param(cryp_mode, int, 0);
+
+MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
+MODULE_ALIAS_CRYPTO("aes-all");
+MODULE_ALIAS_CRYPTO("des-all");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c
new file mode 100644
index 0000000..08d291c
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irq.c
@@ -0,0 +1,45 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitmap.h>
+#include <linux/device.h>
+
+#include "cryp.h"
+#include "cryp_p.h"
+#include "cryp_irq.h"
+#include "cryp_irqp.h"
+
+void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+	u32 i;
+
+	dev_dbg(device_data->dev, "[%s]", __func__);
+
+	i = readl_relaxed(&device_data->base->imsc);
+	i = i | irq_src;
+	writel_relaxed(i, &device_data->base->imsc);
+}
+
+void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+	u32 i;
+
+	dev_dbg(device_data->dev, "[%s]", __func__);
+
+	i = readl_relaxed(&device_data->base->imsc);
+	i = i & ~irq_src;
+	writel_relaxed(i, &device_data->base->imsc);
+}
+
+bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+	return (readl_relaxed(&device_data->base->mis) & irq_src) > 0;
+}
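+
+/*
+ * Usage sketch (illustrative only, not part of the original driver): a
+ * typical service path first checks the masked interrupt status and then
+ * masks the source it is about to handle, e.g.:
+ *
+ *	if (cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO)) {
+ *		cryp_disable_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO);
+ *		// drain the output FIFO, then call cryp_enable_irq_src()
+ *		// again once more output is expected
+ *	}
+ */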
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h
new file mode 100644
index 0000000..5a7837f
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irq.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_IRQ_H_
+#define _CRYP_IRQ_H_
+
+#include "cryp.h"
+
+enum cryp_irq_src_id {
+	CRYP_IRQ_SRC_INPUT_FIFO = 0x1,
+	CRYP_IRQ_SRC_OUTPUT_FIFO = 0x2,
+	CRYP_IRQ_SRC_ALL = 0x3
+};
+
+/**
+ * M0 Functions
+ */
+void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+#endif				/* _CRYP_IRQ_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h
new file mode 100644
index 0000000..8b339cc
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irqp.h
@@ -0,0 +1,125 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __CRYP_IRQP_H_
+#define __CRYP_IRQP_H_
+
+#include "cryp_irq.h"
+
+/**
+ *
+ * CRYP Registers - Offset mapping
+ *     +-----------------+
+ * 00h | CRYP_CR         |  Configuration register
+ *     +-----------------+
+ * 04h | CRYP_SR         |  Status register
+ *     +-----------------+
+ * 08h | CRYP_DIN        |  Data In register
+ *     +-----------------+
+ * 0ch | CRYP_DOUT       |  Data out register
+ *     +-----------------+
+ * 10h | CRYP_DMACR      |  DMA control register
+ *     +-----------------+
+ * 14h | CRYP_IMSC       |  IMSC
+ *     +-----------------+
+ * 18h | CRYP_RIS        |  Raw interrupt status
+ *     +-----------------+
+ * 1ch | CRYP_MIS        |  Masked interrupt status.
+ *     +-----------------+
+ *       Key registers
+ *       IVR registers
+ *       Peripheral
+ *       Cell IDs
+ *
+ *       Refer to the data structure below for the rest of the register map
+ */
+
+/**
+ * struct cryp_register
+ * @cr			- Configuration register
+ * @status		- Status register
+ * @din			- Data input register
+ * @din_size		- Data input size register
+ * @dout		- Data output register
+ * @dout_size		- Data output size register
+ * @dmacr		- Dma control register
+ * @imsc		- Interrupt mask set/clear register
+ * @ris			- Raw interrupt status
+ * @mis			- Masked interrupt status register
+ * @key_1_l		- Key register 1 L
+ * @key_1_r		- Key register 1 R
+ * @key_2_l		- Key register 2 L
+ * @key_2_r		- Key register 2 R
+ * @key_3_l		- Key register 3 L
+ * @key_3_r		- Key register 3 R
+ * @key_4_l		- Key register 4 L
+ * @key_4_r		- Key register 4 R
+ * @init_vect_0_l	- init vector 0 L
+ * @init_vect_0_r	- init vector 0 R
+ * @init_vect_1_l	- init vector 1 L
+ * @init_vect_1_r	- init vector 1 R
+ * @cryp_unused1	- unused registers
+ * @itcr		- Integration test control register
+ * @itip		- Integration test input register
+ * @itop		- Integration test output register
+ * @cryp_unused2	- unused registers
+ * @periphId0		- FE0 CRYP Peripheral Identification Register
+ * @periphId1		- FE4
+ * @periphId2		- FE8
+ * @periphId3		- FEC
+ * @pcellId0		- FF0  CRYP PCell Identification Register
+ * @pcellId1		- FF4
+ * @pcellId2		- FF8
+ * @pcellId3		- FFC
+ */
+struct cryp_register {
+	u32 cr;			/* Configuration register   */
+	u32 sr;			/* Status register          */
+	u32 din;		/* Data input register      */
+	u32 din_size;		/* Data input size register */
+	u32 dout;		/* Data output register     */
+	u32 dout_size;		/* Data output size register */
+	u32 dmacr;		/* Dma control register     */
+	u32 imsc;		/* Interrupt mask set/clear register */
+	u32 ris;		/* Raw interrupt status             */
+	u32 mis;		/* Masked interrupt status register  */
+
+	u32 key_1_l;		/*Key register 1 L */
+	u32 key_1_r;		/*Key register 1 R */
+	u32 key_2_l;		/*Key register 2 L */
+	u32 key_2_r;		/*Key register 2 R */
+	u32 key_3_l;		/*Key register 3 L */
+	u32 key_3_r;		/*Key register 3 R */
+	u32 key_4_l;		/*Key register 4 L */
+	u32 key_4_r;		/*Key register 4 R */
+
+	u32 init_vect_0_l;	/*init vector 0 L */
+	u32 init_vect_0_r;	/*init vector 0 R */
+	u32 init_vect_1_l;	/*init vector 1 L */
+	u32 init_vect_1_r;	/*init vector 1 R */
+
+	u32 cryp_unused1[(0x80 - 0x58) / sizeof(u32)];	/* unused registers */
+	u32 itcr;		/*Integration test control register */
+	u32 itip;		/*Integration test input register */
+	u32 itop;		/*Integration test output register */
+	u32 cryp_unused2[(0xFE0 - 0x8C) / sizeof(u32)];	/* unused registers */
+
+	u32 periphId0;		/* FE0  CRYP Peripheral Identification Register */
+	u32 periphId1;		/* FE4 */
+	u32 periphId2;		/* FE8 */
+	u32 periphId3;		/* FEC */
+
+	u32 pcellId0;		/* FF0  CRYP PCell Identification Register */
+	u32 pcellId1;		/* FF4 */
+	u32 pcellId2;		/* FF8 */
+	u32 pcellId3;		/* FFC */
+};
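+
+/*
+ * Illustrative sketch (an assumption, not code from the original driver):
+ * with the base address mapped as a struct cryp_register, the ID registers
+ * can be verified with plain readl_relaxed() accesses, e.g.:
+ *
+ *	struct cryp_register __iomem *reg = device_data->base;
+ *
+ *	if (readl_relaxed(&reg->periphId0) != CRYP_PERIPHERAL_ID0 ||
+ *	    readl_relaxed(&reg->pcellId0) != CRYP_PCELL_ID0)
+ *		return -EPERM; // unexpected hardware revision
+ *
+ * where CRYP_PERIPHERAL_ID0 and CRYP_PCELL_ID0 are the expected values
+ * defined in cryp_p.h.
+ */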
+
+#endif
diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h
new file mode 100644
index 0000000..6dcffe1
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_p.h
@@ -0,0 +1,123 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_P_H_
+#define _CRYP_P_H_
+
+#include <linux/io.h>
+#include <linux/bitops.h>
+
+#include "cryp.h"
+#include "cryp_irqp.h"
+
+/**
+ * Generic Macros
+ */
+#define CRYP_SET_BITS(reg_name, mask) \
+	writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
+
+#define CRYP_WRITE_BIT(reg_name, val, mask) \
+	writel_relaxed(((readl_relaxed(reg_name) & ~(mask)) |\
+			((val) & (mask))), reg_name)
+
+#define CRYP_TEST_BITS(reg_name, val) \
+	(readl_relaxed(reg_name) & (val))
+
+#define CRYP_PUT_BITS(reg, val, shift, mask) \
+	writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \
+		(((u32)val << shift) & (mask))), reg)
+
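+/*
+ * Example (illustrative only): CRYP_PUT_BITS() does a read-modify-write of a
+ * single register field. Writing a key-size value into the KEYSIZE field of
+ * the configuration register would look like:
+ *
+ *	CRYP_PUT_BITS(&device_data->base->cr, key_size,
+ *		      CRYP_CR_KEYSIZE_POS, CRYP_CR_KEYSIZE_MASK);
+ *
+ * with key_size a caller-supplied 2-bit field value and the _POS/_MASK
+ * constants defined further down in this header.
+ */
+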
+/**
+ * CRYP specific Macros
+ */
+#define CRYP_PERIPHERAL_ID0		0xE3
+#define CRYP_PERIPHERAL_ID1		0x05
+
+#define CRYP_PERIPHERAL_ID2_DB8500	0x28
+#define CRYP_PERIPHERAL_ID3		0x00
+
+#define CRYP_PCELL_ID0			0x0D
+#define CRYP_PCELL_ID1			0xF0
+#define CRYP_PCELL_ID2			0x05
+#define CRYP_PCELL_ID3			0xB1
+
+/**
+ * CRYP register default values
+ */
+#define MAX_DEVICE_SUPPORT		2
+
+/* Priv set, keyrden set and datatype 8bits swapped set as default. */
+#define CRYP_CR_DEFAULT			0x0482
+#define CRYP_DMACR_DEFAULT		0x0
+#define CRYP_IMSC_DEFAULT		0x0
+#define CRYP_DIN_DEFAULT		0x0
+#define CRYP_DOUT_DEFAULT		0x0
+#define CRYP_KEY_DEFAULT		0x0
+#define CRYP_INIT_VECT_DEFAULT		0x0
+
+/**
+ * CRYP Control register specific mask
+ */
+#define CRYP_CR_SECURE_MASK		BIT(0)
+#define CRYP_CR_PRLG_MASK		BIT(1)
+#define CRYP_CR_ALGODIR_MASK		BIT(2)
+#define CRYP_CR_ALGOMODE_MASK		(BIT(5) | BIT(4) | BIT(3))
+#define CRYP_CR_DATATYPE_MASK		(BIT(7) | BIT(6))
+#define CRYP_CR_KEYSIZE_MASK		(BIT(9) | BIT(8))
+#define CRYP_CR_KEYRDEN_MASK		BIT(10)
+#define CRYP_CR_KSE_MASK		BIT(11)
+#define CRYP_CR_START_MASK		BIT(12)
+#define CRYP_CR_INIT_MASK		BIT(13)
+#define CRYP_CR_FFLUSH_MASK		BIT(14)
+#define CRYP_CR_CRYPEN_MASK		BIT(15)
+#define CRYP_CR_CONTEXT_SAVE_MASK	(CRYP_CR_SECURE_MASK |\
+					 CRYP_CR_PRLG_MASK |\
+					 CRYP_CR_ALGODIR_MASK |\
+					 CRYP_CR_ALGOMODE_MASK |\
+					 CRYP_CR_DATATYPE_MASK |\
+					 CRYP_CR_KEYSIZE_MASK |\
+					 CRYP_CR_KEYRDEN_MASK |\
+					 CRYP_CR_DATATYPE_MASK)
+
+
+#define CRYP_SR_INFIFO_READY_MASK	(BIT(0) | BIT(1))
+#define CRYP_SR_IFEM_MASK		BIT(0)
+#define CRYP_SR_BUSY_MASK		BIT(4)
+
+/**
+ * Bit position used while setting bits in register
+ */
+#define CRYP_CR_PRLG_POS		1
+#define CRYP_CR_ALGODIR_POS		2
+#define CRYP_CR_ALGOMODE_POS		3
+#define CRYP_CR_DATATYPE_POS		6
+#define CRYP_CR_KEYSIZE_POS		8
+#define CRYP_CR_KEYRDEN_POS		10
+#define CRYP_CR_KSE_POS			11
+#define CRYP_CR_START_POS		12
+#define CRYP_CR_INIT_POS		13
+#define CRYP_CR_CRYPEN_POS		15
+
+#define CRYP_SR_BUSY_POS		4
+
+/**
+ * CRYP PCRs------PC_NAND control register
+ * BIT_MASK
+ */
+#define CRYP_DMA_REQ_MASK		(BIT(1) | BIT(0))
+#define CRYP_DMA_REQ_MASK_POS		0
+
+
+struct cryp_system_context {
+	/* CRYP Register structure */
+	struct cryp_register *p_cryp_reg[MAX_DEVICE_SUPPORT];
+};
+
+#endif
diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile
new file mode 100644
index 0000000..784d9c0
--- /dev/null
+++ b/drivers/crypto/ux500/hash/Makefile
@@ -0,0 +1,11 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
+CFLAGS_hash_core.o := -DDEBUG
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o
+ux500_hash-objs :=  hash_core.o
diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h
new file mode 100644
index 0000000..be6eb54
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_alg.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen (shujuan.chen@stericsson.com)
+ * Author: Joakim Bech (joakim.xx.bech@stericsson.com)
+ * Author: Berne Hebark (berne.hebark@stericsson.com))
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _HASH_ALG_H
+#define _HASH_ALG_H
+
+#include <linux/bitops.h>
+
+#define HASH_BLOCK_SIZE			64
+#define HASH_DMA_FIFO			4
+#define HASH_DMA_ALIGN_SIZE		4
+#define HASH_DMA_PERFORMANCE_MIN_SIZE	1024
+#define HASH_BYTES_PER_WORD		4
+
+/* Maximum value of the length's high word */
+#define HASH_HIGH_WORD_MAX_VAL		0xFFFFFFFFUL
+
+/* Power on Reset values HASH registers */
+#define HASH_RESET_CR_VALUE		0x0
+#define HASH_RESET_STR_VALUE		0x0
+
+/* Number of context swap registers */
+#define HASH_CSR_COUNT			52
+
+#define HASH_RESET_CSRX_REG_VALUE	0x0
+#define HASH_RESET_CSFULL_REG_VALUE	0x0
+#define HASH_RESET_CSDATAIN_REG_VALUE	0x0
+
+#define HASH_RESET_INDEX_VAL		0x0
+#define HASH_RESET_BIT_INDEX_VAL	0x0
+#define HASH_RESET_BUFFER_VAL		0x0
+#define HASH_RESET_LEN_HIGH_VAL		0x0
+#define HASH_RESET_LEN_LOW_VAL		0x0
+
+/* Control register bitfields */
+#define HASH_CR_RESUME_MASK	0x11FCF
+
+#define HASH_CR_SWITCHON_POS	31
+#define HASH_CR_SWITCHON_MASK	BIT(31)
+
+#define HASH_CR_EMPTYMSG_POS	20
+#define HASH_CR_EMPTYMSG_MASK	BIT(20)
+
+#define HASH_CR_DINF_POS	12
+#define HASH_CR_DINF_MASK	BIT(12)
+
+#define HASH_CR_NBW_POS		8
+#define HASH_CR_NBW_MASK	0x00000F00UL
+
+#define HASH_CR_LKEY_POS	16
+#define HASH_CR_LKEY_MASK	BIT(16)
+
+#define HASH_CR_ALGO_POS	7
+#define HASH_CR_ALGO_MASK	BIT(7)
+
+#define HASH_CR_MODE_POS	6
+#define HASH_CR_MODE_MASK	BIT(6)
+
+#define HASH_CR_DATAFORM_POS	4
+#define HASH_CR_DATAFORM_MASK	(BIT(4) | BIT(5))
+
+#define HASH_CR_DMAE_POS	3
+#define HASH_CR_DMAE_MASK	BIT(3)
+
+#define HASH_CR_INIT_POS	2
+#define HASH_CR_INIT_MASK	BIT(2)
+
+#define HASH_CR_PRIVN_POS	1
+#define HASH_CR_PRIVN_MASK	BIT(1)
+
+#define HASH_CR_SECN_POS	0
+#define HASH_CR_SECN_MASK	BIT(0)
+
+/* Start register bitfields */
+#define HASH_STR_DCAL_POS	8
+#define HASH_STR_DCAL_MASK	BIT(8)
+#define HASH_STR_DEFAULT	0x0
+
+#define HASH_STR_NBLW_POS	0
+#define HASH_STR_NBLW_MASK	0x0000001FUL
+
+#define HASH_NBLW_MAX_VAL	0x1F
+
+/* PrimeCell IDs */
+#define HASH_P_ID0		0xE0
+#define HASH_P_ID1		0x05
+#define HASH_P_ID2		0x38
+#define HASH_P_ID3		0x00
+#define HASH_CELL_ID0		0x0D
+#define HASH_CELL_ID1		0xF0
+#define HASH_CELL_ID2		0x05
+#define HASH_CELL_ID3		0xB1
+
+#define HASH_SET_BITS(reg_name, mask)	\
+	writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
+
+#define HASH_CLEAR_BITS(reg_name, mask)	\
+	writel_relaxed((readl_relaxed(reg_name) & ~mask), reg_name)
+
+#define HASH_PUT_BITS(reg, val, shift, mask)	\
+	writel_relaxed(((readl(reg) & ~(mask)) |	\
+		(((u32)val << shift) & (mask))), reg)
+
+#define HASH_SET_DIN(val, len)	writesl(&device_data->base->din, (val), (len))
+
+#define HASH_INITIALIZE			\
+	HASH_PUT_BITS(			\
+		&device_data->base->cr,	\
+		0x01, HASH_CR_INIT_POS,	\
+		HASH_CR_INIT_MASK)
+
+#define HASH_SET_DATA_FORMAT(data_format)				\
+		HASH_PUT_BITS(						\
+			&device_data->base->cr,				\
+			(u32) (data_format), HASH_CR_DATAFORM_POS,	\
+			HASH_CR_DATAFORM_MASK)
+#define HASH_SET_NBLW(val)					\
+		HASH_PUT_BITS(					\
+			&device_data->base->str,		\
+			(u32) (val), HASH_STR_NBLW_POS,		\
+			HASH_STR_NBLW_MASK)
+#define HASH_SET_DCAL					\
+		HASH_PUT_BITS(				\
+			&device_data->base->str,	\
+			0x01, HASH_STR_DCAL_POS,	\
+			HASH_STR_DCAL_MASK)
+
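+/*
+ * Illustrative CPU-mode sequence (a sketch of how the macros above combine;
+ * the authoritative flow lives in hash_core.c):
+ *
+ *	HASH_INITIALIZE;			// reset the HASH core
+ *	HASH_SET_DATA_FORMAT(HASH_DATA_8_BITS);	// select the data format
+ *	HASH_SET_DIN(buffer, nwords);		// feed message words
+ *	HASH_SET_NBLW(valid_bits_in_last_word);	// 0..31 valid bits
+ *	HASH_SET_DCAL;				// kick off final calculation
+ *
+ * All of these expand to accesses through a local "device_data" pointer, so
+ * they can only be used where such a pointer is in scope.
+ */
+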
+/* Hardware access method */
+enum hash_mode {
+	HASH_MODE_CPU,
+	HASH_MODE_DMA
+};
+
+/**
+ * struct uint64 - Structure to handle 64-bit integers.
+ * @high_word:	Most significant bits.
+ * @low_word:	Least significant bits.
+ *
+ * Used to handle 64-bit integers.
+ */
+struct uint64 {
+	u32 high_word;
+	u32 low_word;
+};
+
+/**
+ * struct hash_register - Contains all registers in ux500 hash hardware.
+ * @cr:		HASH control register (0x000).
+ * @din:	HASH data input register (0x004).
+ * @str:	HASH start register (0x008).
+ * @hx:		HASH digest register 0..7 (0x00c-0x01C).
+ * @padding0:	Reserved (0x02C).
+ * @itcr:	Integration test control register (0x080).
+ * @itip:	Integration test input register (0x084).
+ * @itop:	Integration test output register (0x088).
+ * @padding1:	Reserved (0x08C).
+ * @csfull:	HASH context full register (0x0F8).
+ * @csdatain:	HASH context swap data input register (0x0FC).
+ * @csrx:	HASH context swap register 0..51 (0x100-0x1CC).
+ * @padding2:	Reserved (0x1D0).
+ * @periphid0:	HASH peripheral identification register 0 (0xFE0).
+ * @periphid1:	HASH peripheral identification register 1 (0xFE4).
+ * @periphid2:	HASH peripheral identification register 2 (0xFE8).
+ * @periphid3:	HASH peripheral identification register 3 (0xFEC).
+ * @cellid0:	HASH PCell identification register 0 (0xFF0).
+ * @cellid1:	HASH PCell identification register 1 (0xFF4).
+ * @cellid2:	HASH PCell identification register 2 (0xFF8).
+ * @cellid3:	HASH PCell identification register 3 (0xFFC).
+ *
+ * The device communicates with the HASH block via 32-bit-wide control
+ * registers accessible over the 32-bit-wide AMBA rev. 2.0 AHB bus. Below is
+ * a structure describing the registers used.
+ */
+struct hash_register {
+	u32 cr;
+	u32 din;
+	u32 str;
+	u32 hx[8];
+
+	u32 padding0[(0x080 - 0x02C) / sizeof(u32)];
+
+	u32 itcr;
+	u32 itip;
+	u32 itop;
+
+	u32 padding1[(0x0F8 - 0x08C) / sizeof(u32)];
+
+	u32 csfull;
+	u32 csdatain;
+	u32 csrx[HASH_CSR_COUNT];
+
+	u32 padding2[(0xFE0 - 0x1D0) / sizeof(u32)];
+
+	u32 periphid0;
+	u32 periphid1;
+	u32 periphid2;
+	u32 periphid3;
+
+	u32 cellid0;
+	u32 cellid1;
+	u32 cellid2;
+	u32 cellid3;
+};
+
+/**
+ * struct hash_state - Hash context state.
+ * @temp_cr:	Temporary HASH Control Register.
+ * @str_reg:	HASH Start Register.
+ * @din_reg:	HASH Data Input Register.
+ * @csr[52]:	HASH Context Swap Registers 0-39.
+ * @csfull:	HASH Context Swap Register 40, i.e. status flags.
+ * @csdatain:	HASH Context Swap Register 41, i.e. input data.
+ * @buffer:	Working buffer for messages going to the hardware.
+ * @length:	Length of the part of message hashed so far (floor(N/64) * 64).
+ * @index:	Valid number of bytes in buffer (N % 64).
+ * @bit_index:	Valid number of bits in buffer (N % 8).
+ *
+ * This structure is used between context switches, i.e. when ongoing jobs are
+ * interrupted by new jobs. When this happens we need to store intermediate
+ * results in software.
+ *
+ * WARNING: the placement of "index" in the structure is chosen to keep
+ * "buffer" aligned on a 4-byte boundary. This is highly implementation
+ * dependent and MUST be checked whenever this code is ported to new platforms.
+ */
+struct hash_state {
+	u32		temp_cr;
+	u32		str_reg;
+	u32		din_reg;
+	u32		csr[52];
+	u32		csfull;
+	u32		csdatain;
+	u32		buffer[HASH_BLOCK_SIZE / sizeof(u32)];
+	struct uint64	length;
+	u8		index;
+	u8		bit_index;
+};
+
+/**
+ * enum hash_device_id - HASH device ID.
+ * @HASH_DEVICE_ID_0: Hash hardware with ID 0
+ * @HASH_DEVICE_ID_1: Hash hardware with ID 1
+ */
+enum hash_device_id {
+	HASH_DEVICE_ID_0 = 0,
+	HASH_DEVICE_ID_1 = 1
+};
+
+/**
+ * enum hash_data_format - HASH data format.
+ * @HASH_DATA_32_BITS:	32 bits data format
+ * @HASH_DATA_16_BITS:	16 bits data format
+ * @HASH_DATA_8_BITS:	8 bits data format.
+ * @HASH_DATA_1_BITS:	1 bit data format.
+ */
+enum hash_data_format {
+	HASH_DATA_32_BITS	= 0x0,
+	HASH_DATA_16_BITS	= 0x1,
+	HASH_DATA_8_BITS	= 0x2,
+	HASH_DATA_1_BIT		= 0x3
+};
+
+/**
+ * enum hash_algo - Enumeration for selecting between SHA1 or SHA2 algorithm.
+ * @HASH_ALGO_SHA1: Indicates that SHA1 is used.
+ * @HASH_ALGO_SHA2: Indicates that SHA2 (SHA256) is used.
+ */
+enum hash_algo {
+	HASH_ALGO_SHA1		= 0x0,
+	HASH_ALGO_SHA256	= 0x1
+};
+
+/**
+ * enum hash_op - Enumeration for selecting between HASH or HMAC mode.
+ * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode.
+ * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC.
+ */
+enum hash_op {
+	HASH_OPER_MODE_HASH = 0x0,
+	HASH_OPER_MODE_HMAC = 0x1
+};
+
+/**
+ * struct hash_config - Configuration data for the hardware.
+ * @data_format:	Format of data entered into the hash data in register.
+ * @algorithm:		Algorithm selection bit.
+ * @oper_mode:		Operating mode selection bit.
+ */
+struct hash_config {
+	int data_format;
+	int algorithm;
+	int oper_mode;
+};
+
+/**
+ * struct hash_dma - Structure used for dma.
+ * @mask:		DMA capabilities bitmap mask.
+ * @complete:		Used to maintain state for a "completion".
+ * @chan_mem2hash:	DMA channel.
+ * @cfg_mem2hash:	DMA channel configuration.
+ * @sg_len:		Scatterlist length.
+ * @sg:			Scatterlist.
+ * @nents:		Number of sg entries.
+ */
+struct hash_dma {
+	dma_cap_mask_t		mask;
+	struct completion	complete;
+	struct dma_chan		*chan_mem2hash;
+	void			*cfg_mem2hash;
+	int			sg_len;
+	struct scatterlist	*sg;
+	int			nents;
+};
+
+/**
+ * struct hash_ctx - The context used for hash calculations.
+ * @key:	The key used in the operation.
+ * @keylen:	The length of the key.
+ * @state:	The state of the current calculations.
+ * @config:	The current configuration.
+ * @digestsize:	The size of current digest.
+ * @device:	Pointer to the device structure.
+ */
+struct hash_ctx {
+	u8			*key;
+	u32			keylen;
+	struct hash_config	config;
+	int			digestsize;
+	struct hash_device_data	*device;
+};
+
+/**
+ * struct hash_req_ctx - The request context used for hash calculations.
+ * @state:	The state of the current calculations.
+ * @dma_mode:	Used in special cases (workaround), e.g. need to change to
+ *		cpu mode, if not supported/working in dma mode.
+ * @updated:	Indicates if hardware is initialized for new operations.
+ */
+struct hash_req_ctx {
+	struct hash_state	state;
+	bool			dma_mode;
+	u8			updated;
+};
+
+/**
+ * struct hash_device_data - structure for a hash device.
+ * @base:		Pointer to virtual base address of the hash device.
+ * @phybase:		Pointer to physical memory location of the hash device.
+ * @list_node:		For inclusion in klist.
+ * @dev:		Pointer to the device dev structure.
+ * @ctx_lock:		Spinlock for current_ctx.
+ * @current_ctx:	Pointer to the currently allocated context.
+ * @power_state:	TRUE = power state on, FALSE = power state off.
+ * @power_state_lock:	Spinlock for power_state.
+ * @regulator:		Pointer to the device's power control.
+ * @clk:		Pointer to the device's clock control.
+ * @restore_dev_state:	TRUE = saved state, FALSE = no saved state.
+ * @dma:		Structure used for dma.
+ */
+struct hash_device_data {
+	struct hash_register __iomem	*base;
+	phys_addr_t             phybase;
+	struct klist_node	list_node;
+	struct device		*dev;
+	struct spinlock		ctx_lock;
+	struct hash_ctx		*current_ctx;
+	bool			power_state;
+	struct spinlock		power_state_lock;
+	struct regulator	*regulator;
+	struct clk		*clk;
+	bool			restore_dev_state;
+	struct hash_state	state; /* Used for saving and resuming state */
+	struct hash_dma		dma;
+};
+
+int hash_check_hw(struct hash_device_data *device_data);
+
+int hash_setconfiguration(struct hash_device_data *device_data,
+		struct hash_config *config);
+
+void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx);
+
+void hash_get_digest(struct hash_device_data *device_data,
+		u8 *digest, int algorithm);
+
+int hash_hw_update(struct ahash_request *req);
+
+int hash_save_state(struct hash_device_data *device_data,
+		struct hash_state *state);
+
+int hash_resume_state(struct hash_device_data *device_data,
+		const struct hash_state *state);
+
+#endif
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
new file mode 100644
index 0000000..633321a
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -0,0 +1,1964 @@
+/*
+ * Cryptographic API.
+ * Support for Nomadik hardware crypto engine.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) "hashX hashX: " fmt
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/klist.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/crypto.h>
+
+#include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/bitops.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+
+#include <linux/platform_data/crypto-ux500.h>
+
+#include "hash_alg.h"
+
+static int hash_mode;
+module_param(hash_mode, int, 0);
+MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
+
+/* HMAC-SHA1, no key */
+static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
+	0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
+	0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
+	0x70, 0x69, 0x0e, 0x1d
+};
+
+/* HMAC-SHA256, no key */
+static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
+	0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
+	0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
+	0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
+	0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
+};
+
+/**
+ * struct hash_driver_data - data specific to the driver.
+ *
+ * @device_list:	A list of registered devices to choose from.
+ * @device_allocation:	A semaphore initialized with number of devices.
+ */
+struct hash_driver_data {
+	struct klist		device_list;
+	struct semaphore	device_allocation;
+};
+
+static struct hash_driver_data	driver_data;
+
+/* Declaration of functions */
+/**
+ * hash_messagepad - Pads a message and writes the NBLW bits.
+ * @device_data:	Structure for the hash device.
+ * @message:		Last word of a message
+ * @index_bytes:	The number of bytes in the last message
+ *
+ * This function manages the final part of the digest calculation, when less
+ * than 512 bits (64 bytes) remain in the message. This means index_bytes < 64.
+ *
+ */
+static void hash_messagepad(struct hash_device_data *device_data,
+			    const u32 *message, u8 index_bytes);
+
+/**
+ * release_hash_device - Releases a previously allocated hash device.
+ * @device_data:	Structure for the hash device.
+ *
+ */
+static void release_hash_device(struct hash_device_data *device_data)
+{
+	spin_lock(&device_data->ctx_lock);
+	device_data->current_ctx->device = NULL;
+	device_data->current_ctx = NULL;
+	spin_unlock(&device_data->ctx_lock);
+
+	/*
+	 * The down_interruptible part for this semaphore is called in
+	 * cryp_get_device_data.
+	 */
+	up(&driver_data.device_allocation);
+}
+
+static void hash_dma_setup_channel(struct hash_device_data *device_data,
+				   struct device *dev)
+{
+	struct hash_platform_data *platform_data = dev->platform_data;
+	struct dma_slave_config conf = {
+		.direction = DMA_MEM_TO_DEV,
+		.dst_addr = device_data->phybase + HASH_DMA_FIFO,
+		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+		.dst_maxburst = 16,
+	};
+
+	dma_cap_zero(device_data->dma.mask);
+	dma_cap_set(DMA_SLAVE, device_data->dma.mask);
+
+	device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
+	device_data->dma.chan_mem2hash =
+		dma_request_channel(device_data->dma.mask,
+				    platform_data->dma_filter,
+				    device_data->dma.cfg_mem2hash);
+
+	dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
+
+	init_completion(&device_data->dma.complete);
+}
+
+static void hash_dma_callback(void *data)
+{
+	struct hash_ctx *ctx = data;
+
+	complete(&ctx->device->dma.complete);
+}
+
+static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
+				 int len, enum dma_data_direction direction)
+{
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct dma_chan *channel = NULL;
+	dma_cookie_t cookie;
+
+	if (direction != DMA_TO_DEVICE) {
+		dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
+
+	channel = ctx->device->dma.chan_mem2hash;
+	ctx->device->dma.sg = sg;
+	ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
+			ctx->device->dma.sg, ctx->device->dma.nents,
+			direction);
+
+	if (!ctx->device->dma.sg_len) {
+		dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
+			__func__);
+		return -EFAULT;
+	}
+
+	dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
+		__func__);
+	desc = dmaengine_prep_slave_sg(channel,
+			ctx->device->dma.sg, ctx->device->dma.sg_len,
+			direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+	if (!desc) {
+		dev_err(ctx->device->dev,
+			"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
+		return -EFAULT;
+	}
+
+	desc->callback = hash_dma_callback;
+	desc->callback_param = ctx;
+
+	cookie = dmaengine_submit(desc);
+	dma_async_issue_pending(channel);
+
+	return 0;
+}
+
+static void hash_dma_done(struct hash_ctx *ctx)
+{
+	struct dma_chan *chan;
+
+	chan = ctx->device->dma.chan_mem2hash;
+	dmaengine_terminate_all(chan);
+	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
+		     ctx->device->dma.sg_len, DMA_TO_DEVICE);
+}
+
+static int hash_dma_write(struct hash_ctx *ctx,
+			  struct scatterlist *sg, int len)
+{
+	int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
+	if (error) {
+		dev_dbg(ctx->device->dev,
+			"%s: hash_set_dma_transfer() failed\n", __func__);
+		return error;
+	}
+
+	return len;
+}
+
+/**
+ * get_empty_message_digest - Returns a pre-calculated digest for
+ * the empty message.
+ * @device_data:	Structure for the hash device.
+ * @zero_hash:		Buffer to return the empty message digest.
+ * @zero_hash_size:	Hash size of the empty message digest.
+ * @zero_digest:	True if a zero-message digest was returned.
+ */
+static int get_empty_message_digest(
+		struct hash_device_data *device_data,
+		u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
+{
+	int ret = 0;
+	struct hash_ctx *ctx = device_data->current_ctx;
+	*zero_digest = false;
+
+	/**
+	 * Caller responsible for ctx != NULL.
+	 */
+
+	if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
+		if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
+			memcpy(zero_hash, &sha1_zero_message_hash[0],
+			       SHA1_DIGEST_SIZE);
+			*zero_hash_size = SHA1_DIGEST_SIZE;
+			*zero_digest = true;
+		} else if (HASH_ALGO_SHA256 ==
+				ctx->config.algorithm) {
+			memcpy(zero_hash, &sha256_zero_message_hash[0],
+			       SHA256_DIGEST_SIZE);
+			*zero_hash_size = SHA256_DIGEST_SIZE;
+			*zero_digest = true;
+		} else {
+			dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
+				__func__);
+			ret = -EINVAL;
+			goto out;
+		}
+	} else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
+		if (!ctx->keylen) {
+			if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
+				memcpy(zero_hash, &zero_message_hmac_sha1[0],
+				       SHA1_DIGEST_SIZE);
+				*zero_hash_size = SHA1_DIGEST_SIZE;
+				*zero_digest = true;
+			} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
+				memcpy(zero_hash, &zero_message_hmac_sha256[0],
+				       SHA256_DIGEST_SIZE);
+				*zero_hash_size = SHA256_DIGEST_SIZE;
+				*zero_digest = true;
+			} else {
+				dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
+					__func__);
+				ret = -EINVAL;
+				goto out;
+			}
+		} else {
+			dev_dbg(device_data->dev,
+				"%s: Continue hash calculation, since hmac key available\n",
+				__func__);
+		}
+	}
+out:
+
+	return ret;
+}
+
+/**
+ * hash_disable_power - Request to disable power and clock.
+ * @device_data:	Structure for the hash device.
+ * @save_device_state:	If true, saves the current hw state.
+ *
+ * This function requests that power (regulator) and clock be disabled,
+ * and can also save the current hw state.
+ */
+static int hash_disable_power(struct hash_device_data *device_data,
+			      bool save_device_state)
+{
+	int ret = 0;
+	struct device *dev = device_data->dev;
+
+	spin_lock(&device_data->power_state_lock);
+	if (!device_data->power_state)
+		goto out;
+
+	if (save_device_state) {
+		hash_save_state(device_data,
+				&device_data->state);
+		device_data->restore_dev_state = true;
+	}
+
+	clk_disable(device_data->clk);
+	ret = regulator_disable(device_data->regulator);
+	if (ret)
+		dev_err(dev, "%s: regulator_disable() failed!\n", __func__);
+
+	device_data->power_state = false;
+
+out:
+	spin_unlock(&device_data->power_state_lock);
+
+	return ret;
+}
+
+/**
+ * hash_enable_power - Request to enable power and clock.
+ * @device_data:		Structure for the hash device.
+ * @restore_device_state:	If true, restores a previous saved hw state.
+ *
+ * This function requests that power (regulator) and clock be enabled,
+ * and can also restore a previously saved hw state.
+ */
+static int hash_enable_power(struct hash_device_data *device_data,
+			     bool restore_device_state)
+{
+	int ret = 0;
+	struct device *dev = device_data->dev;
+
+	spin_lock(&device_data->power_state_lock);
+	if (!device_data->power_state) {
+		ret = regulator_enable(device_data->regulator);
+		if (ret) {
+			dev_err(dev, "%s: regulator_enable() failed!\n",
+				__func__);
+			goto out;
+		}
+		ret = clk_enable(device_data->clk);
+		if (ret) {
+			dev_err(dev, "%s: clk_enable() failed!\n", __func__);
+			ret = regulator_disable(
+					device_data->regulator);
+			goto out;
+		}
+		device_data->power_state = true;
+	}
+
+	if (device_data->restore_dev_state) {
+		if (restore_device_state) {
+			device_data->restore_dev_state = false;
+			hash_resume_state(device_data, &device_data->state);
+		}
+	}
+out:
+	spin_unlock(&device_data->power_state_lock);
+
+	return ret;
+}
+
+/**
+ * hash_get_device_data - Checks for an available hash device and returns it.
+ * @ctx:		Structure for the hash context.
+ * @device_data:	Structure for the hash device.
+ *
+ * This function checks for an available hash device and returns it to
+ * the caller.
+ * Note! The caller needs to release the device by calling up().
+ */
+static int hash_get_device_data(struct hash_ctx *ctx,
+				struct hash_device_data **device_data)
+{
+	int			ret;
+	struct klist_iter	device_iterator;
+	struct klist_node	*device_node;
+	struct hash_device_data *local_device_data = NULL;
+
+	/* Wait until a device is available */
+	ret = down_interruptible(&driver_data.device_allocation);
+	if (ret)
+		return ret;  /* Interrupted */
+
+	/* Select a device */
+	klist_iter_init(&driver_data.device_list, &device_iterator);
+	device_node = klist_next(&device_iterator);
+	while (device_node) {
+		local_device_data = container_of(device_node,
+					   struct hash_device_data, list_node);
+		spin_lock(&local_device_data->ctx_lock);
+		/* current_ctx allocates a device, NULL = unallocated */
+		if (local_device_data->current_ctx) {
+			device_node = klist_next(&device_iterator);
+		} else {
+			local_device_data->current_ctx = ctx;
+			ctx->device = local_device_data;
+			spin_unlock(&local_device_data->ctx_lock);
+			break;
+		}
+		spin_unlock(&local_device_data->ctx_lock);
+	}
+	klist_iter_exit(&device_iterator);
+
+	if (!device_node) {
+		/**
+		 * No free device found.
+		 * Since we allocated a device with down_interruptible, this
+		 * should not be able to happen.
+		 * Number of available devices, which are contained in
+		 * device_allocation, is therefore decremented by not doing
+		 * an up(device_allocation).
+		 */
+		return -EBUSY;
+	}
+
+	*device_data = local_device_data;
+
+	return 0;
+}
+
+/**
+ * hash_hw_write_key - Writes the key to the hardware registers.
+ *
+ * @device_data:	Structure for the hash device.
+ * @key:		Key to be written.
+ * @keylen:		The length of the key.
+ *
+ * Note! This function DOES NOT write to the NBLW register, even though
+ * this is specified in the hw design spec. Either due to incorrect info in
+ * the spec or due to a bug in the hw.
+ */
+static void hash_hw_write_key(struct hash_device_data *device_data,
+			      const u8 *key, unsigned int keylen)
+{
+	u32 word = 0;
+	int nwords = 1;
+
+	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+
+	while (keylen >= 4) {
+		u32 *key_word = (u32 *)key;
+
+		HASH_SET_DIN(key_word, nwords);
+		keylen -= 4;
+		key += 4;
+	}
+
+	/* Take care of the remaining bytes in the last word */
+	if (keylen) {
+		word = 0;
+		while (keylen) {
+			word |= (key[keylen - 1] << (8 * (keylen - 1)));
+			keylen--;
+		}
+
+		HASH_SET_DIN(&word, nwords);
+	}
+
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
+		cpu_relax();
+
+	HASH_SET_DCAL;
+
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
+		cpu_relax();
+}
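+
+/*
+ * Worked example (illustrative) of the tail handling in hash_hw_write_key():
+ * with three key bytes left, say key[] = { 0xAA, 0xBB, 0xCC }, the loop
+ * builds word = 0xAA | 0xBB << 8 | 0xCC << 16 = 0x00CCBBAA, i.e. the
+ * remaining bytes are packed little-endian into a single DIN write.
+ */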
+
+/**
+ * init_hash_hw - Initialise the hash hardware for a new calculation.
+ * @device_data:	Structure for the hash device.
+ * @ctx:		The hash context.
+ *
+ * This function will enable the bits needed to clear and start a new
+ * calculation.
+ */
+static int init_hash_hw(struct hash_device_data *device_data,
+			struct hash_ctx *ctx)
+{
+	int ret = 0;
+
+	ret = hash_setconfiguration(device_data, &ctx->config);
+	if (ret) {
+		dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
+			__func__);
+		return ret;
+	}
+
+	hash_begin(device_data, ctx);
+
+	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
+		hash_hw_write_key(device_data, ctx->key, ctx->keylen);
+
+	return ret;
+}
+
+/**
+ * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
+ *
+ * @sg:		Scatterlist.
+ * @size:	Size in bytes.
+ * @aligned:	True if sg data aligned to work in DMA mode.
+ *
+ */
+static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
+{
+	int nents = 0;
+	bool aligned_data = true;
+
+	while (size > 0 && sg) {
+		nents++;
+		size -= sg->length;
+
+		/* hash_set_dma_transfer will align last nent */
+		if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
+		    (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
+			aligned_data = false;
+
+		sg = sg_next(sg);
+	}
+
+	if (aligned)
+		*aligned = aligned_data;
+
+	if (size != 0)
+		return -EFAULT;
+
+	return nents;
+}
+
+/**
+ * hash_dma_valid_data - checks for dma valid sg data.
+ * @sg:		Scatterlist.
+ * @datasize:	Datasize in bytes.
+ *
+ * NOTE! This function checks whether the sg data is valid for DMA, since DMA
+ * only accepts data sizes that are a multiple of the word size.
+ */
+static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
+{
+	bool aligned;
+
+	/* Need to include at least one nent, else error */
+	if (hash_get_nents(sg, datasize, &aligned) < 1)
+		return false;
+
+	return aligned;
+}
+
+/**
+ * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
+ * @req: The hash request for the job.
+ *
+ * Initialize structures.
+ */
+static int hash_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
+
+	if (!ctx->key)
+		ctx->keylen = 0;
+
+	memset(&req_ctx->state, 0, sizeof(struct hash_state));
+	req_ctx->updated = 0;
+	if (hash_mode == HASH_MODE_DMA) {
+		if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
+			req_ctx->dma_mode = false; /* Don't use DMA */
+
+			pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
+				 __func__, HASH_DMA_ALIGN_SIZE);
+		} else {
+			if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
+			    hash_dma_valid_data(req->src, req->nbytes)) {
+				req_ctx->dma_mode = true;
+			} else {
+				req_ctx->dma_mode = false;
+				pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
+					 __func__,
+					 HASH_DMA_PERFORMANCE_MIN_SIZE);
+			}
+		}
+	}
+	return 0;
+}
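+
+/*
+ * Summary of the mode decision above (descriptive only): even with
+ * hash_mode set to DMA, a request falls back to CPU mode when it is shorter
+ * than HASH_DMA_ALIGN_SIZE bytes, shorter than HASH_DMA_PERFORMANCE_MIN_SIZE
+ * bytes, or when its scatterlist is not word-aligned (except for the last
+ * entry, whose length is aligned later by the DMA transfer setup).
+ */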
+
+/**
+ * hash_processblock - This function processes a single block of 512 bits (64
+ *                     bytes), word aligned, starting at message.
+ * @device_data:	Structure for the hash device.
+ * @message:		Block (512 bits) of message to be written to
+ *			the HASH hardware.
+ *
+ */
+static void hash_processblock(struct hash_device_data *device_data,
+			      const u32 *message, int length)
+{
+	int len = length / HASH_BYTES_PER_WORD;
+	/*
+	 * NBLW bits. Reset the number of bits in last word (NBLW).
+	 */
+	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+
+	/*
+	 * Write message data to the HASH_DIN register.
+	 */
+	HASH_SET_DIN(message, len);
+}
+
+/**
+ * hash_messagepad - Pads a message and writes the NBLW bits.
+ * @device_data:	Structure for the hash device.
+ * @message:		Last word of a message.
+ * @index_bytes:	The number of bytes in the last message.
+ *
+ * This function manages the final part of the digest calculation, when less
+ * than 512 bits (64 bytes) remain in the message. This means index_bytes < 64.
+ *
+ */
+static void hash_messagepad(struct hash_device_data *device_data,
+			    const u32 *message, u8 index_bytes)
+{
+	int nwords = 1;
+
+	/*
+	 * Clear hash str register, only clear NBLW
+	 * since DCAL will be reset by hardware.
+	 */
+	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+
+	/* Main loop */
+	while (index_bytes >= 4) {
+		HASH_SET_DIN(message, nwords);
+		index_bytes -= 4;
+		message++;
+	}
+
+	if (index_bytes)
+		HASH_SET_DIN(message, nwords);
+
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
+		cpu_relax();
+
+	/* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
+	HASH_SET_NBLW(index_bytes * 8);
+	dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
+		__func__, readl_relaxed(&device_data->base->din),
+		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
+	HASH_SET_DCAL;
+	dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
+		__func__, readl_relaxed(&device_data->base->din),
+		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
+
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
+		cpu_relax();
+}
+
+/**
+ * hash_incrementlength - Increments the length of the current message.
+ * @ctx: Hash context
+ * @incr: Length of message processed already
+ *
+ * Overflow cannot occur, because conditions for overflow are checked in
+ * hash_hw_update.
+ */
+static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
+{
+	ctx->state.length.low_word += incr;
+
+	/* Check for wrap-around */
+	if (ctx->state.length.low_word < incr)
+		ctx->state.length.high_word++;
+}
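+
+/*
+ * Worked example (illustrative): low_word = 0xFFFFFFC0 and incr = 64 wraps
+ * low_word around to 0; since 0 < 64 the carry is detected and high_word is
+ * incremented, keeping the 64-bit message length consistent.
+ */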
+
+/**
+ * hash_setconfiguration - Sets the required configuration for the hash
+ *                         hardware.
+ * @device_data:	Structure for the hash device.
+ * @config:		Pointer to a configuration structure.
+ */
+int hash_setconfiguration(struct hash_device_data *device_data,
+			  struct hash_config *config)
+{
+	int ret = 0;
+
+	if (config->algorithm != HASH_ALGO_SHA1 &&
+	    config->algorithm != HASH_ALGO_SHA256)
+		return -EPERM;
+
+	/*
+	 * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
+	 * to be written to HASH_DIN is considered as 32 bits.
+	 */
+	HASH_SET_DATA_FORMAT(config->data_format);
+
+	/*
+	 * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
+	 */
+	switch (config->algorithm) {
+	case HASH_ALGO_SHA1:
+		HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
+		break;
+
+	case HASH_ALGO_SHA256:
+		HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
+		break;
+
+	default:
+		dev_err(device_data->dev, "%s: Incorrect algorithm\n",
+			__func__);
+		return -EPERM;
+	}
+
+	/*
+	 * MODE bit. This bit selects between HASH or HMAC mode for the
+	 * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
+	 */
+	if (HASH_OPER_MODE_HASH == config->oper_mode)
+		HASH_CLEAR_BITS(&device_data->base->cr,
+				HASH_CR_MODE_MASK);
+	else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
+		HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
+		if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
+			/* Truncate key to blocksize */
+			dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
+			HASH_SET_BITS(&device_data->base->cr,
+				      HASH_CR_LKEY_MASK);
+		} else {
+			dev_dbg(device_data->dev, "%s: LKEY cleared\n",
+				__func__);
+			HASH_CLEAR_BITS(&device_data->base->cr,
+					HASH_CR_LKEY_MASK);
+		}
+	} else {	/* Wrong hash mode */
+		ret = -EPERM;
+		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
+			__func__);
+	}
+	return ret;
+}
+
+/**
+ * hash_begin - This routine resets some globals and initializes the hash
+ *              hardware.
+ * @device_data:	Structure for the hash device.
+ * @ctx:		Hash context.
+ */
+void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
+{
+	/* HW and SW initializations */
+	/* Note: there is no need to initialize buffer and digest members */
+
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
+		cpu_relax();
+
+	/*
+	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
+	 * prepare and initialize the HASH accelerator to compute the message
+	 * digest of a new message.
+	 */
+	HASH_INITIALIZE;
+
+	/*
+	 * NBLW bits. Reset the number of bits in last word (NBLW).
+	 */
+	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+}
+
+static int hash_process_data(struct hash_device_data *device_data,
+			     struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
+			     int msg_length, u8 *data_buffer, u8 *buffer,
+			     u8 *index)
+{
+	int ret = 0;
+	u32 count;
+
+	do {
+		if ((*index + msg_length) < HASH_BLOCK_SIZE) {
+			for (count = 0; count < msg_length; count++) {
+				buffer[*index + count] =
+					*(data_buffer + count);
+			}
+			*index += msg_length;
+			msg_length = 0;
+		} else {
+			if (req_ctx->updated) {
+				ret = hash_resume_state(device_data,
+						&device_data->state);
+				memmove(req_ctx->state.buffer,
+					device_data->state.buffer,
+					HASH_BLOCK_SIZE);
+				if (ret) {
+					dev_err(device_data->dev,
+						"%s: hash_resume_state() failed!\n",
+						__func__);
+					goto out;
+				}
+			} else {
+				ret = init_hash_hw(device_data, ctx);
+				if (ret) {
+					dev_err(device_data->dev,
+						"%s: init_hash_hw() failed!\n",
+						__func__);
+					goto out;
+				}
+				req_ctx->updated = 1;
+			}
+			/*
+			 * If 'data_buffer' is four byte aligned and
+			 * local buffer does not have any data, we can
+			 * write data directly from 'data_buffer' to
+			 * HW peripheral, otherwise we first copy data
+			 * to a local buffer
+			 */
+			if ((0 == (((u32)data_buffer) % 4)) &&
+			    (0 == *index))
+				hash_processblock(device_data,
+						  (const u32 *)data_buffer,
+						  HASH_BLOCK_SIZE);
+			else {
+				for (count = 0;
+				     count < (u32)(HASH_BLOCK_SIZE - *index);
+				     count++) {
+					buffer[*index + count] =
+						*(data_buffer + count);
+				}
+				hash_processblock(device_data,
+						  (const u32 *)buffer,
+						  HASH_BLOCK_SIZE);
+			}
+			hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
+			data_buffer += (HASH_BLOCK_SIZE - *index);
+
+			msg_length -= (HASH_BLOCK_SIZE - *index);
+			*index = 0;
+
+			ret = hash_save_state(device_data,
+					&device_data->state);
+
+			memmove(device_data->state.buffer,
+				req_ctx->state.buffer,
+				HASH_BLOCK_SIZE);
+			if (ret) {
+				dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
+					__func__);
+				goto out;
+			}
+		}
+	} while (msg_length != 0);
+out:
+
+	return ret;
+}
+
+/**
+ * hash_dma_final - The hash dma final function for SHA1/SHA256.
+ * @req:	The hash request for the job.
+ */
+static int hash_dma_final(struct ahash_request *req)
+{
+	int ret = 0;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct hash_device_data *device_data;
+	u8 digest[SHA256_DIGEST_SIZE];
+	int bytes_written = 0;
+
+	ret = hash_get_device_data(ctx, &device_data);
+	if (ret)
+		return ret;
+
+	dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
+
+	if (req_ctx->updated) {
+		ret = hash_resume_state(device_data, &device_data->state);
+
+		if (ret) {
+			dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
+				__func__);
+			goto out;
+		}
+	}
+
+	if (!req_ctx->updated) {
+		ret = hash_setconfiguration(device_data, &ctx->config);
+		if (ret) {
+			dev_err(device_data->dev,
+				"%s: hash_setconfiguration() failed!\n",
+				__func__);
+			goto out;
+		}
+
+		/* Enable DMA input */
+		if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
+			HASH_CLEAR_BITS(&device_data->base->cr,
+					HASH_CR_DMAE_MASK);
+		} else {
+			HASH_SET_BITS(&device_data->base->cr,
+				      HASH_CR_DMAE_MASK);
+			HASH_SET_BITS(&device_data->base->cr,
+				      HASH_CR_PRIVN_MASK);
+		}
+
+		HASH_INITIALIZE;
+
+		if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
+			hash_hw_write_key(device_data, ctx->key, ctx->keylen);
+
+		/* Number of bits in last word = (nbytes * 8) % 32 */
+		HASH_SET_NBLW((req->nbytes * 8) % 32);
+		req_ctx->updated = 1;
+	}
+
+	/* Store the nents in the dma struct. */
+	ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
+	if (!ctx->device->dma.nents) {
+		dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
+			__func__);
+		ret = ctx->device->dma.nents;
+		goto out;
+	}
+
+	bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
+	if (bytes_written != req->nbytes) {
+		dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
+			__func__);
+		ret = bytes_written;
+		goto out;
+	}
+
+	wait_for_completion(&ctx->device->dma.complete);
+	hash_dma_done(ctx);
+
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
+		cpu_relax();
+
+	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
+		unsigned int keylen = ctx->keylen;
+		u8 *key = ctx->key;
+
+		dev_dbg(device_data->dev, "%s: keylen: %d\n",
+			__func__, ctx->keylen);
+		hash_hw_write_key(device_data, key, keylen);
+	}
+
+	hash_get_digest(device_data, digest, ctx->config.algorithm);
+	memcpy(req->result, digest, ctx->digestsize);
+
+out:
+	release_hash_device(device_data);
+
+	/**
+	 * Allocated in setkey, and only used in HMAC.
+	 */
+	kfree(ctx->key);
+
+	return ret;
+}
+
+/**
+ * hash_hw_final - The final hash calculation function
+ * @req:	The hash request for the job.
+ */
+static int hash_hw_final(struct ahash_request *req)
+{
+	int ret = 0;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct hash_device_data *device_data;
+	u8 digest[SHA256_DIGEST_SIZE];
+
+	ret = hash_get_device_data(ctx, &device_data);
+	if (ret)
+		return ret;
+
+	dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
+
+	if (req_ctx->updated) {
+		ret = hash_resume_state(device_data, &device_data->state);
+
+		if (ret) {
+			dev_err(device_data->dev,
+				"%s: hash_resume_state() failed!\n", __func__);
+			goto out;
+		}
+	} else if (req->nbytes == 0 && ctx->keylen == 0) {
+		u8 zero_hash[SHA256_DIGEST_SIZE];
+		u32 zero_hash_size = 0;
+		bool zero_digest = false;
+		/**
+		 * Use a pre-calculated empty message digest
+		 * (workaround since hw return zeroes, hw bug!?)
+		 */
+		ret = get_empty_message_digest(device_data, &zero_hash[0],
+				&zero_hash_size, &zero_digest);
+		if (!ret && likely(zero_hash_size == ctx->digestsize) &&
+		    zero_digest) {
+			memcpy(req->result, &zero_hash[0], ctx->digestsize);
+			goto out;
+		} else if (!ret && !zero_digest) {
+			dev_dbg(device_data->dev,
+				"%s: HMAC zero msg with key, continue...\n",
+				__func__);
+		} else {
+			dev_err(device_data->dev,
+				"%s: ret=%d, or wrong digest size? %s\n",
+				__func__, ret,
+				zero_hash_size == ctx->digestsize ?
+				"true" : "false");
+			/* Return error */
+			goto out;
+		}
+	} else if (req->nbytes == 0 && ctx->keylen > 0) {
+		dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
+			__func__);
+		goto out;
+	}
+
+	if (!req_ctx->updated) {
+		ret = init_hash_hw(device_data, ctx);
+		if (ret) {
+			dev_err(device_data->dev,
+				"%s: init_hash_hw() failed!\n", __func__);
+			goto out;
+		}
+	}
+
+	if (req_ctx->state.index) {
+		hash_messagepad(device_data, req_ctx->state.buffer,
+				req_ctx->state.index);
+	} else {
+		HASH_SET_DCAL;
+		while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
+			cpu_relax();
+	}
+
+	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
+		unsigned int keylen = ctx->keylen;
+		u8 *key = ctx->key;
+
+		dev_dbg(device_data->dev, "%s: keylen: %d\n",
+			__func__, ctx->keylen);
+		hash_hw_write_key(device_data, key, keylen);
+	}
+
+	hash_get_digest(device_data, digest, ctx->config.algorithm);
+	memcpy(req->result, digest, ctx->digestsize);
+
+out:
+	release_hash_device(device_data);
+
+	/**
+	 * Allocated in setkey, and only used in HMAC.
+	 */
+	kfree(ctx->key);
+
+	return ret;
+}
+
+/**
+ * hash_hw_update - Updates current HASH computation hashing another part of
+ *                  the message.
+ * @req:	The hash request containing the message to be hashed (caller
+ *		allocated).
+ */
+int hash_hw_update(struct ahash_request *req)
+{
+	int ret = 0;
+	u8 index = 0;
+	u8 *buffer;
+	struct hash_device_data *device_data;
+	u8 *data_buffer;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct crypto_hash_walk walk;
+	int msg_length = crypto_hash_walk_first(req, &walk);
+
+	/* Empty message ("") is correct indata */
+	if (msg_length == 0)
+		return ret;
+
+	index = req_ctx->state.index;
+	buffer = (u8 *)req_ctx->state.buffer;
+
+	/* Check if ctx->state.length + msg_length
+	   overflows */
+	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
+	    HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
+		pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
+		return -EPERM;
+	}
+
+	ret = hash_get_device_data(ctx, &device_data);
+	if (ret)
+		return ret;
+
+	/* Main loop */
+	while (0 != msg_length) {
+		data_buffer = walk.data;
+		ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
+				data_buffer, buffer, &index);
+
+		if (ret) {
+			dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
+				__func__);
+			goto out;
+		}
+
+		msg_length = crypto_hash_walk_done(&walk, 0);
+	}
+
+	req_ctx->state.index = index;
+	dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
+		__func__, req_ctx->state.index, req_ctx->state.bit_index);
+
+out:
+	release_hash_device(device_data);
+
+	return ret;
+}
+
+/**
+ * hash_resume_state - Function that resumes the state of a calculation.
+ * @device_data:	Pointer to the device structure.
+ * @device_state:	The state to be restored in the hash hardware
+ */
+int hash_resume_state(struct hash_device_data *device_data,
+		      const struct hash_state *device_state)
+{
+	u32 temp_cr;
+	s32 count;
+	int hash_mode = HASH_OPER_MODE_HASH;
+
+	if (NULL == device_state) {
+		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
+			__func__);
+		return -EPERM;
+	}
+
+	/* Check correctness of index and length members */
+	if (device_state->index > HASH_BLOCK_SIZE ||
+	    (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
+		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
+			__func__);
+		return -EPERM;
+	}
+
+	/*
+	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
+	 * prepare and initialize the HASH accelerator to compute the message
+	 * digest of a new message.
+	 */
+	HASH_INITIALIZE;
+
+	temp_cr = device_state->temp_cr;
+	writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
+
+	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
+		hash_mode = HASH_OPER_MODE_HMAC;
+	else
+		hash_mode = HASH_OPER_MODE_HASH;
+
+	for (count = 0; count < HASH_CSR_COUNT; count++) {
+		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
+			break;
+
+		writel_relaxed(device_state->csr[count],
+			       &device_data->base->csrx[count]);
+	}
+
+	writel_relaxed(device_state->csfull, &device_data->base->csfull);
+	writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
+
+	writel_relaxed(device_state->str_reg, &device_data->base->str);
+	writel_relaxed(temp_cr, &device_data->base->cr);
+
+	return 0;
+}
+
+/**
+ * hash_save_state - Function that saves the state of hardware.
+ * @device_data:	Pointer to the device structure.
+ * @device_state:	The structure where the hardware state should be saved.
+ */
+int hash_save_state(struct hash_device_data *device_data,
+		    struct hash_state *device_state)
+{
+	u32 temp_cr;
+	u32 count;
+	int hash_mode = HASH_OPER_MODE_HASH;
+
+	if (NULL == device_state) {
+		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
+			__func__);
+		return -ENOTSUPP;
+	}
+
+	/*
+	 * Wait for any ongoing digest calculation in the hardware to finish
+	 * before the state is read out and saved.
+	 */
+	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
+		cpu_relax();
+
+	temp_cr = readl_relaxed(&device_data->base->cr);
+
+	device_state->str_reg = readl_relaxed(&device_data->base->str);
+
+	device_state->din_reg = readl_relaxed(&device_data->base->din);
+
+	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
+		hash_mode = HASH_OPER_MODE_HMAC;
+	else
+		hash_mode = HASH_OPER_MODE_HASH;
+
+	for (count = 0; count < HASH_CSR_COUNT; count++) {
+		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
+			break;
+
+		device_state->csr[count] =
+			readl_relaxed(&device_data->base->csrx[count]);
+	}
+
+	device_state->csfull = readl_relaxed(&device_data->base->csfull);
+	device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
+
+	device_state->temp_cr = temp_cr;
+
+	return 0;
+}
+
+/**
+ * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
+ * @device_data:	Structure for the hash device.
+ *
+ */
+int hash_check_hw(struct hash_device_data *device_data)
+{
+	/* Checking Peripheral Ids  */
+	if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
+	    HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
+	    HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
+	    HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
+	    HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
+	    HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
+	    HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
+	    HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
+		return 0;
+	}
+
+	dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
+	return -ENOTSUPP;
+}
+
+/**
+ * hash_get_digest - Gets the digest.
+ * @device_data:	Pointer to the device structure.
+ * @digest:		User allocated byte array for the calculated digest.
+ * @algorithm:		The algorithm in use.
+ */
+void hash_get_digest(struct hash_device_data *device_data,
+		     u8 *digest, int algorithm)
+{
+	u32 temp_hx_val, count;
+	int loop_ctr;
+
+	if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
+		dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
+			__func__, algorithm);
+		return;
+	}
+
+	if (algorithm == HASH_ALGO_SHA1)
+		loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
+	else
+		loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
+
+	dev_dbg(device_data->dev, "%s: digest array: %p\n",
+		__func__, digest);
+
+	/* Copy result into digest array */
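+	/* Each 32-bit HX word is stored most-significant byte first. */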
+	for (count = 0; count < loop_ctr; count++) {
+		temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
+		digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
+		digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
+		digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
+		digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
+	}
+}
+
+/**
+ * ahash_update - The hash update function for SHA-1/SHA-2 (SHA-256).
+ * @req: The hash request for the job.
+ */
+static int ahash_update(struct ahash_request *req)
+{
+	int ret = 0;
+	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
+
+	if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
+		ret = hash_hw_update(req);
+	/* Skip update for DMA, all data will be passed to DMA in final */
+
+	if (ret)
+		pr_err("%s: hash_hw_update() failed!\n", __func__);
+
+	return ret;
+}
+
+/**
+ * ahash_final - The hash final function for SHA-1/SHA-2 (SHA-256).
+ * @req:	The hash request for the job.
+ */
+static int ahash_final(struct ahash_request *req)
+{
+	int ret = 0;
+	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
+
+	pr_debug("%s: data size: %d\n", __func__, req->nbytes);
+
+	if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
+		ret = hash_dma_final(req);
+	else
+		ret = hash_hw_final(req);
+
+	if (ret)
+		pr_err("%s: hash_hw/dma_final() failed\n", __func__);
+
+	return ret;
+}
+
+static int hash_setkey(struct crypto_ahash *tfm,
+		       const u8 *key, unsigned int keylen, int alg)
+{
+	int ret = 0;
+	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	/* Freed in final. */
+	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
+	if (!ctx->key) {
+		pr_err("%s: Failed to allocate ctx->key for %d\n",
+		       __func__, alg);
+		return -ENOMEM;
+	}
+	ctx->keylen = keylen;
+
+	return ret;
+}
+
+static int ahash_sha1_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ctx->config.data_format = HASH_DATA_8_BITS;
+	ctx->config.algorithm = HASH_ALGO_SHA1;
+	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
+	ctx->digestsize = SHA1_DIGEST_SIZE;
+
+	return hash_init(req);
+}
+
+static int ahash_sha256_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ctx->config.data_format = HASH_DATA_8_BITS;
+	ctx->config.algorithm = HASH_ALGO_SHA256;
+	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
+	ctx->digestsize = SHA256_DIGEST_SIZE;
+
+	return hash_init(req);
+}
+
+static int ahash_sha1_digest(struct ahash_request *req)
+{
+	int ret2, ret1;
+
+	ret1 = ahash_sha1_init(req);
+	if (ret1)
+		goto out;
+
+	ret1 = ahash_update(req);
+	ret2 = ahash_final(req);
+
+out:
+	return ret1 ? ret1 : ret2;
+}
+
+static int ahash_sha256_digest(struct ahash_request *req)
+{
+	int ret2, ret1;
+
+	ret1 = ahash_sha256_init(req);
+	if (ret1)
+		goto out;
+
+	ret1 = ahash_update(req);
+	ret2 = ahash_final(req);
+
+out:
+	return ret1 ? ret1 : ret2;
+}
+
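+/* Partial hash state export/import is not supported by this driver. */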
+static int ahash_noimport(struct ahash_request *req, const void *in)
+{
+	return -ENOSYS;
+}
+
+static int ahash_noexport(struct ahash_request *req, void *out)
+{
+	return -ENOSYS;
+}
+
+static int hmac_sha1_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ctx->config.data_format	= HASH_DATA_8_BITS;
+	ctx->config.algorithm	= HASH_ALGO_SHA1;
+	ctx->config.oper_mode	= HASH_OPER_MODE_HMAC;
+	ctx->digestsize		= SHA1_DIGEST_SIZE;
+
+	return hash_init(req);
+}
+
+static int hmac_sha256_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+	ctx->config.data_format	= HASH_DATA_8_BITS;
+	ctx->config.algorithm	= HASH_ALGO_SHA256;
+	ctx->config.oper_mode	= HASH_OPER_MODE_HMAC;
+	ctx->digestsize		= SHA256_DIGEST_SIZE;
+
+	return hash_init(req);
+}
+
+static int hmac_sha1_digest(struct ahash_request *req)
+{
+	int ret2, ret1;
+
+	ret1 = hmac_sha1_init(req);
+	if (ret1)
+		goto out;
+
+	ret1 = ahash_update(req);
+	ret2 = ahash_final(req);
+
+out:
+	return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha256_digest(struct ahash_request *req)
+{
+	int ret2, ret1;
+
+	ret1 = hmac_sha256_init(req);
+	if (ret1)
+		goto out;
+
+	ret1 = ahash_update(req);
+	ret2 = ahash_final(req);
+
+out:
+	return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha1_setkey(struct crypto_ahash *tfm,
+			    const u8 *key, unsigned int keylen)
+{
+	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
+}
+
+static int hmac_sha256_setkey(struct crypto_ahash *tfm,
+			      const u8 *key, unsigned int keylen)
+{
+	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
+}
+
+struct hash_algo_template {
+	struct hash_config conf;
+	struct ahash_alg hash;
+};
+
+static int hash_cra_init(struct crypto_tfm *tfm)
+{
+	struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct hash_algo_template *hash_alg;
+
+	hash_alg = container_of(__crypto_ahash_alg(alg),
+			struct hash_algo_template,
+			hash);
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct hash_req_ctx));
+
+	ctx->config.data_format = HASH_DATA_8_BITS;
+	ctx->config.algorithm = hash_alg->conf.algorithm;
+	ctx->config.oper_mode = hash_alg->conf.oper_mode;
+
+	ctx->digestsize = hash_alg->hash.halg.digestsize;
+
+	return 0;
+}
+
+static struct hash_algo_template hash_algs[] = {
+	{
+		.conf.algorithm = HASH_ALGO_SHA1,
+		.conf.oper_mode = HASH_OPER_MODE_HASH,
+		.hash = {
+			.init = hash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.digest = ahash_sha1_digest,
+			.export = ahash_noexport,
+			.import = ahash_noimport,
+			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct hash_ctx),
+			.halg.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "sha1-ux500",
+				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct hash_ctx),
+				.cra_init = hash_cra_init,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+	{
+		.conf.algorithm	= HASH_ALGO_SHA256,
+		.conf.oper_mode	= HASH_OPER_MODE_HASH,
+		.hash = {
+			.init = hash_init,
+			.update	= ahash_update,
+			.final = ahash_final,
+			.digest = ahash_sha256_digest,
+			.export = ahash_noexport,
+			.import = ahash_noimport,
+			.halg.digestsize = SHA256_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct hash_ctx),
+			.halg.base = {
+				.cra_name = "sha256",
+				.cra_driver_name = "sha256-ux500",
+				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct hash_ctx),
+				.cra_init = hash_cra_init,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+	{
+		.conf.algorithm = HASH_ALGO_SHA1,
+		.conf.oper_mode = HASH_OPER_MODE_HMAC,
+		.hash = {
+			.init = hash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.digest = hmac_sha1_digest,
+			.setkey = hmac_sha1_setkey,
+			.export = ahash_noexport,
+			.import = ahash_noimport,
+			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct hash_ctx),
+			.halg.base = {
+				.cra_name = "hmac(sha1)",
+				.cra_driver_name = "hmac-sha1-ux500",
+				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct hash_ctx),
+				.cra_init = hash_cra_init,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	},
+	{
+		.conf.algorithm = HASH_ALGO_SHA256,
+		.conf.oper_mode = HASH_OPER_MODE_HMAC,
+		.hash = {
+			.init = hash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.digest = hmac_sha256_digest,
+			.setkey = hmac_sha256_setkey,
+			.export = ahash_noexport,
+			.import = ahash_noimport,
+			.halg.digestsize = SHA256_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct hash_ctx),
+			.halg.base = {
+				.cra_name = "hmac(sha256)",
+				.cra_driver_name = "hmac-sha256-ux500",
+				.cra_flags = CRYPTO_ALG_ASYNC,
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct hash_ctx),
+				.cra_init = hash_cra_init,
+				.cra_module = THIS_MODULE,
+			}
+		}
+	}
+};
+
+/**
+ * ahash_algs_register_all - Register all supported hash algorithms.
+ * @device_data:	Pointer to the device structure.
+ */
+static int ahash_algs_register_all(struct hash_device_data *device_data)
+{
+	int ret;
+	int i;
+	int count;
+
+	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
+		ret = crypto_register_ahash(&hash_algs[i].hash);
+		if (ret) {
+			count = i;
+			dev_err(device_data->dev, "%s: alg registration failed\n",
+				hash_algs[i].hash.halg.base.cra_driver_name);
+			goto unreg;
+		}
+	}
+	return 0;
+unreg:
+	for (i = 0; i < count; i++)
+		crypto_unregister_ahash(&hash_algs[i].hash);
+	return ret;
+}
+
+/**
+ * ahash_algs_unregister_all - Unregister all registered hash algorithms.
+ * @device_data:	Pointer to the device structure.
+ */
+static void ahash_algs_unregister_all(struct hash_device_data *device_data)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
+		crypto_unregister_ahash(&hash_algs[i].hash);
+}
+
+/**
+ * ux500_hash_probe - Function that probes the hash hardware.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_probe(struct platform_device *pdev)
+{
+	int			ret = 0;
+	struct resource		*res = NULL;
+	struct hash_device_data *device_data;
+	struct device		*dev = &pdev->dev;
+
+	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
+	if (!device_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	device_data->dev = dev;
+	device_data->current_ctx = NULL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	device_data->phybase = res->start;
+	device_data->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(device_data->base)) {
+		dev_err(dev, "%s: ioremap() failed!\n", __func__);
+		ret = PTR_ERR(device_data->base);
+		goto out;
+	}
+	spin_lock_init(&device_data->ctx_lock);
+	spin_lock_init(&device_data->power_state_lock);
+
+	/* Enable power for HASH1 hardware block */
+	device_data->regulator = regulator_get(dev, "v-ape");
+	if (IS_ERR(device_data->regulator)) {
+		dev_err(dev, "%s: regulator_get() failed!\n", __func__);
+		ret = PTR_ERR(device_data->regulator);
+		device_data->regulator = NULL;
+		goto out;
+	}
+
+	/* Enable the clock for HASH1 hardware block */
+	device_data->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(device_data->clk)) {
+		dev_err(dev, "%s: clk_get() failed!\n", __func__);
+		ret = PTR_ERR(device_data->clk);
+		goto out_regulator;
+	}
+
+	ret = clk_prepare(device_data->clk);
+	if (ret) {
+		dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
+		goto out_regulator;
+	}
+
+	/* Enable device power (and clock) */
+	ret = hash_enable_power(device_data, false);
+	if (ret) {
+		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
+		goto out_clk_unprepare;
+	}
+
+	ret = hash_check_hw(device_data);
+	if (ret) {
+		dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
+		goto out_power;
+	}
+
+	if (hash_mode == HASH_MODE_DMA)
+		hash_dma_setup_channel(device_data, dev);
+
+	platform_set_drvdata(pdev, device_data);
+
+	/* Put the new device into the device list... */
+	klist_add_tail(&device_data->list_node, &driver_data.device_list);
+	/* ... and signal that a new device is available. */
+	up(&driver_data.device_allocation);
+
+	ret = ahash_algs_register_all(device_data);
+	if (ret) {
+		dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
+			__func__);
+		goto out_power;
+	}
+
+	dev_info(dev, "successfully registered\n");
+	return 0;
+
+out_power:
+	hash_disable_power(device_data, false);
+
+out_clk_unprepare:
+	clk_unprepare(device_data->clk);
+
+out_regulator:
+	regulator_put(device_data->regulator);
+
+out:
+	return ret;
+}
+
+/**
+ * ux500_hash_remove - Function that removes the hash device from the platform.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_remove(struct platform_device *pdev)
+{
+	struct hash_device_data *device_data;
+	struct device		*dev = &pdev->dev;
+
+	device_data = platform_get_drvdata(pdev);
+	if (!device_data) {
+		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Try to decrease the number of available devices. */
+	if (down_trylock(&driver_data.device_allocation))
+		return -EBUSY;
+
+	/* Check that the device is free */
+	spin_lock(&device_data->ctx_lock);
+	/* current_ctx allocates a device, NULL = unallocated */
+	if (device_data->current_ctx) {
+		/* The device is busy */
+		spin_unlock(&device_data->ctx_lock);
+		/* Return the device to the pool. */
+		up(&driver_data.device_allocation);
+		return -EBUSY;
+	}
+
+	spin_unlock(&device_data->ctx_lock);
+
+	/* Remove the device from the list */
+	if (klist_node_attached(&device_data->list_node))
+		klist_remove(&device_data->list_node);
+
+	/* If this was the last device, remove the services */
+	if (list_empty(&driver_data.device_list.k_list))
+		ahash_algs_unregister_all(device_data);
+
+	if (hash_disable_power(device_data, false))
+		dev_err(dev, "%s: hash_disable_power() failed\n",
+			__func__);
+
+	clk_unprepare(device_data->clk);
+	regulator_put(device_data->regulator);
+
+	return 0;
+}
+
+/**
+ * ux500_hash_shutdown - Function that shuts down the hash device.
+ * @pdev: The platform device
+ */
+static void ux500_hash_shutdown(struct platform_device *pdev)
+{
+	struct hash_device_data *device_data;
+
+	device_data = platform_get_drvdata(pdev);
+	if (!device_data) {
+		dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
+			__func__);
+		return;
+	}
+
+	/* Check that the device is free */
+	spin_lock(&device_data->ctx_lock);
+	/* current_ctx allocates a device, NULL = unallocated */
+	if (!device_data->current_ctx) {
+		if (down_trylock(&driver_data.device_allocation))
+			dev_dbg(&pdev->dev, "%s: Hash still in use! Shutting down anyway...\n",
+				__func__);
+		/*
+		 * Allocate the device by setting current_ctx to a non-NULL
+		 * (dummy) value, so that no other context can claim it while
+		 * we are shutting down.
+		 */
+		device_data->current_ctx++;
+	}
+	spin_unlock(&device_data->ctx_lock);
+
+	/* Remove the device from the list */
+	if (klist_node_attached(&device_data->list_node))
+		klist_remove(&device_data->list_node);
+
+	/* If this was the last device, remove the services */
+	if (list_empty(&driver_data.device_list.k_list))
+		ahash_algs_unregister_all(device_data);
+
+	if (hash_disable_power(device_data, false))
+		dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
+			__func__);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * ux500_hash_suspend - Function that suspends the hash device.
+ * @dev:	Device to suspend.
+ */
+static int ux500_hash_suspend(struct device *dev)
+{
+	int ret;
+	struct hash_device_data *device_data;
+	struct hash_ctx *temp_ctx = NULL;
+
+	device_data = dev_get_drvdata(dev);
+	if (!device_data) {
+		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
+		return -ENOMEM;
+	}
+
+	spin_lock(&device_data->ctx_lock);
+	if (!device_data->current_ctx)
+		device_data->current_ctx++;
+	spin_unlock(&device_data->ctx_lock);
+
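+	/*
+	 * Both current_ctx++ above and ++temp_ctx below advance a NULL
+	 * context pointer by one element, so the comparison checks whether
+	 * the dummy "allocated" marker was just set, i.e. whether the
+	 * device was idle when suspend was called.
+	 */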
+	if (device_data->current_ctx == ++temp_ctx) {
+		if (down_interruptible(&driver_data.device_allocation))
+			dev_dbg(dev, "%s: down_interruptible() failed\n",
+				__func__);
+		ret = hash_disable_power(device_data, false);
+
+	} else {
+		ret = hash_disable_power(device_data, true);
+	}
+
+	if (ret)
+		dev_err(dev, "%s: hash_disable_power()\n", __func__);
+
+	return ret;
+}
+
+/**
+ * ux500_hash_resume - Function that resumes the hash device.
+ * @dev:	Device to resume.
+ */
+static int ux500_hash_resume(struct device *dev)
+{
+	int ret = 0;
+	struct hash_device_data *device_data;
+	struct hash_ctx *temp_ctx = NULL;
+
+	device_data = dev_get_drvdata(dev);
+	if (!device_data) {
+		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
+		return -ENOMEM;
+	}
+
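+	/* A dummy (NULL + 1) current_ctx means the device was idle at suspend. */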
+	spin_lock(&device_data->ctx_lock);
+	if (device_data->current_ctx == ++temp_ctx)
+		device_data->current_ctx = NULL;
+	spin_unlock(&device_data->ctx_lock);
+
+	if (!device_data->current_ctx)
+		up(&driver_data.device_allocation);
+	else
+		ret = hash_enable_power(device_data, true);
+
+	if (ret)
+		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
+
+	return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
+
+static const struct of_device_id ux500_hash_match[] = {
+	{ .compatible = "stericsson,ux500-hash" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ux500_hash_match);
+
+static struct platform_driver hash_driver = {
+	.probe  = ux500_hash_probe,
+	.remove = ux500_hash_remove,
+	.shutdown = ux500_hash_shutdown,
+	.driver = {
+		.name  = "hash1",
+		.of_match_table = ux500_hash_match,
+		.pm    = &ux500_hash_pm,
+	}
+};
+
+/**
+ * ux500_hash_mod_init - The kernel module init function.
+ */
+static int __init ux500_hash_mod_init(void)
+{
+	klist_init(&driver_data.device_list, NULL, NULL);
+	/* Initialize the semaphore to 0 devices (locked state) */
+	sema_init(&driver_data.device_allocation, 0);
+
+	return platform_driver_register(&hash_driver);
+}
+
+/**
+ * ux500_hash_mod_fini - The kernel module exit function.
+ */
+static void __exit ux500_hash_mod_fini(void)
+{
+	platform_driver_unregister(&hash_driver);
+}
+
+module_init(ux500_hash_mod_init);
+module_exit(ux500_hash_mod_fini);
+
+MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS_CRYPTO("sha1-all");
+MODULE_ALIAS_CRYPTO("sha256-all");
+MODULE_ALIAS_CRYPTO("hmac-sha1-all");
+MODULE_ALIAS_CRYPTO("hmac-sha256-all");
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
new file mode 100644
index 0000000..a4324b1
--- /dev/null
+++ b/drivers/crypto/virtio/Kconfig
@@ -0,0 +1,10 @@
+config CRYPTO_DEV_VIRTIO
+	tristate "VirtIO crypto driver"
+	depends on VIRTIO
+	select CRYPTO_AEAD
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_ENGINE
+	default m
+	help
+	  This driver provides support for virtio crypto device. If you
+	  choose 'M' here, this module will be called virtio_crypto.
diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile
new file mode 100644
index 0000000..cbfcccc
--- /dev/null
+++ b/drivers/crypto/virtio/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
+virtio_crypto-objs := \
+	virtio_crypto_algs.o \
+	virtio_crypto_mgr.o \
+	virtio_crypto_core.o
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
new file mode 100644
index 0000000..2c573d1
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -0,0 +1,667 @@
+ /* Algorithms supported by virtio crypto device
+  *
+  * Authors: Gonglei <arei.gonglei@huawei.com>
+  *
+  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
+  */
+
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <linux/err.h>
+#include <crypto/scatterwalk.h>
+#include <linux/atomic.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+
+struct virtio_crypto_ablkcipher_ctx {
+	struct crypto_engine_ctx enginectx;
+	struct virtio_crypto *vcrypto;
+	struct crypto_tfm *tfm;
+
+	struct virtio_crypto_sym_session_info enc_sess_info;
+	struct virtio_crypto_sym_session_info dec_sess_info;
+};
+
+struct virtio_crypto_sym_request {
+	struct virtio_crypto_request base;
+
+	/* Cipher or aead */
+	uint32_t type;
+	struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
+	struct ablkcipher_request *ablkcipher_req;
+	uint8_t *iv;
+	/* Encryption? */
+	bool encrypt;
+};
+
+struct virtio_crypto_algo {
+	uint32_t algonum;
+	uint32_t service;
+	unsigned int active_devs;
+	struct crypto_alg algo;
+};
+
+/*
+ * The algs_lock protects the per-algorithm active_devs counters below
+ * and crypto algorithm registration.
+ */
+static DEFINE_MUTEX(algs_lock);
+static void virtio_crypto_ablkcipher_finalize_req(
+	struct virtio_crypto_sym_request *vc_sym_req,
+	struct ablkcipher_request *req,
+	int err);
+
+static void virtio_crypto_dataq_sym_callback
+		(struct virtio_crypto_request *vc_req, int len)
+{
+	struct virtio_crypto_sym_request *vc_sym_req =
+		container_of(vc_req, struct virtio_crypto_sym_request, base);
+	struct ablkcipher_request *ablk_req;
+	int error;
+
+	/* Finish the encrypt or decrypt process */
+	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+		switch (vc_req->status) {
+		case VIRTIO_CRYPTO_OK:
+			error = 0;
+			break;
+		case VIRTIO_CRYPTO_INVSESS:
+		case VIRTIO_CRYPTO_ERR:
+			error = -EINVAL;
+			break;
+		case VIRTIO_CRYPTO_BADMSG:
+			error = -EBADMSG;
+			break;
+		default:
+			error = -EIO;
+			break;
+		}
+		ablk_req = vc_sym_req->ablkcipher_req;
+		virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
+							ablk_req, error);
+	}
+}
+
+static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
+{
+	u64 total = 0;
+
+	for (total = 0; sg; sg = sg_next(sg))
+		total += sg->length;
+
+	return total;
+}
+
+static int
+virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
+{
+	switch (key_len) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_192:
+	case AES_KEYSIZE_256:
+		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+		break;
+	default:
+		pr_err("virtio_crypto: Unsupported key length: %d\n",
+			key_len);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_init_session(
+		struct virtio_crypto_ablkcipher_ctx *ctx,
+		uint32_t alg, const uint8_t *key,
+		unsigned int keylen,
+		int encrypt)
+{
+	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
+	unsigned int tmp;
+	struct virtio_crypto *vcrypto = ctx->vcrypto;
+	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
+	int err;
+	unsigned int num_out = 0, num_in = 0;
+
+	/*
+	 * Avoid DMA from the stack: use a dynamically allocated buffer
+	 * for the key.
+	 */
+	uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);
+
+	if (!cipher_key)
+		return -ENOMEM;
+
+	memcpy(cipher_key, key, keylen);
+
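+	/*
+	 * The ctrl_lock serializes use of the shared vcrypto->ctrl and
+	 * vcrypto->input buffers for the whole request/response round trip
+	 * on the control virtqueue.
+	 */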
+	spin_lock(&vcrypto->ctrl_lock);
+	/* Pad ctrl header */
+	vcrypto->ctrl.header.opcode =
+		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
+	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
+	/* Set the default dataqueue id to 0 */
+	vcrypto->ctrl.header.queue_id = 0;
+
+	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+	/* Pad cipher's parameters */
+	vcrypto->ctrl.u.sym_create_session.op_type =
+		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
+		vcrypto->ctrl.header.algo;
+	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
+		cpu_to_le32(keylen);
+	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
+		cpu_to_le32(op);
+
+	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+	sgs[num_out++] = &outhdr;
+
+	/* Set key */
+	sg_init_one(&key_sg, cipher_key, keylen);
+	sgs[num_out++] = &key_sg;
+
+	/* Return status and session id back */
+	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
+	sgs[num_out + num_in++] = &inhdr;
+
+	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
+				num_in, vcrypto, GFP_ATOMIC);
+	if (err < 0) {
+		spin_unlock(&vcrypto->ctrl_lock);
+		kzfree(cipher_key);
+		return err;
+	}
+	virtqueue_kick(vcrypto->ctrl_vq);
+
+	/*
+	 * The kick traps into the hypervisor, so the request should be
+	 * handled promptly; busy-wait for the response.
+	 */
+	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
+	       !virtqueue_is_broken(vcrypto->ctrl_vq))
+		cpu_relax();
+
+	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
+		spin_unlock(&vcrypto->ctrl_lock);
+		pr_err("virtio_crypto: Create session failed status: %u\n",
+			le32_to_cpu(vcrypto->input.status));
+		kzfree(cipher_key);
+		return -EINVAL;
+	}
+
+	if (encrypt)
+		ctx->enc_sess_info.session_id =
+			le64_to_cpu(vcrypto->input.session_id);
+	else
+		ctx->dec_sess_info.session_id =
+			le64_to_cpu(vcrypto->input.session_id);
+
+	spin_unlock(&vcrypto->ctrl_lock);
+
+	kzfree(cipher_key);
+	return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_close_session(
+		struct virtio_crypto_ablkcipher_ctx *ctx,
+		int encrypt)
+{
+	struct scatterlist outhdr, status_sg, *sgs[2];
+	unsigned int tmp;
+	struct virtio_crypto_destroy_session_req *destroy_session;
+	struct virtio_crypto *vcrypto = ctx->vcrypto;
+	int err;
+	unsigned int num_out = 0, num_in = 0;
+
+	spin_lock(&vcrypto->ctrl_lock);
+	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
+	/* Pad ctrl header */
+	vcrypto->ctrl.header.opcode =
+		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
+	/* Set the default virtqueue id to 0 */
+	vcrypto->ctrl.header.queue_id = 0;
+
+	destroy_session = &vcrypto->ctrl.u.destroy_session;
+
+	if (encrypt)
+		destroy_session->session_id =
+			cpu_to_le64(ctx->enc_sess_info.session_id);
+	else
+		destroy_session->session_id =
+			cpu_to_le64(ctx->dec_sess_info.session_id);
+
+	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+	sgs[num_out++] = &outhdr;
+
+	/* Return status and session id back */
+	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
+		sizeof(vcrypto->ctrl_status.status));
+	sgs[num_out + num_in++] = &status_sg;
+
+	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
+			num_in, vcrypto, GFP_ATOMIC);
+	if (err < 0) {
+		spin_unlock(&vcrypto->ctrl_lock);
+		return err;
+	}
+	virtqueue_kick(vcrypto->ctrl_vq);
+
+	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
+	       !virtqueue_is_broken(vcrypto->ctrl_vq))
+		cpu_relax();
+
+	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
+		spin_unlock(&vcrypto->ctrl_lock);
+		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+			vcrypto->ctrl_status.status,
+			destroy_session->session_id);
+
+		return -EINVAL;
+	}
+	spin_unlock(&vcrypto->ctrl_lock);
+
+	return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_init_sessions(
+		struct virtio_crypto_ablkcipher_ctx *ctx,
+		const uint8_t *key, unsigned int keylen)
+{
+	uint32_t alg;
+	int ret;
+	struct virtio_crypto *vcrypto = ctx->vcrypto;
+
+	if (keylen > vcrypto->max_cipher_key_len) {
+		pr_err("virtio_crypto: the key is too long\n");
+		goto bad_key;
+	}
+
+	if (virtio_crypto_alg_validate_key(keylen, &alg))
+		goto bad_key;
+
+	/* Create encryption session */
+	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+			alg, key, keylen, 1);
+	if (ret)
+		return ret;
+	/* Create decryption session */
+	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+			alg, key, keylen, 0);
+	if (ret) {
+		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+		return ret;
+	}
+	return 0;
+
+bad_key:
+	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+/* Note: kernel crypto API implementation */
+static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+					 const uint8_t *key,
+					 unsigned int keylen)
+{
+	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	uint32_t alg;
+	int ret;
+
+	ret = virtio_crypto_alg_validate_key(keylen, &alg);
+	if (ret)
+		return ret;
+
+	if (!ctx->vcrypto) {
+		/* New key */
+		int node = virtio_crypto_get_current_node();
+		struct virtio_crypto *vcrypto =
+				      virtcrypto_get_dev_node(node,
+				      VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
+		if (!vcrypto) {
+			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
+			return -ENODEV;
+		}
+
+		ctx->vcrypto = vcrypto;
+	} else {
+		/* Rekeying, we should close the created sessions previously */
+		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+		virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+	}
+
+	ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
+	if (ret) {
+		virtcrypto_dev_put(ctx->vcrypto);
+		ctx->vcrypto = NULL;
+
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
+		struct ablkcipher_request *req,
+		struct data_queue *data_vq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
+	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
+	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+	struct virtio_crypto *vcrypto = ctx->vcrypto;
+	struct virtio_crypto_op_data_req *req_data;
+	int src_nents, dst_nents;
+	int err;
+	unsigned long flags;
+	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
+	int i;
+	u64 dst_len;
+	unsigned int num_out = 0, num_in = 0;
+	int sg_total;
+	uint8_t *iv;
+
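+	/*
+	 * The request is laid out on the data virtqueue as out buffers
+	 * (request header, IV, source data) followed by in buffers
+	 * (destination data, status byte).
+	 */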
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	dst_nents = sg_nents(req->dst);
+
+	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
+			src_nents, dst_nents);
+
+	/* Why 3?  outhdr + iv + inhdr */
+	sg_total = src_nents + dst_nents + 3;
+	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
+				dev_to_node(&vcrypto->vdev->dev));
+	if (!sgs)
+		return -ENOMEM;
+
+	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
+				dev_to_node(&vcrypto->vdev->dev));
+	if (!req_data) {
+		kfree(sgs);
+		return -ENOMEM;
+	}
+
+	vc_req->req_data = req_data;
+	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+	/* Head of operation */
+	if (vc_sym_req->encrypt) {
+		req_data->header.session_id =
+			cpu_to_le64(ctx->enc_sess_info.session_id);
+		req_data->header.opcode =
+			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
+	} else {
+		req_data->header.session_id =
+			cpu_to_le64(ctx->dec_sess_info.session_id);
+		req_data->header.opcode =
+			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
+	}
+	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
+	req_data->u.sym_req.u.cipher.para.src_data_len =
+			cpu_to_le32(req->nbytes);
+
+	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
+	if (unlikely(dst_len > U32_MAX)) {
+		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
+		err = -EINVAL;
+		goto free;
+	}
+
+	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
+			req->nbytes, dst_len);
+
+	if (unlikely(req->nbytes + dst_len + ivsize +
+		sizeof(vc_req->status) > vcrypto->max_size)) {
+		pr_err("virtio_crypto: The length is too big\n");
+		err = -EINVAL;
+		goto free;
+	}
+
+	req_data->u.sym_req.u.cipher.para.dst_data_len =
+			cpu_to_le32((uint32_t)dst_len);
+
+	/* Outhdr */
+	sg_init_one(&outhdr, req_data, sizeof(*req_data));
+	sgs[num_out++] = &outhdr;
+
+	/* IV */
+
+	/*
+	 * Avoid DMA from the stack: use a dynamically allocated buffer
+	 * for the IV.
+	 */
+	iv = kzalloc_node(ivsize, GFP_ATOMIC,
+				dev_to_node(&vcrypto->vdev->dev));
+	if (!iv) {
+		err = -ENOMEM;
+		goto free;
+	}
+	memcpy(iv, req->info, ivsize);
+	sg_init_one(&iv_sg, iv, ivsize);
+	sgs[num_out++] = &iv_sg;
+	vc_sym_req->iv = iv;
+
+	/* Source data */
+	for (i = 0; i < src_nents; i++)
+		sgs[num_out++] = &req->src[i];
+
+	/* Destination data */
+	for (i = 0; i < dst_nents; i++)
+		sgs[num_out + num_in++] = &req->dst[i];
+
+	/* Status */
+	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
+	sgs[num_out + num_in++] = &status_sg;
+
+	vc_req->sgs = sgs;
+
+	spin_lock_irqsave(&data_vq->lock, flags);
+	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
+				num_in, vc_req, GFP_ATOMIC);
+	virtqueue_kick(data_vq->vq);
+	spin_unlock_irqrestore(&data_vq->lock, flags);
+	if (unlikely(err < 0))
+		goto free_iv;
+
+	return 0;
+
+free_iv:
+	kzfree(iv);
+free:
+	kzfree(req_data);
+	kfree(sgs);
+	return err;
+}
+
+static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+	struct virtio_crypto_sym_request *vc_sym_req =
+				ablkcipher_request_ctx(req);
+	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
+	struct virtio_crypto *vcrypto = ctx->vcrypto;
+	/* Use the first data virtqueue as default */
+	struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+	vc_req->dataq = data_vq;
+	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+	vc_sym_req->ablkcipher_ctx = ctx;
+	vc_sym_req->ablkcipher_req = req;
+	vc_sym_req->encrypt = true;
+
+	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
+}
+
+static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+	struct virtio_crypto_sym_request *vc_sym_req =
+				ablkcipher_request_ctx(req);
+	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
+	struct virtio_crypto *vcrypto = ctx->vcrypto;
+	/* Use the first data virtqueue as default */
+	struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+	vc_req->dataq = data_vq;
+	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+	vc_sym_req->ablkcipher_ctx = ctx;
+	vc_sym_req->ablkcipher_req = req;
+	vc_sym_req->encrypt = false;
+
+	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
+}
+
+static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
+	ctx->tfm = tfm;
+
+	ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
+	ctx->enginectx.op.prepare_request = NULL;
+	ctx->enginectx.op.unprepare_request = NULL;
+	return 0;
+}
+
+static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (!ctx->vcrypto)
+		return;
+
+	virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+	virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+	virtcrypto_dev_put(ctx->vcrypto);
+	ctx->vcrypto = NULL;
+}
+
+int virtio_crypto_ablkcipher_crypt_req(
+	struct crypto_engine *engine, void *vreq)
+{
+	struct ablkcipher_request *req =
+		container_of(vreq, struct ablkcipher_request, base);
+	struct virtio_crypto_sym_request *vc_sym_req =
+				ablkcipher_request_ctx(req);
+	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
+	struct data_queue *data_vq = vc_req->dataq;
+	int ret;
+
+	ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
+	if (ret < 0)
+		return ret;
+
+	virtqueue_kick(data_vq->vq);
+
+	return 0;
+}
+
+static void virtio_crypto_ablkcipher_finalize_req(
+	struct virtio_crypto_sym_request *vc_sym_req,
+	struct ablkcipher_request *req,
+	int err)
+{
+	crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
+					   req, err);
+	kzfree(vc_sym_req->iv);
+	virtcrypto_clear_request(&vc_sym_req->base);
+}
+
+static struct virtio_crypto_algo virtio_crypto_algs[] = { {
+	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
+	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
+	.algo = {
+		.cra_name = "cbc(aes)",
+		.cra_driver_name = "virtio_crypto_aes_cbc",
+		.cra_priority = 150,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize  = sizeof(struct virtio_crypto_ablkcipher_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_init = virtio_crypto_ablkcipher_init,
+		.cra_exit = virtio_crypto_ablkcipher_exit,
+		.cra_u = {
+			.ablkcipher = {
+				.setkey = virtio_crypto_ablkcipher_setkey,
+				.decrypt = virtio_crypto_ablkcipher_decrypt,
+				.encrypt = virtio_crypto_ablkcipher_encrypt,
+				.min_keysize = AES_MIN_KEY_SIZE,
+				.max_keysize = AES_MAX_KEY_SIZE,
+				.ivsize = AES_BLOCK_SIZE,
+			},
+		},
+	},
+} };
+
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
+{
+	int ret = 0;
+	int i = 0;
+
+	mutex_lock(&algs_lock);
+
+	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+		uint32_t service = virtio_crypto_algs[i].service;
+		uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
+			continue;
+
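+		/*
+		 * Register with the crypto API only once, when the first
+		 * device supporting this algorithm shows up.
+		 */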
+		if (virtio_crypto_algs[i].active_devs == 0) {
+			ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
+			if (ret)
+				goto unlock;
+		}
+
+		virtio_crypto_algs[i].active_devs++;
+		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
+			 virtio_crypto_algs[i].algo.cra_name);
+	}
+
+unlock:
+	mutex_unlock(&algs_lock);
+	return ret;
+}
+
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
+{
+	int i = 0;
+
+	mutex_lock(&algs_lock);
+
+	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+		uint32_t service = virtio_crypto_algs[i].service;
+		uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+		if (virtio_crypto_algs[i].active_devs == 0 ||
+		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
+			continue;
+
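+		/* Unregister only when the last device using the algorithm goes away. */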
+		if (virtio_crypto_algs[i].active_devs == 1)
+			crypto_unregister_alg(&virtio_crypto_algs[i].algo);
+
+		virtio_crypto_algs[i].active_devs--;
+	}
+
+	mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
new file mode 100644
index 0000000..63ef7f7
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -0,0 +1,147 @@
+/* Common header for Virtio crypto device.
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _VIRTIO_CRYPTO_COMMON_H
+#define _VIRTIO_CRYPTO_COMMON_H
+
+#include <linux/virtio.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+
+
+/* Internal representation of a data virtqueue */
+struct data_queue {
+	/* Virtqueue associated with this data queue */
+	struct virtqueue *vq;
+
+	/* To protect the vq operations for the dataq */
+	spinlock_t lock;
+
+	/* Name of the data queue: dataq.$index */
+	char name[32];
+
+	struct crypto_engine *engine;
+};
+
+struct virtio_crypto {
+	struct virtio_device *vdev;
+	struct virtqueue *ctrl_vq;
+	struct data_queue *data_vq;
+
+	/* To protect the vq operations for the controlq */
+	spinlock_t ctrl_lock;
+
+	/* Maximum of data queues supported by the device */
+	u32 max_data_queues;
+
+	/* Number of queues currently used by the driver */
+	u32 curr_queue;
+
+	/*
+	 * Specifies the mask of services which the device supports,
+	 * see VIRTIO_CRYPTO_SERVICE_*
+	 */
+	u32 crypto_services;
+
+	/* Detailed algorithms mask */
+	u32 cipher_algo_l;
+	u32 cipher_algo_h;
+	u32 hash_algo;
+	u32 mac_algo_l;
+	u32 mac_algo_h;
+	u32 aead_algo;
+
+	/* Maximum length of cipher key */
+	u32 max_cipher_key_len;
+	/* Maximum length of authenticated key */
+	u32 max_auth_key_len;
+	/* Maximum size of each request */
+	u64 max_size;
+
+	/* Control VQ buffers: protected by the ctrl_lock */
+	struct virtio_crypto_op_ctrl_req ctrl;
+	struct virtio_crypto_session_input input;
+	struct virtio_crypto_inhdr ctrl_status;
+
+	unsigned long status;
+	atomic_t ref_count;
+	struct list_head list;
+	struct module *owner;
+	uint8_t dev_id;
+
+	/* Is the affinity hint set for the virtqueues? */
+	bool affinity_hint_set;
+};
+
+struct virtio_crypto_sym_session_info {
+	/* Backend session id, which comes from the host side */
+	__u64 session_id;
+};
+
+struct virtio_crypto_request;
+typedef void (*virtio_crypto_data_callback)
+		(struct virtio_crypto_request *vc_req, int len);
+
+struct virtio_crypto_request {
+	uint8_t status;
+	struct virtio_crypto_op_data_req *req_data;
+	struct scatterlist **sgs;
+	struct data_queue *dataq;
+	virtio_crypto_data_callback alg_cb;
+};
+
+int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
+struct list_head *virtcrypto_devmgr_get_head(void);
+void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
+struct virtio_crypto *virtcrypto_devmgr_get_first(void);
+int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
+int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
+void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
+int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
+				  uint32_t service,
+				  uint32_t algo);
+struct virtio_crypto *virtcrypto_get_dev_node(int node,
+					      uint32_t service,
+					      uint32_t algo);
+int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
+void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
+int virtio_crypto_ablkcipher_crypt_req(
+	struct crypto_engine *engine, void *vreq);
+
+void
+virtcrypto_clear_request(struct virtio_crypto_request *vc_req);
+
+static inline int virtio_crypto_get_current_node(void)
+{
+	int cpu, node;
+
+	cpu = get_cpu();
+	node = topology_physical_package_id(cpu);
+	put_cpu();
+
+	return node;
+}
+
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
+
+#endif /* _VIRTIO_CRYPTO_COMMON_H */
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
new file mode 100644
index 0000000..3c9e120
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -0,0 +1,542 @@
+ /* Driver for Virtio crypto device.
+  *
+  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
+  */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/virtio_config.h>
+#include <linux/cpu.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+
+void
+virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
+{
+	if (vc_req) {
+		kzfree(vc_req->req_data);
+		kfree(vc_req->sgs);
+	}
+}
+
+static void virtcrypto_dataq_callback(struct virtqueue *vq)
+{
+	struct virtio_crypto *vcrypto = vq->vdev->priv;
+	struct virtio_crypto_request *vc_req;
+	unsigned long flags;
+	unsigned int len;
+	unsigned int qid = vq->index;
+
+	spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
+	do {
+		virtqueue_disable_cb(vq);
+		while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
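+			/*
+			 * Drop the queue lock while running the completion
+			 * callback, since it may submit new requests that
+			 * need to take this lock again.
+			 */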
+			spin_unlock_irqrestore(
+				&vcrypto->data_vq[qid].lock, flags);
+			if (vc_req->alg_cb)
+				vc_req->alg_cb(vc_req, len);
+			spin_lock_irqsave(
+				&vcrypto->data_vq[qid].lock, flags);
+		}
+	} while (!virtqueue_enable_cb(vq));
+	spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
+}
+
+static int virtcrypto_find_vqs(struct virtio_crypto *vi)
+{
+	vq_callback_t **callbacks;
+	struct virtqueue **vqs;
+	int ret = -ENOMEM;
+	int i, total_vqs;
+	const char **names;
+	struct device *dev = &vi->vdev->dev;
+
+	/*
+	 * We expect 1 data virtqueue, followed by up to N-1 further data
+	 * queues used in multiqueue mode, followed by the control vq.
+	 */
+	total_vqs = vi->max_data_queues + 1;
+
+	/* Allocate space for find_vqs parameters */
+	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
+	if (!vqs)
+		goto err_vq;
+	callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
+	if (!callbacks)
+		goto err_callback;
+	names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
+	if (!names)
+		goto err_names;
+
+	/* Parameters for control virtqueue */
+	callbacks[total_vqs - 1] = NULL;
+	names[total_vqs - 1] = "controlq";
+
+	/* Allocate/initialize parameters for data virtqueues */
+	for (i = 0; i < vi->max_data_queues; i++) {
+		callbacks[i] = virtcrypto_dataq_callback;
+		snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
+				"dataq.%d", i);
+		names[i] = vi->data_vq[i].name;
+	}
+
+	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
+	if (ret)
+		goto err_find;
+
+	vi->ctrl_vq = vqs[total_vqs - 1];
+
+	for (i = 0; i < vi->max_data_queues; i++) {
+		spin_lock_init(&vi->data_vq[i].lock);
+		vi->data_vq[i].vq = vqs[i];
+		/* Initialize crypto engine */
+		vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1);
+		if (!vi->data_vq[i].engine) {
+			ret = -ENOMEM;
+			goto err_engine;
+		}
+	}
+
+	kfree(names);
+	kfree(callbacks);
+	kfree(vqs);
+
+	return 0;
+
+err_engine:
+err_find:
+	kfree(names);
+err_names:
+	kfree(callbacks);
+err_callback:
+	kfree(vqs);
+err_vq:
+	return ret;
+}
+
+static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
+{
+	vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
+				GFP_KERNEL);
+	if (!vi->data_vq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
+{
+	int i;
+
+	if (vi->affinity_hint_set) {
+		for (i = 0; i < vi->max_data_queues; i++)
+			virtqueue_set_affinity(vi->data_vq[i].vq, NULL);
+
+		vi->affinity_hint_set = false;
+	}
+}
+
+static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
+{
+	int i = 0;
+	int cpu;
+
+	/*
+	 * In single queue mode, we don't set the cpu affinity.
+	 */
+	if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
+		virtcrypto_clean_affinity(vcrypto, -1);
+		return;
+	}
+
+	/*
+	 * In multiqueue mode, we make each queue private to one cpu by
+	 * setting the affinity hint, to eliminate contention.
+	 *
+	 * TODO: add cpu hotplug support by registering a cpu notifier.
+	 */
+	for_each_online_cpu(cpu) {
+		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
+		if (++i >= vcrypto->max_data_queues)
+			break;
+	}
+
+	vcrypto->affinity_hint_set = true;
+}
+
+static void virtcrypto_free_queues(struct virtio_crypto *vi)
+{
+	kfree(vi->data_vq);
+}
+
+static int virtcrypto_init_vqs(struct virtio_crypto *vi)
+{
+	int ret;
+
+	/* Allocate send & receive queues */
+	ret = virtcrypto_alloc_queues(vi);
+	if (ret)
+		goto err;
+
+	ret = virtcrypto_find_vqs(vi);
+	if (ret)
+		goto err_free;
+
+	get_online_cpus();
+	virtcrypto_set_affinity(vi);
+	put_online_cpus();
+
+	return 0;
+
+err_free:
+	virtcrypto_free_queues(vi);
+err:
+	return ret;
+}
+
+static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
+{
+	u32 status;
+	int err;
+
+	virtio_cread(vcrypto->vdev,
+	    struct virtio_crypto_config, status, &status);
+
+	/*
+	 * Unknown status bits would be a host error and the driver
+	 * should consider the device to be broken.
+	 */
+	if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
+		dev_warn(&vcrypto->vdev->dev,
+				"Unknown status bits: 0x%x\n", status);
+
+		virtio_break_device(vcrypto->vdev);
+		return -EPERM;
+	}
+
+	if (vcrypto->status == status)
+		return 0;
+
+	vcrypto->status = status;
+
+	if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
+		err = virtcrypto_dev_start(vcrypto);
+		if (err) {
+			dev_err(&vcrypto->vdev->dev,
+				"Failed to start virtio crypto device.\n");
+
+			return -EPERM;
+		}
+		dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
+	} else {
+		virtcrypto_dev_stop(vcrypto);
+		dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
+	}
+
+	return 0;
+}
+
+static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
+{
+	int32_t i;
+	int ret;
+
+	for (i = 0; i < vcrypto->max_data_queues; i++) {
+		if (vcrypto->data_vq[i].engine) {
+			ret = crypto_engine_start(vcrypto->data_vq[i].engine);
+			if (ret)
+				goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	while (--i >= 0)
+		if (vcrypto->data_vq[i].engine)
+			crypto_engine_exit(vcrypto->data_vq[i].engine);
+
+	return ret;
+}
+
+static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
+{
+	u32 i;
+
+	for (i = 0; i < vcrypto->max_data_queues; i++)
+		if (vcrypto->data_vq[i].engine)
+			crypto_engine_exit(vcrypto->data_vq[i].engine);
+}
+
+static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
+{
+	struct virtio_device *vdev = vcrypto->vdev;
+
+	virtcrypto_clean_affinity(vcrypto, -1);
+
+	vdev->config->del_vqs(vdev);
+
+	virtcrypto_free_queues(vcrypto);
+}
+
+static int virtcrypto_probe(struct virtio_device *vdev)
+{
+	int err = -EFAULT;
+	struct virtio_crypto *vcrypto;
+	u32 max_data_queues = 0, max_cipher_key_len = 0;
+	u32 max_auth_key_len = 0;
+	u64 max_size = 0;
+	u32 cipher_algo_l = 0;
+	u32 cipher_algo_h = 0;
+	u32 hash_algo = 0;
+	u32 mac_algo_l = 0;
+	u32 mac_algo_h = 0;
+	u32 aead_algo = 0;
+	u32 crypto_services = 0;
+
+	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+		return -ENODEV;
+
+	if (!vdev->config->get) {
+		dev_err(&vdev->dev, "%s failure: config access disabled\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
+		/*
+		 * If the accelerator is connected to a node with no memory
+		 * there is no point in using the accelerator since the remote
+		 * memory transaction will be very slow.
+		 */
+		dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
+		return -EINVAL;
+	}
+
+	vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
+					dev_to_node(&vdev->dev));
+	if (!vcrypto)
+		return -ENOMEM;
+
+	virtio_cread(vdev, struct virtio_crypto_config,
+			max_dataqueues, &max_data_queues);
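+	/* The driver needs at least one data queue; treat 0 as 1. */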
+	if (max_data_queues < 1)
+		max_data_queues = 1;
+
+	virtio_cread(vdev, struct virtio_crypto_config,
+		max_cipher_key_len, &max_cipher_key_len);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		max_auth_key_len, &max_auth_key_len);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		max_size, &max_size);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		crypto_services, &crypto_services);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		cipher_algo_l, &cipher_algo_l);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		cipher_algo_h, &cipher_algo_h);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		hash_algo, &hash_algo);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		mac_algo_l, &mac_algo_l);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		mac_algo_h, &mac_algo_h);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		aead_algo, &aead_algo);
+
+	/* Add virtio crypto device to global table */
+	err = virtcrypto_devmgr_add_dev(vcrypto);
+	if (err) {
+		dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
+		goto free;
+	}
+	vcrypto->owner = THIS_MODULE;
+	vdev->priv = vcrypto;
+	vcrypto->vdev = vdev;
+
+	spin_lock_init(&vcrypto->ctrl_lock);
+
+	/* Use single data queue as default */
+	vcrypto->curr_queue = 1;
+	vcrypto->max_data_queues = max_data_queues;
+	vcrypto->max_cipher_key_len = max_cipher_key_len;
+	vcrypto->max_auth_key_len = max_auth_key_len;
+	vcrypto->max_size = max_size;
+	vcrypto->crypto_services = crypto_services;
+	vcrypto->cipher_algo_l = cipher_algo_l;
+	vcrypto->cipher_algo_h = cipher_algo_h;
+	vcrypto->mac_algo_l = mac_algo_l;
+	vcrypto->mac_algo_h = mac_algo_h;
+	vcrypto->hash_algo = hash_algo;
+	vcrypto->aead_algo = aead_algo;
+
+
+	dev_info(&vdev->dev,
+		"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
+		vcrypto->max_data_queues,
+		vcrypto->max_cipher_key_len,
+		vcrypto->max_auth_key_len,
+		vcrypto->max_size);
+
+	err = virtcrypto_init_vqs(vcrypto);
+	if (err) {
+		dev_err(&vdev->dev, "Failed to initialize vqs.\n");
+		goto free_dev;
+	}
+
+	err = virtcrypto_start_crypto_engines(vcrypto);
+	if (err)
+		goto free_vqs;
+
+	virtio_device_ready(vdev);
+
+	err = virtcrypto_update_status(vcrypto);
+	if (err)
+		goto free_engines;
+
+	return 0;
+
+free_engines:
+	virtcrypto_clear_crypto_engines(vcrypto);
+free_vqs:
+	vcrypto->vdev->config->reset(vdev);
+	virtcrypto_del_vqs(vcrypto);
+free_dev:
+	virtcrypto_devmgr_rm_dev(vcrypto);
+free:
+	kfree(vcrypto);
+	return err;
+}
+
+static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
+{
+	struct virtio_crypto_request *vc_req;
+	int i;
+	struct virtqueue *vq;
+
+	for (i = 0; i < vcrypto->max_data_queues; i++) {
+		vq = vcrypto->data_vq[i].vq;
+		while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
+			kfree(vc_req->req_data);
+			kfree(vc_req->sgs);
+		}
+	}
+}
+
+static void virtcrypto_remove(struct virtio_device *vdev)
+{
+	struct virtio_crypto *vcrypto = vdev->priv;
+
+	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+
+	if (virtcrypto_dev_started(vcrypto))
+		virtcrypto_dev_stop(vcrypto);
+	vdev->config->reset(vdev);
+	virtcrypto_free_unused_reqs(vcrypto);
+	virtcrypto_clear_crypto_engines(vcrypto);
+	virtcrypto_del_vqs(vcrypto);
+	virtcrypto_devmgr_rm_dev(vcrypto);
+	kfree(vcrypto);
+}
+
+static void virtcrypto_config_changed(struct virtio_device *vdev)
+{
+	struct virtio_crypto *vcrypto = vdev->priv;
+
+	virtcrypto_update_status(vcrypto);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtcrypto_freeze(struct virtio_device *vdev)
+{
+	struct virtio_crypto *vcrypto = vdev->priv;
+
+	vdev->config->reset(vdev);
+	virtcrypto_free_unused_reqs(vcrypto);
+	if (virtcrypto_dev_started(vcrypto))
+		virtcrypto_dev_stop(vcrypto);
+
+	virtcrypto_clear_crypto_engines(vcrypto);
+	virtcrypto_del_vqs(vcrypto);
+	return 0;
+}
+
+static int virtcrypto_restore(struct virtio_device *vdev)
+{
+	struct virtio_crypto *vcrypto = vdev->priv;
+	int err;
+
+	err = virtcrypto_init_vqs(vcrypto);
+	if (err)
+		return err;
+
+	err = virtcrypto_start_crypto_engines(vcrypto);
+	if (err)
+		goto free_vqs;
+
+	virtio_device_ready(vdev);
+
+	err = virtcrypto_dev_start(vcrypto);
+	if (err) {
+		dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
+		goto free_engines;
+	}
+
+	return 0;
+
+free_engines:
+	virtcrypto_clear_crypto_engines(vcrypto);
+free_vqs:
+	vcrypto->vdev->config->reset(vdev);
+	virtcrypto_del_vqs(vcrypto);
+	return err;
+}
+#endif
+
+static unsigned int features[] = {
+	/* none */
+};
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct virtio_driver virtio_crypto_driver = {
+	.driver.name         = KBUILD_MODNAME,
+	.driver.owner        = THIS_MODULE,
+	.feature_table       = features,
+	.feature_table_size  = ARRAY_SIZE(features),
+	.id_table            = id_table,
+	.probe               = virtcrypto_probe,
+	.remove              = virtcrypto_remove,
+	.config_changed = virtcrypto_config_changed,
+#ifdef CONFIG_PM_SLEEP
+	.freeze = virtcrypto_freeze,
+	.restore = virtcrypto_restore,
+#endif
+};
+
+module_virtio_driver(virtio_crypto_driver);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("virtio crypto device driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
new file mode 100644
index 0000000..d70de3a
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -0,0 +1,333 @@
+ /* Management for virtio crypto devices (refer to adf_dev_mgr.c)
+  *
+  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
+  */
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+static LIST_HEAD(virtio_crypto_table);
+static uint32_t num_devices;
+
+/* The table_lock protects the above global list and num_devices */
+static DEFINE_MUTEX(table_lock);
+
+#define VIRTIO_CRYPTO_MAX_DEVICES 32
+
+
+/*
+ * virtcrypto_devmgr_add_dev() - Add vcrypto_dev to the acceleration
+ * framework.
+ * @vcrypto_dev:  Pointer to virtio crypto device.
+ *
+ * Function adds virtio crypto device to the global list.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev)
+{
+	struct list_head *itr;
+
+	mutex_lock(&table_lock);
+	if (num_devices == VIRTIO_CRYPTO_MAX_DEVICES) {
+		pr_info("virtio_crypto: only supports up to %d devices\n",
+			    VIRTIO_CRYPTO_MAX_DEVICES);
+		mutex_unlock(&table_lock);
+		return -EFAULT;
+	}
+
+	list_for_each(itr, &virtio_crypto_table) {
+		struct virtio_crypto *ptr =
+				list_entry(itr, struct virtio_crypto, list);
+
+		if (ptr == vcrypto_dev) {
+			mutex_unlock(&table_lock);
+			return -EEXIST;
+		}
+	}
+	atomic_set(&vcrypto_dev->ref_count, 0);
+	list_add_tail(&vcrypto_dev->list, &virtio_crypto_table);
+	vcrypto_dev->dev_id = num_devices++;
+	mutex_unlock(&table_lock);
+	return 0;
+}
+
+struct list_head *virtcrypto_devmgr_get_head(void)
+{
+	return &virtio_crypto_table;
+}
+
+/*
+ * virtcrypto_devmgr_rm_dev() - Remove vcrypto_dev from the acceleration
+ * framework.
+ * @vcrypto_dev:  Pointer to virtio crypto device.
+ *
+ * Function removes virtio crypto device from the acceleration framework.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
+{
+	mutex_lock(&table_lock);
+	list_del(&vcrypto_dev->list);
+	num_devices--;
+	mutex_unlock(&table_lock);
+}
+
+/*
+ * virtcrypto_devmgr_get_first()
+ *
+ * Function returns the first virtio crypto device from the acceleration
+ * framework.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: pointer to vcrypto_dev or NULL if not found.
+ */
+struct virtio_crypto *virtcrypto_devmgr_get_first(void)
+{
+	struct virtio_crypto *dev = NULL;
+
+	mutex_lock(&table_lock);
+	if (!list_empty(&virtio_crypto_table))
+		dev = list_first_entry(&virtio_crypto_table,
+					struct virtio_crypto,
+				    list);
+	mutex_unlock(&table_lock);
+	return dev;
+}
+
+/*
+ * virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
+int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
+{
+	return atomic_read(&vcrypto_dev->ref_count) != 0;
+}
+
+/*
+ * virtcrypto_dev_get() - Increment vcrypto_dev reference count
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Increment the vcrypto_dev refcount and, if this takes the device
+ * from unused to in use, take a reference on the owning module too.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 on success, -EFAULT when the module reference cannot be taken
+ */
+int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev)
+{
+	if (atomic_add_return(1, &vcrypto_dev->ref_count) == 1)
+		if (!try_module_get(vcrypto_dev->owner))
+			return -EFAULT;
+	return 0;
+}
+
+/*
+ * virtcrypto_dev_put() - Decrement vcrypto_dev reference count
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Decrement the vcrypto_dev refcount and, if this drops the last
+ * reference while the device is in use, release the module reference
+ * too.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev)
+{
+	if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)
+		module_put(vcrypto_dev->owner);
+}
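+
+/*
+ * Usage sketch (illustrative only, not part of this driver): a caller
+ * that holds a device for the duration of an operation pairs the two
+ * helpers above. virtcrypto_dev_get() fails only when the owning
+ * module is already being unloaded.
+ *
+ *	if (virtcrypto_dev_get(vcrypto))
+ *		return -ENODEV;
+ *	... submit work to vcrypto ...
+ *	virtcrypto_dev_put(vcrypto);
+ */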
+
+/*
+ * virtcrypto_dev_started() - Check whether device has started
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
+int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
+{
+	return (vcrypto_dev->status & VIRTIO_CRYPTO_S_HW_READY);
+}
+
+/*
+ * virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
+ * @node:  Node id on which the driver works.
+ * @service: Crypto service that needs to be supported by the
+ *	      dev
+ * @algo: The algorithm number that needs to be supported by the
+ *	  dev
+ *
+ * Function returns the least-used virtio crypto device on the node
+ * that supports the given crypto service and algorithm.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: pointer to vcrypto_dev or NULL if not found.
+ */
+struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
+					      uint32_t algo)
+{
+	struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
+	unsigned long best = ~0;
+	unsigned long ctr;
+
+	mutex_lock(&table_lock);
+	list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {
+
+		if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
+		     dev_to_node(&tmp_dev->vdev->dev) < 0) &&
+		    virtcrypto_dev_started(tmp_dev) &&
+		    virtcrypto_algo_is_supported(tmp_dev, service, algo)) {
+			ctr = atomic_read(&tmp_dev->ref_count);
+			if (best > ctr) {
+				vcrypto_dev = tmp_dev;
+				best = ctr;
+			}
+		}
+	}
+
+	if (!vcrypto_dev) {
+		pr_info("virtio_crypto: Could not find a device on node %d\n",
+				node);
+		/* Get any started device */
+		list_for_each_entry(tmp_dev,
+				virtcrypto_devmgr_get_head(), list) {
+			if (virtcrypto_dev_started(tmp_dev) &&
+			    virtcrypto_algo_is_supported(tmp_dev,
+			    service, algo)) {
+				vcrypto_dev = tmp_dev;
+				break;
+			}
+		}
+	}
+	mutex_unlock(&table_lock);
+	if (!vcrypto_dev)
+		return NULL;
+
+	virtcrypto_dev_get(vcrypto_dev);
+	return vcrypto_dev;
+}
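+
+/*
+ * Usage sketch (illustrative only, not part of this driver): a session
+ * setup path could pick a device close to its NUMA node and release it
+ * when done. The service/algorithm constants come from
+ * <uapi/linux/virtio_crypto.h>; note that virtcrypto_get_dev_node()
+ * already takes the device reference, so only a put is needed here.
+ *
+ *	struct virtio_crypto *vcrypto;
+ *
+ *	vcrypto = virtcrypto_get_dev_node(node,
+ *					  VIRTIO_CRYPTO_SERVICE_CIPHER,
+ *					  VIRTIO_CRYPTO_CIPHER_AES_CBC);
+ *	if (!vcrypto)
+ *		return -ENODEV;
+ *
+ *	... create the session and queue requests on vcrypto ...
+ *
+ *	virtcrypto_dev_put(vcrypto);
+ */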
+
+/*
+ * virtcrypto_dev_start() - Start virtio crypto device
+ * @vcrypto:    Pointer to virtio crypto device.
+ *
+ * Function notifies all the registered services that the virtio crypto device
+ * is ready to be used.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 on success, -EFAULT when registering the crypto algorithms fails
+ */
+int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
+{
+	if (virtio_crypto_algs_register(vcrypto)) {
+		pr_err("virtio_crypto: Failed to register crypto algs\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * virtcrypto_dev_stop() - Stop virtio crypto device
+ * @vcrypto:    Pointer to virtio crypto device.
+ *
+ * Function notifies all the registered services that the virtio crypto device
+ * is going away and can no longer be used.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
+{
+	virtio_crypto_algs_unregister(vcrypto);
+}
+
+/*
+ * virtcrypto_algo_is_supported()
+ * @vcrypto: Pointer to virtio crypto device.
+ * @service: The bit number of the service to validate.
+ *	      See VIRTIO_CRYPTO_SERVICE_*
+ * @algo: The bit number of the algorithm to validate.
+ *
+ * Validate whether the virtio crypto device supports the given service
+ * and algorithm.
+ *
+ * Return: true if the device supports the service and algorithm,
+ * false otherwise.
+ */
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
+				  uint32_t service,
+				  uint32_t algo)
+{
+	uint32_t service_mask = 1u << service;
+	uint32_t algo_mask = 0;
+	bool low = true;
+
+	if (algo > 31) {
+		algo -= 32;
+		low = false;
+	}
+
+	if (!(vcrypto->crypto_services & service_mask))
+		return false;
+
+	switch (service) {
+	case VIRTIO_CRYPTO_SERVICE_CIPHER:
+		if (low)
+			algo_mask = vcrypto->cipher_algo_l;
+		else
+			algo_mask = vcrypto->cipher_algo_h;
+		break;
+
+	case VIRTIO_CRYPTO_SERVICE_HASH:
+		algo_mask = vcrypto->hash_algo;
+		break;
+
+	case VIRTIO_CRYPTO_SERVICE_MAC:
+		if (low)
+			algo_mask = vcrypto->mac_algo_l;
+		else
+			algo_mask = vcrypto->mac_algo_h;
+		break;
+
+	case VIRTIO_CRYPTO_SERVICE_AEAD:
+		algo_mask = vcrypto->aead_algo;
+		break;
+	}
+
+	if (!(algo_mask & (1u << algo)))
+		return false;
+
+	return true;
+}
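+
+/*
+ * Worked example (illustrative only): the cipher and MAC algorithm
+ * spaces are 64 bits wide, split into the *_algo_l (bits 0-31) and
+ * *_algo_h (bits 32-63) config words. Checking a hypothetical MAC
+ * algorithm number 35 therefore reduces to:
+ *
+ *	(vcrypto->crypto_services & (1u << VIRTIO_CRYPTO_SERVICE_MAC)) &&
+ *	(vcrypto->mac_algo_h & (1u << (35 - 32)))
+ */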
diff --git a/drivers/crypto/vmx/.gitignore b/drivers/crypto/vmx/.gitignore
new file mode 100644
index 0000000..af4a7ce
--- /dev/null
+++ b/drivers/crypto/vmx/.gitignore
@@ -0,0 +1,2 @@
+aesp8-ppc.S
+ghashp8-ppc.S
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
new file mode 100644
index 0000000..c3d524e
--- /dev/null
+++ b/drivers/crypto/vmx/Kconfig
@@ -0,0 +1,9 @@
+config CRYPTO_DEV_VMX_ENCRYPT
+	tristate "Encryption acceleration support on P8 CPU"
+	depends on CRYPTO_DEV_VMX
+	select CRYPTO_GHASH
+	default m
+	help
+	  Support for VMX cryptographic acceleration instructions on Power8 CPU.
+	  This module supports acceleration for AES and GHASH in hardware. If you
+	  choose 'M' here, this module will be called vmx-crypto.
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
new file mode 100644
index 0000000..cab32cf
--- /dev/null
+++ b/drivers/crypto/vmx/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
+vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
+
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+TARGET := linux-ppc64le
+else
+TARGET := linux-ppc64
+endif
+
+quiet_cmd_perl = PERL $@
+      cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
+
+targets += aesp8-ppc.S ghashp8-ppc.S
+
+$(obj)/aesp8-ppc.S: $(src)/aesp8-ppc.pl FORCE
+	$(call if_changed,perl)
+
+$(obj)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl FORCE
+	$(call if_changed,perl)
+
+clean-files := aesp8-ppc.S ghashp8-ppc.S
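+
+# Example (illustrative expansion only): on a little-endian ppc64 build,
+# the if_changed rule above ends up running roughly
+#
+#	$(PERL) $(src)/aesp8-ppc.pl linux-ppc64le > $(obj)/aesp8-ppc.S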
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
new file mode 100644
index 0000000..d7316f7
--- /dev/null
+++ b/drivers/crypto/vmx/aes.c
@@ -0,0 +1,143 @@
+/**
+ * AES routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+
+#include "aesp8-ppc.h"
+
+struct p8_aes_ctx {
+	struct crypto_cipher *fallback;
+	struct aes_key enc_key;
+	struct aes_key dec_key;
+};
+
+static int p8_aes_init(struct crypto_tfm *tfm)
+{
+	const char *alg = crypto_tfm_alg_name(tfm);
+	struct crypto_cipher *fallback;
+	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	fallback = crypto_alloc_cipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback)) {
+		printk(KERN_ERR
+		       "Failed to allocate transformation for '%s': %ld\n",
+		       alg, PTR_ERR(fallback));
+		return PTR_ERR(fallback);
+	}
+
+	crypto_cipher_set_flags(fallback,
+				crypto_cipher_get_flags((struct
+							 crypto_cipher *)
+							tfm));
+	ctx->fallback = fallback;
+
+	return 0;
+}
+
+static void p8_aes_exit(struct crypto_tfm *tfm)
+{
+	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (ctx->fallback) {
+		crypto_free_cipher(ctx->fallback);
+		ctx->fallback = NULL;
+	}
+}
+
+static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
+			 unsigned int keylen)
+{
+	int ret;
+	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	preempt_disable();
+	pagefault_disable();
+	enable_kernel_vsx();
+	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+	disable_kernel_vsx();
+	pagefault_enable();
+	preempt_enable();
+
+	ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
+	return ret;
+}
+
+static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (in_interrupt()) {
+		crypto_cipher_encrypt_one(ctx->fallback, dst, src);
+	} else {
+		preempt_disable();
+		pagefault_disable();
+		enable_kernel_vsx();
+		aes_p8_encrypt(src, dst, &ctx->enc_key);
+		disable_kernel_vsx();
+		pagefault_enable();
+		preempt_enable();
+	}
+}
+
+static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (in_interrupt()) {
+		crypto_cipher_decrypt_one(ctx->fallback, dst, src);
+	} else {
+		preempt_disable();
+		pagefault_disable();
+		enable_kernel_vsx();
+		aes_p8_decrypt(src, dst, &ctx->dec_key);
+		disable_kernel_vsx();
+		pagefault_enable();
+		preempt_enable();
+	}
+}
+
+struct crypto_alg p8_aes_alg = {
+	.cra_name = "aes",
+	.cra_driver_name = "p8_aes",
+	.cra_module = THIS_MODULE,
+	.cra_priority = 1000,
+	.cra_type = NULL,
+	.cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK,
+	.cra_alignmask = 0,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct p8_aes_ctx),
+	.cra_init = p8_aes_init,
+	.cra_exit = p8_aes_exit,
+	.cra_cipher = {
+		       .cia_min_keysize = AES_MIN_KEY_SIZE,
+		       .cia_max_keysize = AES_MAX_KEY_SIZE,
+		       .cia_setkey = p8_aes_setkey,
+		       .cia_encrypt = p8_aes_encrypt,
+		       .cia_decrypt = p8_aes_decrypt,
+	},
+};
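+
+/*
+ * Usage sketch (illustrative only, not part of this driver): with this
+ * module loaded, a request for the plain "aes" cipher will normally
+ * resolve to p8_aes because of its cra_priority of 1000. Error
+ * handling is omitted and the key/block buffers are hypothetical.
+ *
+ *	struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
+ *
+ *	crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128);
+ *	crypto_cipher_encrypt_one(tfm, dst, src);
+ *	crypto_free_cipher(tfm);
+ */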
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
new file mode 100644
index 0000000..b718958
--- /dev/null
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -0,0 +1,192 @@
+/**
+ * AES CBC routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+
+#include "aesp8-ppc.h"
+
+struct p8_aes_cbc_ctx {
+	struct crypto_skcipher *fallback;
+	struct aes_key enc_key;
+	struct aes_key dec_key;
+};
+
+static int p8_aes_cbc_init(struct crypto_tfm *tfm)
+{
+	const char *alg = crypto_tfm_alg_name(tfm);
+	struct crypto_skcipher *fallback;
+	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	fallback = crypto_alloc_skcipher(alg, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(fallback)) {
+		printk(KERN_ERR
+		       "Failed to allocate transformation for '%s': %ld\n",
+		       alg, PTR_ERR(fallback));
+		return PTR_ERR(fallback);
+	}
+
+	crypto_skcipher_set_flags(
+		fallback,
+		crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+	ctx->fallback = fallback;
+
+	return 0;
+}
+
+static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
+{
+	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (ctx->fallback) {
+		crypto_free_skcipher(ctx->fallback);
+		ctx->fallback = NULL;
+	}
+}
+
+static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
+			     unsigned int keylen)
+{
+	int ret;
+	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	preempt_disable();
+	pagefault_disable();
+	enable_kernel_vsx();
+	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+	disable_kernel_vsx();
+	pagefault_enable();
+	preempt_enable();
+
+	ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	return ret;
+}
+
+static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
+			      struct scatterlist *dst,
+			      struct scatterlist *src, unsigned int nbytes)
+{
+	int ret;
+	struct blkcipher_walk walk;
+	struct p8_aes_cbc_ctx *ctx =
+		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+
+	if (in_interrupt()) {
+		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+		skcipher_request_set_tfm(req, ctx->fallback);
+		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+		ret = crypto_skcipher_encrypt(req);
+		skcipher_request_zero(req);
+	} else {
+		blkcipher_walk_init(&walk, dst, src, nbytes);
+		ret = blkcipher_walk_virt(desc, &walk);
+		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
+			aes_p8_cbc_encrypt(walk.src.virt.addr,
+					   walk.dst.virt.addr,
+					   nbytes & AES_BLOCK_MASK,
+					   &ctx->enc_key, walk.iv, 1);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
+			nbytes &= AES_BLOCK_SIZE - 1;
+			ret = blkcipher_walk_done(desc, &walk, nbytes);
+		}
+	}
+
+	return ret;
+}
+
+static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
+			      struct scatterlist *dst,
+			      struct scatterlist *src, unsigned int nbytes)
+{
+	int ret;
+	struct blkcipher_walk walk;
+	struct p8_aes_cbc_ctx *ctx =
+		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+
+	if (in_interrupt()) {
+		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+		skcipher_request_set_tfm(req, ctx->fallback);
+		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+		ret = crypto_skcipher_decrypt(req);
+		skcipher_request_zero(req);
+	} else {
+		blkcipher_walk_init(&walk, dst, src, nbytes);
+		ret = blkcipher_walk_virt(desc, &walk);
+		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
+			aes_p8_cbc_encrypt(walk.src.virt.addr,
+					   walk.dst.virt.addr,
+					   nbytes & AES_BLOCK_MASK,
+					   &ctx->dec_key, walk.iv, 0);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
+			nbytes &= AES_BLOCK_SIZE - 1;
+			ret = blkcipher_walk_done(desc, &walk, nbytes);
+		}
+	}
+
+	return ret;
+}
+
+
+struct crypto_alg p8_aes_cbc_alg = {
+	.cra_name = "cbc(aes)",
+	.cra_driver_name = "p8_aes_cbc",
+	.cra_module = THIS_MODULE,
+	.cra_priority = 2000,
+	.cra_type = &crypto_blkcipher_type,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
+	.cra_alignmask = 0,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
+	.cra_init = p8_aes_cbc_init,
+	.cra_exit = p8_aes_cbc_exit,
+	.cra_blkcipher = {
+			  .ivsize = AES_BLOCK_SIZE,
+			  .min_keysize = AES_MIN_KEY_SIZE,
+			  .max_keysize = AES_MAX_KEY_SIZE,
+			  .setkey = p8_aes_cbc_setkey,
+			  .encrypt = p8_aes_cbc_encrypt,
+			  .decrypt = p8_aes_cbc_decrypt,
+	},
+};
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
new file mode 100644
index 0000000..cd777c7
--- /dev/null
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -0,0 +1,184 @@
+/**
+ * AES CTR routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+
+#include "aesp8-ppc.h"
+
+struct p8_aes_ctr_ctx {
+	struct crypto_skcipher *fallback;
+	struct aes_key enc_key;
+};
+
+static int p8_aes_ctr_init(struct crypto_tfm *tfm)
+{
+	const char *alg = crypto_tfm_alg_name(tfm);
+	struct crypto_skcipher *fallback;
+	struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	fallback = crypto_alloc_skcipher(alg, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback)) {
+		printk(KERN_ERR
+		       "Failed to allocate transformation for '%s': %ld\n",
+		       alg, PTR_ERR(fallback));
+		return PTR_ERR(fallback);
+	}
+
+	crypto_skcipher_set_flags(
+		fallback,
+		crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+	ctx->fallback = fallback;
+
+	return 0;
+}
+
+static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
+{
+	struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (ctx->fallback) {
+		crypto_free_skcipher(ctx->fallback);
+		ctx->fallback = NULL;
+	}
+}
+
+static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
+			     unsigned int keylen)
+{
+	int ret;
+	struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	preempt_disable();
+	pagefault_disable();
+	enable_kernel_vsx();
+	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+	disable_kernel_vsx();
+	pagefault_enable();
+	preempt_enable();
+
+	ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	return ret;
+}
+
+static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
+			     struct blkcipher_walk *walk)
+{
+	u8 *ctrblk = walk->iv;
+	u8 keystream[AES_BLOCK_SIZE];
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	unsigned int nbytes = walk->nbytes;
+
+	preempt_disable();
+	pagefault_disable();
+	enable_kernel_vsx();
+	aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
+	disable_kernel_vsx();
+	pagefault_enable();
+	preempt_enable();
+
+	crypto_xor_cpy(dst, keystream, src, nbytes);
+	crypto_inc(ctrblk, AES_BLOCK_SIZE);
+}
+
+static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
+			    struct scatterlist *dst,
+			    struct scatterlist *src, unsigned int nbytes)
+{
+	int ret;
+	u64 inc;
+	struct blkcipher_walk walk;
+	struct p8_aes_ctr_ctx *ctx =
+		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+
+	if (in_interrupt()) {
+		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+		skcipher_request_set_tfm(req, ctx->fallback);
+		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+		ret = crypto_skcipher_encrypt(req);
+		skcipher_request_zero(req);
+	} else {
+		blkcipher_walk_init(&walk, dst, src, nbytes);
+		ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+		while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
+			aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
+						    walk.dst.virt.addr,
+						    (nbytes &
+						     AES_BLOCK_MASK) /
+						    AES_BLOCK_SIZE,
+						    &ctx->enc_key,
+						    walk.iv);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
+			/* Advance the counter IV by the number of full blocks just processed */
+			inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
+			if (inc > 0)
+				while (inc--)
+					crypto_inc(walk.iv, AES_BLOCK_SIZE);
+
+			nbytes &= AES_BLOCK_SIZE - 1;
+			ret = blkcipher_walk_done(desc, &walk, nbytes);
+		}
+		if (walk.nbytes) {
+			p8_aes_ctr_final(ctx, &walk);
+			ret = blkcipher_walk_done(desc, &walk, 0);
+		}
+	}
+
+	return ret;
+}
+
+struct crypto_alg p8_aes_ctr_alg = {
+	.cra_name = "ctr(aes)",
+	.cra_driver_name = "p8_aes_ctr",
+	.cra_module = THIS_MODULE,
+	.cra_priority = 2000,
+	.cra_type = &crypto_blkcipher_type,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
+	.cra_alignmask = 0,
+	.cra_blocksize = 1,
+	.cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
+	.cra_init = p8_aes_ctr_init,
+	.cra_exit = p8_aes_ctr_exit,
+	.cra_blkcipher = {
+			  .ivsize = AES_BLOCK_SIZE,
+			  .min_keysize = AES_MIN_KEY_SIZE,
+			  .max_keysize = AES_MAX_KEY_SIZE,
+			  .setkey = p8_aes_ctr_setkey,
+			  .encrypt = p8_aes_ctr_crypt,
+			  .decrypt = p8_aes_ctr_crypt,
+	},
+};
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
new file mode 100644
index 0000000..e9954a7
--- /dev/null
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -0,0 +1,190 @@
+/**
+ * AES XTS routines supporting VMX In-core instructions on Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/xts.h>
+#include <crypto/skcipher.h>
+
+#include "aesp8-ppc.h"
+
+struct p8_aes_xts_ctx {
+	struct crypto_skcipher *fallback;
+	struct aes_key enc_key;
+	struct aes_key dec_key;
+	struct aes_key tweak_key;
+};
+
+static int p8_aes_xts_init(struct crypto_tfm *tfm)
+{
+	const char *alg = crypto_tfm_alg_name(tfm);
+	struct crypto_skcipher *fallback;
+	struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	fallback = crypto_alloc_skcipher(alg, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback)) {
+		printk(KERN_ERR
+			"Failed to allocate transformation for '%s': %ld\n",
+			alg, PTR_ERR(fallback));
+		return PTR_ERR(fallback);
+	}
+
+	crypto_skcipher_set_flags(
+		fallback,
+		crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+	ctx->fallback = fallback;
+
+	return 0;
+}
+
+static void p8_aes_xts_exit(struct crypto_tfm *tfm)
+{
+	struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (ctx->fallback) {
+		crypto_free_skcipher(ctx->fallback);
+		ctx->fallback = NULL;
+	}
+}
+
+static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
+			     unsigned int keylen)
+{
+	int ret;
+	struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ret = xts_check_key(tfm, key, keylen);
+	if (ret)
+		return ret;
+
+	preempt_disable();
+	pagefault_disable();
+	enable_kernel_vsx();
+	ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
+	ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
+	ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
+	disable_kernel_vsx();
+	pagefault_enable();
+	preempt_enable();
+
+	ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	return ret;
+}
+
+static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
+			    struct scatterlist *dst,
+			    struct scatterlist *src,
+			    unsigned int nbytes, int enc)
+{
+	int ret;
+	u8 tweak[AES_BLOCK_SIZE];
+	u8 *iv;
+	struct blkcipher_walk walk;
+	struct p8_aes_xts_ctx *ctx =
+		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+
+	if (in_interrupt()) {
+		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+		skcipher_request_set_tfm(req, ctx->fallback);
+		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+		ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
+		skcipher_request_zero(req);
+	} else {
+		blkcipher_walk_init(&walk, dst, src, nbytes);
+
+		ret = blkcipher_walk_virt(desc, &walk);
+
+		preempt_disable();
+		pagefault_disable();
+		enable_kernel_vsx();
+
+		iv = walk.iv;
+		memset(tweak, 0, AES_BLOCK_SIZE);
+		aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
+
+		disable_kernel_vsx();
+		pagefault_enable();
+		preempt_enable();
+
+		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
+			if (enc)
+				aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+						nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
+			else
+				aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
+						nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
+			nbytes &= AES_BLOCK_SIZE - 1;
+			ret = blkcipher_walk_done(desc, &walk, nbytes);
+		}
+	}
+	return ret;
+}
+
+static int p8_aes_xts_encrypt(struct blkcipher_desc *desc,
+			      struct scatterlist *dst,
+			      struct scatterlist *src, unsigned int nbytes)
+{
+	return p8_aes_xts_crypt(desc, dst, src, nbytes, 1);
+}
+
+static int p8_aes_xts_decrypt(struct blkcipher_desc *desc,
+			      struct scatterlist *dst,
+			      struct scatterlist *src, unsigned int nbytes)
+{
+	return p8_aes_xts_crypt(desc, dst, src, nbytes, 0);
+}
+
+struct crypto_alg p8_aes_xts_alg = {
+	.cra_name = "xts(aes)",
+	.cra_driver_name = "p8_aes_xts",
+	.cra_module = THIS_MODULE,
+	.cra_priority = 2000,
+	.cra_type = &crypto_blkcipher_type,
+	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
+	.cra_alignmask = 0,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
+	.cra_init = p8_aes_xts_init,
+	.cra_exit = p8_aes_xts_exit,
+	.cra_blkcipher = {
+			.ivsize = AES_BLOCK_SIZE,
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
+			.setkey	 = p8_aes_xts_setkey,
+			.encrypt = p8_aes_xts_encrypt,
+			.decrypt = p8_aes_xts_decrypt,
+	}
+};
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h
new file mode 100644
index 0000000..349646b
--- /dev/null
+++ b/drivers/crypto/vmx/aesp8-ppc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/types.h>
+#include <crypto/aes.h>
+
+#define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE-1))
+
+struct aes_key {
+	u8 key[AES_MAX_KEYLENGTH];
+	int rounds;
+};
+
+int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
+			   struct aes_key *key);
+int aes_p8_set_decrypt_key(const u8 *userKey, const int bits,
+			   struct aes_key *key);
+void aes_p8_encrypt(const u8 *in, u8 *out, const struct aes_key *key);
+void aes_p8_decrypt(const u8 *in, u8 *out, const struct aes_key *key);
+void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len,
+			const struct aes_key *key, u8 *iv, const int enc);
+void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out,
+				 size_t len, const struct aes_key *key,
+				 const u8 *iv);
+void aes_p8_xts_encrypt(const u8 *in, u8 *out, size_t len,
+			const struct aes_key *key1, const struct aes_key *key2, u8 *iv);
+void aes_p8_xts_decrypt(const u8 *in, u8 *out, size_t len,
+			const struct aes_key *key1, const struct aes_key *key2, u8 *iv);
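+
+/*
+ * Calling convention note (illustrative sketch): these routines use VSX
+ * registers, so every caller in this directory brackets the call as
+ * shown below; any other caller would need to do the same.
+ *
+ *	preempt_disable();
+ *	pagefault_disable();
+ *	enable_kernel_vsx();
+ *	aes_p8_encrypt(in, out, &enc_key);
+ *	disable_kernel_vsx();
+ *	pagefault_enable();
+ *	preempt_enable();
+ */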
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
new file mode 100644
index 0000000..d6a9f63
--- /dev/null
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -0,0 +1,3828 @@
+#! /usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+
+# This code is taken from CRYPTOGAMs[1] and is included here using the option
+# in the license to distribute the code under the GPL. Therefore this program
+# is free software; you can redistribute it and/or modify it under the terms of
+# the GNU General Public License version 2 as published by the Free Software
+# Foundation.
+#
+# [1] https://www.openssl.org/~appro/cryptogams/
+
+# Copyright (c) 2006-2017, CRYPTOGAMS by <appro@openssl.org>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#       * Redistributions of source code must retain copyright notices,
+#         this list of conditions and the following disclaimer.
+#
+#       * Redistributions in binary form must reproduce the above
+#         copyright notice, this list of conditions and the following
+#         disclaimer in the documentation and/or other materials
+#         provided with the distribution.
+#
+#       * Neither the name of the CRYPTOGAMS nor the names of its
+#         copyright holder and contributors may be used to endorse or
+#         promote products derived from this software without specific
+#         prior written permission.
+#
+# ALTERNATIVELY, provided that this notice is retained in full, this
+# product may be distributed under the terms of the GNU General Public
+# License (GPL), in which case the provisions of the GPL apply INSTEAD OF
+# those given above.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for AES instructions as per PowerISA
+# specification version 2.07, first implemented by POWER8 processor.
+# The module is endian-agnostic in sense that it supports both big-
+# and little-endian cases. Data alignment in parallelizable modes is
+# handled with VSX loads and stores, which implies MSR.VSX flag being
+# set. It should also be noted that ISA specification doesn't prohibit
+# alignment exceptions for these instructions on page boundaries.
+# Initially alignment was handled in pure AltiVec/VMX way [when data
+# is aligned programmatically, which in turn guarantees exception-
+# free execution], but it turned out to hamper performance when vcipher
+# instructions are interleaved. It's reckoned that eventual
+# misalignment penalties at page boundaries are on average lower
+# than the additional overhead of the pure AltiVec approach.
+#
+# May 2016
+#
+# Add XTS subroutine; a 9x improvement on little-endian and a 12x
+# improvement on big-endian systems was measured.
+#
+######################################################################
+# Current large-block performance in cycles per byte processed with
+# 128-bit key (less is better).
+#
+#		CBC en-/decrypt	CTR	XTS
+# POWER8[le]	3.96/0.72	0.74	1.1
+# POWER8[be]	3.75/0.65	0.66	1.0
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+	$SIZE_T	=8;
+	$LRSAVE	=2*$SIZE_T;
+	$STU	="stdu";
+	$POP	="ld";
+	$PUSH	="std";
+	$UCMP	="cmpld";
+	$SHL	="sldi";
+} elsif ($flavour =~ /32/) {
+	$SIZE_T	=4;
+	$LRSAVE	=$SIZE_T;
+	$STU	="stwu";
+	$POP	="lwz";
+	$PUSH	="stw";
+	$UCMP	="cmplw";
+	$SHL	="slwi";
+} else { die "nonsense $flavour"; }
+
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$FRAME=8*$SIZE_T;
+$prefix="aes_p8";
+
+$sp="r1";
+$vrsave="r12";
+
+#########################################################################
+{{{	# Key setup procedures						#
+my ($inp,$bits,$out,$ptr,$cnt,$rounds)=map("r$_",(3..8));
+my ($zero,$in0,$in1,$key,$rcon,$mask,$tmp)=map("v$_",(0..6));
+my ($stage,$outperm,$outmask,$outhead,$outtail)=map("v$_",(7..11));
+
+$code.=<<___;
+.machine	"any"
+
+.text
+
+.align	7
+rcon:
+.long	0x01000000, 0x01000000, 0x01000000, 0x01000000	?rev
+.long	0x1b000000, 0x1b000000, 0x1b000000, 0x1b000000	?rev
+.long	0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c	?rev
+.long	0,0,0,0						?asis
+Lconsts:
+	mflr	r0
+	bcl	20,31,\$+4
+	mflr	$ptr	 #vvvvv "distance between . and rcon
+	addi	$ptr,$ptr,-0x48
+	mtlr	r0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+.asciz	"AES for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+
+.globl	.${prefix}_set_encrypt_key
+Lset_encrypt_key:
+	mflr		r11
+	$PUSH		r11,$LRSAVE($sp)
+
+	li		$ptr,-1
+	${UCMP}i	$inp,0
+	beq-		Lenc_key_abort		# if ($inp==0) return -1;
+	${UCMP}i	$out,0
+	beq-		Lenc_key_abort		# if ($out==0) return -1;
+	li		$ptr,-2
+	cmpwi		$bits,128
+	blt-		Lenc_key_abort
+	cmpwi		$bits,256
+	bgt-		Lenc_key_abort
+	andi.		r0,$bits,0x3f
+	bne-		Lenc_key_abort
+
+	lis		r0,0xfff0
+	mfspr		$vrsave,256
+	mtspr		256,r0
+
+	bl		Lconsts
+	mtlr		r11
+
+	neg		r9,$inp
+	lvx		$in0,0,$inp
+	addi		$inp,$inp,15		# 15 is not typo
+	lvsr		$key,0,r9		# borrow $key
+	li		r8,0x20
+	cmpwi		$bits,192
+	lvx		$in1,0,$inp
+	le?vspltisb	$mask,0x0f		# borrow $mask
+	lvx		$rcon,0,$ptr
+	le?vxor		$key,$key,$mask		# adjust for byte swap
+	lvx		$mask,r8,$ptr
+	addi		$ptr,$ptr,0x10
+	vperm		$in0,$in0,$in1,$key	# align [and byte swap in LE]
+	li		$cnt,8
+	vxor		$zero,$zero,$zero
+	mtctr		$cnt
+
+	?lvsr		$outperm,0,$out
+	vspltisb	$outmask,-1
+	lvx		$outhead,0,$out
+	?vperm		$outmask,$zero,$outmask,$outperm
+
+	blt		Loop128
+	addi		$inp,$inp,8
+	beq		L192
+	addi		$inp,$inp,8
+	b		L256
+
+.align	4
+Loop128:
+	vperm		$key,$in0,$in0,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in0,$in0,$key
+	bdnz		Loop128
+
+	lvx		$rcon,0,$ptr		# last two round keys
+
+	vperm		$key,$in0,$in0,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in0,$in0,$key
+
+	vperm		$key,$in0,$in0,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vxor		$in0,$in0,$key
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+
+	addi		$inp,$out,15		# 15 is not typo
+	addi		$out,$out,0x50
+
+	li		$rounds,10
+	b		Ldone
+
+.align	4
+L192:
+	lvx		$tmp,0,$inp
+	li		$cnt,4
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+	vperm		$in1,$in1,$tmp,$key	# align [and byte swap in LE]
+	vspltisb	$key,8			# borrow $key
+	mtctr		$cnt
+	vsububm		$mask,$mask,$key	# adjust the mask
+
+Loop192:
+	vperm		$key,$in1,$in1,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	vcipherlast	$key,$key,$rcon
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+
+	 vsldoi		$stage,$zero,$in1,8
+	vspltw		$tmp,$in0,3
+	vxor		$tmp,$tmp,$in1
+	vsldoi		$in1,$zero,$in1,12	# >>32
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in1,$in1,$tmp
+	vxor		$in0,$in0,$key
+	vxor		$in1,$in1,$key
+	 vsldoi		$stage,$stage,$in0,8
+
+	vperm		$key,$in1,$in1,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$stage,$stage,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	 vsldoi		$stage,$in0,$in1,8
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	 vperm		$outtail,$stage,$stage,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vspltw		$tmp,$in0,3
+	vxor		$tmp,$tmp,$in1
+	vsldoi		$in1,$zero,$in1,12	# >>32
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in1,$in1,$tmp
+	vxor		$in0,$in0,$key
+	vxor		$in1,$in1,$key
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+	 addi		$inp,$out,15		# 15 is not typo
+	 addi		$out,$out,16
+	bdnz		Loop192
+
+	li		$rounds,12
+	addi		$out,$out,0x20
+	b		Ldone
+
+.align	4
+L256:
+	lvx		$tmp,0,$inp
+	li		$cnt,7
+	li		$rounds,14
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+	vperm		$in1,$in1,$tmp,$key	# align [and byte swap in LE]
+	mtctr		$cnt
+
+Loop256:
+	vperm		$key,$in1,$in1,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$in1,$in1,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in0,$in0,$key
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+	 addi		$inp,$out,15		# 15 is not typo
+	 addi		$out,$out,16
+	bdz		Ldone
+
+	vspltw		$key,$in0,3		# just splat
+	vsldoi		$tmp,$zero,$in1,12	# >>32
+	vsbox		$key,$key
+
+	vxor		$in1,$in1,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in1,$in1,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in1,$in1,$tmp
+
+	vxor		$in1,$in1,$key
+	b		Loop256
+
+.align	4
+Ldone:
+	lvx		$in1,0,$inp		# redundant in aligned case
+	vsel		$in1,$outhead,$in1,$outmask
+	stvx		$in1,0,$inp
+	li		$ptr,0
+	mtspr		256,$vrsave
+	stw		$rounds,0($out)
+
+Lenc_key_abort:
+	mr		r3,$ptr
+	blr
+	.long		0
+	.byte		0,12,0x14,1,0,0,3,0
+	.long		0
+.size	.${prefix}_set_encrypt_key,.-.${prefix}_set_encrypt_key
+
+.globl	.${prefix}_set_decrypt_key
+	$STU		$sp,-$FRAME($sp)
+	mflr		r10
+	$PUSH		r10,$FRAME+$LRSAVE($sp)
+	bl		Lset_encrypt_key
+	mtlr		r10
+
+	cmpwi		r3,0
+	bne-		Ldec_key_abort
+
+	slwi		$cnt,$rounds,4
+	subi		$inp,$out,240		# first round key
+	srwi		$rounds,$rounds,1
+	add		$out,$inp,$cnt		# last round key
+	mtctr		$rounds
+
+Ldeckey:
+	lwz		r0, 0($inp)
+	lwz		r6, 4($inp)
+	lwz		r7, 8($inp)
+	lwz		r8, 12($inp)
+	addi		$inp,$inp,16
+	lwz		r9, 0($out)
+	lwz		r10,4($out)
+	lwz		r11,8($out)
+	lwz		r12,12($out)
+	stw		r0, 0($out)
+	stw		r6, 4($out)
+	stw		r7, 8($out)
+	stw		r8, 12($out)
+	subi		$out,$out,16
+	stw		r9, -16($inp)
+	stw		r10,-12($inp)
+	stw		r11,-8($inp)
+	stw		r12,-4($inp)
+	bdnz		Ldeckey
+
+	xor		r3,r3,r3		# return value
+Ldec_key_abort:
+	addi		$sp,$sp,$FRAME
+	blr
+	.long		0
+	.byte		0,12,4,1,0x80,0,3,0
+	.long		0
+.size	.${prefix}_set_decrypt_key,.-.${prefix}_set_decrypt_key
+___
+}}}
+#########################################################################
+{{{	# Single block en- and decrypt procedures			#
+sub gen_block () {
+my $dir = shift;
+my $n   = $dir eq "de" ? "n" : "";
+my ($inp,$out,$key,$rounds,$idx)=map("r$_",(3..7));
+
+$code.=<<___;
+.globl	.${prefix}_${dir}crypt
+	lwz		$rounds,240($key)
+	lis		r0,0xfc00
+	mfspr		$vrsave,256
+	li		$idx,15			# 15 is not typo
+	mtspr		256,r0
+
+	lvx		v0,0,$inp
+	neg		r11,$out
+	lvx		v1,$idx,$inp
+	lvsl		v2,0,$inp		# inpperm
+	le?vspltisb	v4,0x0f
+	?lvsl		v3,0,r11		# outperm
+	le?vxor		v2,v2,v4
+	li		$idx,16
+	vperm		v0,v0,v1,v2		# align [and byte swap in LE]
+	lvx		v1,0,$key
+	?lvsl		v5,0,$key		# keyperm
+	srwi		$rounds,$rounds,1
+	lvx		v2,$idx,$key
+	addi		$idx,$idx,16
+	subi		$rounds,$rounds,1
+	?vperm		v1,v1,v2,v5		# align round key
+
+	vxor		v0,v0,v1
+	lvx		v1,$idx,$key
+	addi		$idx,$idx,16
+	mtctr		$rounds
+
+Loop_${dir}c:
+	?vperm		v2,v2,v1,v5
+	v${n}cipher	v0,v0,v2
+	lvx		v2,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		v1,v1,v2,v5
+	v${n}cipher	v0,v0,v1
+	lvx		v1,$idx,$key
+	addi		$idx,$idx,16
+	bdnz		Loop_${dir}c
+
+	?vperm		v2,v2,v1,v5
+	v${n}cipher	v0,v0,v2
+	lvx		v2,$idx,$key
+	?vperm		v1,v1,v2,v5
+	v${n}cipherlast	v0,v0,v1
+
+	vspltisb	v2,-1
+	vxor		v1,v1,v1
+	li		$idx,15			# 15 is not typo
+	?vperm		v2,v1,v2,v3		# outmask
+	le?vxor		v3,v3,v4
+	lvx		v1,0,$out		# outhead
+	vperm		v0,v0,v0,v3		# rotate [and byte swap in LE]
+	vsel		v1,v1,v0,v2
+	lvx		v4,$idx,$out
+	stvx		v1,0,$out
+	vsel		v0,v0,v4,v2
+	stvx		v0,$idx,$out
+
+	mtspr		256,$vrsave
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,3,0
+	.long		0
+.size	.${prefix}_${dir}crypt,.-.${prefix}_${dir}crypt
+___
+}
+&gen_block("en");
+&gen_block("de");
+}}}
+#########################################################################
+{{{	# CBC en- and decrypt procedures				#
+my ($inp,$out,$len,$key,$ivp,$enc,$rounds,$idx)=map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout,$tmp)=		map("v$_",(0..3));
+my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm)=
+						map("v$_",(4..10));
+$code.=<<___;
+.globl	.${prefix}_cbc_encrypt
+	${UCMP}i	$len,16
+	bltlr-
+
+	cmpwi		$enc,0			# test direction
+	lis		r0,0xffe0
+	mfspr		$vrsave,256
+	mtspr		256,r0
+
+	li		$idx,15
+	vxor		$rndkey0,$rndkey0,$rndkey0
+	le?vspltisb	$tmp,0x0f
+
+	lvx		$ivec,0,$ivp		# load [unaligned] iv
+	lvsl		$inpperm,0,$ivp
+	lvx		$inptail,$idx,$ivp
+	le?vxor		$inpperm,$inpperm,$tmp
+	vperm		$ivec,$ivec,$inptail,$inpperm
+
+	neg		r11,$inp
+	?lvsl		$keyperm,0,$key		# prepare for unaligned key
+	lwz		$rounds,240($key)
+
+	lvsr		$inpperm,0,r11		# prepare for unaligned load
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,15		# 15 is not typo
+	le?vxor		$inpperm,$inpperm,$tmp
+
+	?lvsr		$outperm,0,$out		# prepare for unaligned store
+	vspltisb	$outmask,-1
+	lvx		$outhead,0,$out
+	?vperm		$outmask,$rndkey0,$outmask,$outperm
+	le?vxor		$outperm,$outperm,$tmp
+
+	srwi		$rounds,$rounds,1
+	li		$idx,16
+	subi		$rounds,$rounds,1
+	beq		Lcbc_dec
+
+Lcbc_enc:
+	vmr		$inout,$inptail
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+	mtctr		$rounds
+	subi		$len,$len,16		# len-=16
+
+	lvx		$rndkey0,0,$key
+	 vperm		$inout,$inout,$inptail,$inpperm
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	vxor		$inout,$inout,$ivec
+
+Loop_cbc_enc:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	bdnz		Loop_cbc_enc
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipherlast	$ivec,$inout,$rndkey0
+	${UCMP}i	$len,16
+
+	vperm		$tmp,$ivec,$ivec,$outperm
+	vsel		$inout,$outhead,$tmp,$outmask
+	vmr		$outhead,$tmp
+	stvx		$inout,0,$out
+	addi		$out,$out,16
+	bge		Lcbc_enc
+
+	b		Lcbc_done
+
+.align	4
+Lcbc_dec:
+	${UCMP}i	$len,128
+	bge		_aesp8_cbc_decrypt8x
+	vmr		$tmp,$inptail
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+	mtctr		$rounds
+	subi		$len,$len,16		# len-=16
+
+	lvx		$rndkey0,0,$key
+	 vperm		$tmp,$tmp,$inptail,$inpperm
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$tmp,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+
+Loop_cbc_dec:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vncipher	$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	bdnz		Loop_cbc_dec
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vncipherlast	$inout,$inout,$rndkey0
+	${UCMP}i	$len,16
+
+	vxor		$inout,$inout,$ivec
+	vmr		$ivec,$tmp
+	vperm		$tmp,$inout,$inout,$outperm
+	vsel		$inout,$outhead,$tmp,$outmask
+	vmr		$outhead,$tmp
+	stvx		$inout,0,$out
+	addi		$out,$out,16
+	bge		Lcbc_dec
+
+Lcbc_done:
+	addi		$out,$out,-1
+	lvx		$inout,0,$out		# redundant in aligned case
+	vsel		$inout,$outhead,$inout,$outmask
+	stvx		$inout,0,$out
+
+	neg		$enc,$ivp		# write [unaligned] iv
+	li		$idx,15			# 15 is not typo
+	vxor		$rndkey0,$rndkey0,$rndkey0
+	vspltisb	$outmask,-1
+	le?vspltisb	$tmp,0x0f
+	?lvsl		$outperm,0,$enc
+	?vperm		$outmask,$rndkey0,$outmask,$outperm
+	le?vxor		$outperm,$outperm,$tmp
+	lvx		$outhead,0,$ivp
+	vperm		$ivec,$ivec,$ivec,$outperm
+	vsel		$inout,$outhead,$ivec,$outmask
+	lvx		$inptail,$idx,$ivp
+	stvx		$inout,0,$ivp
+	vsel		$inout,$ivec,$inptail,$outmask
+	stvx		$inout,$idx,$ivp
+
+	mtspr		256,$vrsave
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,6,0
+	.long		0
+___
+#########################################################################
+{{	# Optimized CBC decrypt procedure				#
+my $key_="r11";
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
+my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13));
+my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(14..21));
+my $rndkey0="v23";	# v24-v25 rotating buffer for first found keys
+			# v26-v31 last 6 round keys
+my ($tmp,$keyperm)=($in3,$in4);	# aliases with "caller", redundant assignment
+
+$code.=<<___;
+.align	5
+_aesp8_cbc_decrypt8x:
+	$STU		$sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+	li		r10,`$FRAME+8*16+15`
+	li		r11,`$FRAME+8*16+31`
+	stvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	stvx		v21,r11,$sp
+	addi		r11,r11,32
+	stvx		v22,r10,$sp
+	addi		r10,r10,32
+	stvx		v23,r11,$sp
+	addi		r11,r11,32
+	stvx		v24,r10,$sp
+	addi		r10,r10,32
+	stvx		v25,r11,$sp
+	addi		r11,r11,32
+	stvx		v26,r10,$sp
+	addi		r10,r10,32
+	stvx		v27,r11,$sp
+	addi		r11,r11,32
+	stvx		v28,r10,$sp
+	addi		r10,r10,32
+	stvx		v29,r11,$sp
+	addi		r11,r11,32
+	stvx		v30,r10,$sp
+	stvx		v31,r11,$sp
+	li		r0,-1
+	stw		$vrsave,`$FRAME+21*16-4`($sp)	# save vrsave
+	li		$x10,0x10
+	$PUSH		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	li		$x20,0x20
+	$PUSH		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	li		$x30,0x30
+	$PUSH		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	li		$x40,0x40
+	$PUSH		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	li		$x50,0x50
+	$PUSH		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	li		$x60,0x60
+	$PUSH		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	li		$x70,0x70
+	mtspr		256,r0
+
+	subi		$rounds,$rounds,3	# -4 in total
+	subi		$len,$len,128		# bias
+
+	lvx		$rndkey0,$x00,$key	# load key schedule
+	lvx		v30,$x10,$key
+	addi		$key,$key,0x20
+	lvx		v31,$x00,$key
+	?vperm		$rndkey0,$rndkey0,v30,$keyperm
+	addi		$key_,$sp,$FRAME+15
+	mtctr		$rounds
+
+Load_cbc_dec_key:
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v30,$x10,$key
+	addi		$key,$key,0x20
+	stvx		v24,$x00,$key_		# off-load round[1]
+	?vperm		v25,v31,v30,$keyperm
+	lvx		v31,$x00,$key
+	stvx		v25,$x10,$key_		# off-load round[2]
+	addi		$key_,$key_,0x20
+	bdnz		Load_cbc_dec_key
+
+	lvx		v26,$x10,$key
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v27,$x20,$key
+	stvx		v24,$x00,$key_		# off-load round[3]
+	?vperm		v25,v31,v26,$keyperm
+	lvx		v28,$x30,$key
+	stvx		v25,$x10,$key_		# off-load round[4]
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	?vperm		v26,v26,v27,$keyperm
+	lvx		v29,$x40,$key
+	?vperm		v27,v27,v28,$keyperm
+	lvx		v30,$x50,$key
+	?vperm		v28,v28,v29,$keyperm
+	lvx		v31,$x60,$key
+	?vperm		v29,v29,v30,$keyperm
+	lvx		$out0,$x70,$key		# borrow $out0
+	?vperm		v30,v30,v31,$keyperm
+	lvx		v24,$x00,$key_		# pre-load round[1]
+	?vperm		v31,v31,$out0,$keyperm
+	lvx		v25,$x10,$key_		# pre-load round[2]
+
+	#lvx		$inptail,0,$inp		# "caller" already did this
+	#addi		$inp,$inp,15		# 15 is not typo
+	subi		$inp,$inp,15		# undo "caller"
+
+	 le?li		$idx,8
+	lvx_u		$in0,$x00,$inp		# load first 8 "words"
+	 le?lvsl	$inpperm,0,$idx
+	 le?vspltisb	$tmp,0x0f
+	lvx_u		$in1,$x10,$inp
+	 le?vxor	$inpperm,$inpperm,$tmp	# transform for lvx_u/stvx_u
+	lvx_u		$in2,$x20,$inp
+	 le?vperm	$in0,$in0,$in0,$inpperm
+	lvx_u		$in3,$x30,$inp
+	 le?vperm	$in1,$in1,$in1,$inpperm
+	lvx_u		$in4,$x40,$inp
+	 le?vperm	$in2,$in2,$in2,$inpperm
+	vxor		$out0,$in0,$rndkey0
+	lvx_u		$in5,$x50,$inp
+	 le?vperm	$in3,$in3,$in3,$inpperm
+	vxor		$out1,$in1,$rndkey0
+	lvx_u		$in6,$x60,$inp
+	 le?vperm	$in4,$in4,$in4,$inpperm
+	vxor		$out2,$in2,$rndkey0
+	lvx_u		$in7,$x70,$inp
+	addi		$inp,$inp,0x80
+	 le?vperm	$in5,$in5,$in5,$inpperm
+	vxor		$out3,$in3,$rndkey0
+	 le?vperm	$in6,$in6,$in6,$inpperm
+	vxor		$out4,$in4,$rndkey0
+	 le?vperm	$in7,$in7,$in7,$inpperm
+	vxor		$out5,$in5,$rndkey0
+	vxor		$out6,$in6,$rndkey0
+	vxor		$out7,$in7,$rndkey0
+
+	mtctr		$rounds
+	b		Loop_cbc_dec8x
+.align	5
+Loop_cbc_dec8x:
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	vncipher	$out6,$out6,v24
+	vncipher	$out7,$out7,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	vncipher	$out6,$out6,v25
+	vncipher	$out7,$out7,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_cbc_dec8x
+
+	subic		$len,$len,128		# $len-=128
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	vncipher	$out6,$out6,v24
+	vncipher	$out7,$out7,v24
+
+	subfe.		r0,r0,r0		# borrow?-1:0
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	vncipher	$out6,$out6,v25
+	vncipher	$out7,$out7,v25
+
+	and		r0,r0,$len
+	vncipher	$out0,$out0,v26
+	vncipher	$out1,$out1,v26
+	vncipher	$out2,$out2,v26
+	vncipher	$out3,$out3,v26
+	vncipher	$out4,$out4,v26
+	vncipher	$out5,$out5,v26
+	vncipher	$out6,$out6,v26
+	vncipher	$out7,$out7,v26
+
+	add		$inp,$inp,r0		# $inp is adjusted in such
+						# way that at exit from the
+						# loop inX-in7 are loaded
+						# with last "words"
+	vncipher	$out0,$out0,v27
+	vncipher	$out1,$out1,v27
+	vncipher	$out2,$out2,v27
+	vncipher	$out3,$out3,v27
+	vncipher	$out4,$out4,v27
+	vncipher	$out5,$out5,v27
+	vncipher	$out6,$out6,v27
+	vncipher	$out7,$out7,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vncipher	$out0,$out0,v28
+	vncipher	$out1,$out1,v28
+	vncipher	$out2,$out2,v28
+	vncipher	$out3,$out3,v28
+	vncipher	$out4,$out4,v28
+	vncipher	$out5,$out5,v28
+	vncipher	$out6,$out6,v28
+	vncipher	$out7,$out7,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	vncipher	$out0,$out0,v29
+	vncipher	$out1,$out1,v29
+	vncipher	$out2,$out2,v29
+	vncipher	$out3,$out3,v29
+	vncipher	$out4,$out4,v29
+	vncipher	$out5,$out5,v29
+	vncipher	$out6,$out6,v29
+	vncipher	$out7,$out7,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+
+	vncipher	$out0,$out0,v30
+	 vxor		$ivec,$ivec,v31		# xor with last round key
+	vncipher	$out1,$out1,v30
+	 vxor		$in0,$in0,v31
+	vncipher	$out2,$out2,v30
+	 vxor		$in1,$in1,v31
+	vncipher	$out3,$out3,v30
+	 vxor		$in2,$in2,v31
+	vncipher	$out4,$out4,v30
+	 vxor		$in3,$in3,v31
+	vncipher	$out5,$out5,v30
+	 vxor		$in4,$in4,v31
+	vncipher	$out6,$out6,v30
+	 vxor		$in5,$in5,v31
+	vncipher	$out7,$out7,v30
+	 vxor		$in6,$in6,v31
+
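+	# The CBC chaining xor is folded into the final round: each
+	# vncipherlast operand below is the previous ciphertext block
+	# (or $ivec) already xored with the last round key v31 above.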
+	vncipherlast	$out0,$out0,$ivec
+	vncipherlast	$out1,$out1,$in0
+	 lvx_u		$in0,$x00,$inp		# load next input block
+	vncipherlast	$out2,$out2,$in1
+	 lvx_u		$in1,$x10,$inp
+	vncipherlast	$out3,$out3,$in2
+	 le?vperm	$in0,$in0,$in0,$inpperm
+	 lvx_u		$in2,$x20,$inp
+	vncipherlast	$out4,$out4,$in3
+	 le?vperm	$in1,$in1,$in1,$inpperm
+	 lvx_u		$in3,$x30,$inp
+	vncipherlast	$out5,$out5,$in4
+	 le?vperm	$in2,$in2,$in2,$inpperm
+	 lvx_u		$in4,$x40,$inp
+	vncipherlast	$out6,$out6,$in5
+	 le?vperm	$in3,$in3,$in3,$inpperm
+	 lvx_u		$in5,$x50,$inp
+	vncipherlast	$out7,$out7,$in6
+	 le?vperm	$in4,$in4,$in4,$inpperm
+	 lvx_u		$in6,$x60,$inp
+	vmr		$ivec,$in7
+	 le?vperm	$in5,$in5,$in5,$inpperm
+	 lvx_u		$in7,$x70,$inp
+	 addi		$inp,$inp,0x80
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	 le?vperm	$in6,$in6,$in6,$inpperm
+	 vxor		$out0,$in0,$rndkey0
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	 le?vperm	$in7,$in7,$in7,$inpperm
+	 vxor		$out1,$in1,$rndkey0
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	 vxor		$out2,$in2,$rndkey0
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	 vxor		$out3,$in3,$rndkey0
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x40,$out
+	 vxor		$out4,$in4,$rndkey0
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x50,$out
+	 vxor		$out5,$in5,$rndkey0
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x60,$out
+	 vxor		$out6,$in6,$rndkey0
+	stvx_u		$out7,$x70,$out
+	addi		$out,$out,0x80
+	 vxor		$out7,$in7,$rndkey0
+
+	mtctr		$rounds
+	beq		Loop_cbc_dec8x		# did $len-=128 borrow?
+
+	addic.		$len,$len,128
+	beq		Lcbc_dec8x_done
+	nop
+	nop
+
+Loop_cbc_dec8x_tail:				# up to 7 "words" tail...
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	vncipher	$out6,$out6,v24
+	vncipher	$out7,$out7,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	vncipher	$out6,$out6,v25
+	vncipher	$out7,$out7,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_cbc_dec8x_tail
+
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	vncipher	$out6,$out6,v24
+	vncipher	$out7,$out7,v24
+
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	vncipher	$out6,$out6,v25
+	vncipher	$out7,$out7,v25
+
+	vncipher	$out1,$out1,v26
+	vncipher	$out2,$out2,v26
+	vncipher	$out3,$out3,v26
+	vncipher	$out4,$out4,v26
+	vncipher	$out5,$out5,v26
+	vncipher	$out6,$out6,v26
+	vncipher	$out7,$out7,v26
+
+	vncipher	$out1,$out1,v27
+	vncipher	$out2,$out2,v27
+	vncipher	$out3,$out3,v27
+	vncipher	$out4,$out4,v27
+	vncipher	$out5,$out5,v27
+	vncipher	$out6,$out6,v27
+	vncipher	$out7,$out7,v27
+
+	vncipher	$out1,$out1,v28
+	vncipher	$out2,$out2,v28
+	vncipher	$out3,$out3,v28
+	vncipher	$out4,$out4,v28
+	vncipher	$out5,$out5,v28
+	vncipher	$out6,$out6,v28
+	vncipher	$out7,$out7,v28
+
+	vncipher	$out1,$out1,v29
+	vncipher	$out2,$out2,v29
+	vncipher	$out3,$out3,v29
+	vncipher	$out4,$out4,v29
+	vncipher	$out5,$out5,v29
+	vncipher	$out6,$out6,v29
+	vncipher	$out7,$out7,v29
+
+	vncipher	$out1,$out1,v30
+	 vxor		$ivec,$ivec,v31		# last round key
+	vncipher	$out2,$out2,v30
+	 vxor		$in1,$in1,v31
+	vncipher	$out3,$out3,v30
+	 vxor		$in2,$in2,v31
+	vncipher	$out4,$out4,v30
+	 vxor		$in3,$in3,v31
+	vncipher	$out5,$out5,v30
+	 vxor		$in4,$in4,v31
+	vncipher	$out6,$out6,v30
+	 vxor		$in5,$in5,v31
+	vncipher	$out7,$out7,v30
+	 vxor		$in6,$in6,v31
+
+	cmplwi		$len,32			# switch($len)
+	blt		Lcbc_dec8x_one
+	nop
+	beq		Lcbc_dec8x_two
+	cmplwi		$len,64
+	blt		Lcbc_dec8x_three
+	nop
+	beq		Lcbc_dec8x_four
+	cmplwi		$len,96
+	blt		Lcbc_dec8x_five
+	nop
+	beq		Lcbc_dec8x_six
+
+Lcbc_dec8x_seven:
+	vncipherlast	$out1,$out1,$ivec
+	vncipherlast	$out2,$out2,$in1
+	vncipherlast	$out3,$out3,$in2
+	vncipherlast	$out4,$out4,$in3
+	vncipherlast	$out5,$out5,$in4
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out1,$out1,$out1,$inpperm
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x00,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x10,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x20,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x30,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x40,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x50,$out
+	stvx_u		$out7,$x60,$out
+	addi		$out,$out,0x70
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_six:
+	vncipherlast	$out2,$out2,$ivec
+	vncipherlast	$out3,$out3,$in2
+	vncipherlast	$out4,$out4,$in3
+	vncipherlast	$out5,$out5,$in4
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out2,$out2,$out2,$inpperm
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x00,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x10,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x20,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x30,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x40,$out
+	stvx_u		$out7,$x50,$out
+	addi		$out,$out,0x60
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_five:
+	vncipherlast	$out3,$out3,$ivec
+	vncipherlast	$out4,$out4,$in3
+	vncipherlast	$out5,$out5,$in4
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out3,$out3,$out3,$inpperm
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x00,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x10,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x20,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x30,$out
+	stvx_u		$out7,$x40,$out
+	addi		$out,$out,0x50
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_four:
+	vncipherlast	$out4,$out4,$ivec
+	vncipherlast	$out5,$out5,$in4
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out4,$out4,$out4,$inpperm
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x00,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x10,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x20,$out
+	stvx_u		$out7,$x30,$out
+	addi		$out,$out,0x40
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_three:
+	vncipherlast	$out5,$out5,$ivec
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out5,$out5,$out5,$inpperm
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x00,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x10,$out
+	stvx_u		$out7,$x20,$out
+	addi		$out,$out,0x30
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_two:
+	vncipherlast	$out6,$out6,$ivec
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out6,$out6,$out6,$inpperm
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x00,$out
+	stvx_u		$out7,$x10,$out
+	addi		$out,$out,0x20
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_one:
+	vncipherlast	$out7,$out7,$ivec
+	vmr		$ivec,$in7
+
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out7,0,$out
+	addi		$out,$out,0x10
+
+Lcbc_dec8x_done:
+	le?vperm	$ivec,$ivec,$ivec,$inpperm
+	stvx_u		$ivec,0,$ivp		# write [unaligned] iv
+
+	li		r10,`$FRAME+15`
+	li		r11,`$FRAME+31`
+	stvx		$inpperm,r10,$sp	# wipe copies of round keys
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+
+	mtspr		256,$vrsave
+	lvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	lvx		v21,r11,$sp
+	addi		r11,r11,32
+	lvx		v22,r10,$sp
+	addi		r10,r10,32
+	lvx		v23,r11,$sp
+	addi		r11,r11,32
+	lvx		v24,r10,$sp
+	addi		r10,r10,32
+	lvx		v25,r11,$sp
+	addi		r11,r11,32
+	lvx		v26,r10,$sp
+	addi		r10,r10,32
+	lvx		v27,r11,$sp
+	addi		r11,r11,32
+	lvx		v28,r10,$sp
+	addi		r10,r10,32
+	lvx		v29,r11,$sp
+	addi		r11,r11,32
+	lvx		v30,r10,$sp
+	lvx		v31,r11,$sp
+	$POP		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	$POP		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	$POP		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	$POP		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	$POP		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	$POP		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	addi		$sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0x80,6,6,0
+	.long		0
+.size	.${prefix}_cbc_encrypt,.-.${prefix}_cbc_encrypt
+___
+}}	}}}
+
+#########################################################################
+{{{	# CTR procedure[s]						#
+my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout,$tmp)=		map("v$_",(0..3));
+my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)=
+						map("v$_",(4..11));
+my $dat=$tmp;
+
+$code.=<<___;
+.globl	.${prefix}_ctr32_encrypt_blocks
+	${UCMP}i	$len,1
+	bltlr-
+
+	lis		r0,0xfff0
+	mfspr		$vrsave,256
+	mtspr		256,r0
+
+	li		$idx,15
+	vxor		$rndkey0,$rndkey0,$rndkey0
+	le?vspltisb	$tmp,0x0f
+
+	lvx		$ivec,0,$ivp		# load [unaligned] iv
+	lvsl		$inpperm,0,$ivp
+	lvx		$inptail,$idx,$ivp
+	 vspltisb	$one,1
+	le?vxor		$inpperm,$inpperm,$tmp
+	vperm		$ivec,$ivec,$inptail,$inpperm
+	 vsldoi		$one,$rndkey0,$one,1
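+	# $one now holds the 128-bit value 1: vspltisb fills it with 0x01
+	# bytes and the vsldoi against zero keeps a single 0x01 in the
+	# low-order byte; it is the counter increment.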
+
+	neg		r11,$inp
+	?lvsl		$keyperm,0,$key		# prepare for unaligned key
+	lwz		$rounds,240($key)
+
+	lvsr		$inpperm,0,r11		# prepare for unaligned load
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,15		# 15 is not typo
+	le?vxor		$inpperm,$inpperm,$tmp
+
+	srwi		$rounds,$rounds,1
+	li		$idx,16
+	subi		$rounds,$rounds,1
+
+	${UCMP}i	$len,8
+	bge		_aesp8_ctr32_encrypt8x
+
+	?lvsr		$outperm,0,$out		# prepare for unaligned store
+	vspltisb	$outmask,-1
+	lvx		$outhead,0,$out
+	?vperm		$outmask,$rndkey0,$outmask,$outperm
+	le?vxor		$outperm,$outperm,$tmp
+
+	lvx		$rndkey0,0,$key
+	mtctr		$rounds
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$ivec,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	b		Loop_ctr32_enc
+
+.align	5
+Loop_ctr32_enc:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	bdnz		Loop_ctr32_enc
+
+	vadduwm		$ivec,$ivec,$one
+	 vmr		$dat,$inptail
+	 lvx		$inptail,0,$inp
+	 addi		$inp,$inp,16
+	 subic.		$len,$len,1		# blocks--
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	 vperm		$dat,$dat,$inptail,$inpperm
+	 li		$idx,16
+	?vperm		$rndkey1,$rndkey0,$rndkey1,$keyperm
+	 lvx		$rndkey0,0,$key
+	vxor		$dat,$dat,$rndkey1	# last round key
+	vcipherlast	$inout,$inout,$dat
+
+	 lvx		$rndkey1,$idx,$key
+	 addi		$idx,$idx,16
+	vperm		$inout,$inout,$inout,$outperm
+	vsel		$dat,$outhead,$inout,$outmask
+	 mtctr		$rounds
+	 ?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vmr		$outhead,$inout
+	 vxor		$inout,$ivec,$rndkey0
+	 lvx		$rndkey0,$idx,$key
+	 addi		$idx,$idx,16
+	stvx		$dat,0,$out
+	addi		$out,$out,16
+	bne		Loop_ctr32_enc
+
+	addi		$out,$out,-1
+	lvx		$inout,0,$out		# redundant in aligned case
+	vsel		$inout,$outhead,$inout,$outmask
+	stvx		$inout,0,$out
+
+	mtspr		256,$vrsave
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,6,0
+	.long		0
+___
+#########################################################################
+{{	# Optimized CTR procedure					#
+my $key_="r11";
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
+my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10,12..14));
+my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(15..22));
+my $rndkey0="v23";	# v24-v25 rotating buffer for first round keys
+			# v26-v31 last 6 round keys
+my ($tmp,$keyperm)=($in3,$in4);	# aliases with "caller", redundant assignment
+my ($two,$three,$four)=($outhead,$outperm,$outmask);
+
+$code.=<<___;
+.align	5
+_aesp8_ctr32_encrypt8x:
+	$STU		$sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+	li		r10,`$FRAME+8*16+15`
+	li		r11,`$FRAME+8*16+31`
+	stvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	stvx		v21,r11,$sp
+	addi		r11,r11,32
+	stvx		v22,r10,$sp
+	addi		r10,r10,32
+	stvx		v23,r11,$sp
+	addi		r11,r11,32
+	stvx		v24,r10,$sp
+	addi		r10,r10,32
+	stvx		v25,r11,$sp
+	addi		r11,r11,32
+	stvx		v26,r10,$sp
+	addi		r10,r10,32
+	stvx		v27,r11,$sp
+	addi		r11,r11,32
+	stvx		v28,r10,$sp
+	addi		r10,r10,32
+	stvx		v29,r11,$sp
+	addi		r11,r11,32
+	stvx		v30,r10,$sp
+	stvx		v31,r11,$sp
+	li		r0,-1
+	stw		$vrsave,`$FRAME+21*16-4`($sp)	# save vrsave
+	li		$x10,0x10
+	$PUSH		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	li		$x20,0x20
+	$PUSH		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	li		$x30,0x30
+	$PUSH		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	li		$x40,0x40
+	$PUSH		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	li		$x50,0x50
+	$PUSH		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	li		$x60,0x60
+	$PUSH		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	li		$x70,0x70
+	mtspr		256,r0
+
+	subi		$rounds,$rounds,3	# -4 in total
+
+	lvx		$rndkey0,$x00,$key	# load key schedule
+	lvx		v30,$x10,$key
+	addi		$key,$key,0x20
+	lvx		v31,$x00,$key
+	?vperm		$rndkey0,$rndkey0,v30,$keyperm
+	addi		$key_,$sp,$FRAME+15
+	mtctr		$rounds
+
+Load_ctr32_enc_key:
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v30,$x10,$key
+	addi		$key,$key,0x20
+	stvx		v24,$x00,$key_		# off-load round[1]
+	?vperm		v25,v31,v30,$keyperm
+	lvx		v31,$x00,$key
+	stvx		v25,$x10,$key_		# off-load round[2]
+	addi		$key_,$key_,0x20
+	bdnz		Load_ctr32_enc_key
+
+	lvx		v26,$x10,$key
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v27,$x20,$key
+	stvx		v24,$x00,$key_		# off-load round[3]
+	?vperm		v25,v31,v26,$keyperm
+	lvx		v28,$x30,$key
+	stvx		v25,$x10,$key_		# off-load round[4]
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	?vperm		v26,v26,v27,$keyperm
+	lvx		v29,$x40,$key
+	?vperm		v27,v27,v28,$keyperm
+	lvx		v30,$x50,$key
+	?vperm		v28,v28,v29,$keyperm
+	lvx		v31,$x60,$key
+	?vperm		v29,v29,v30,$keyperm
+	lvx		$out0,$x70,$key		# borrow $out0
+	?vperm		v30,v30,v31,$keyperm
+	lvx		v24,$x00,$key_		# pre-load round[1]
+	?vperm		v31,v31,$out0,$keyperm
+	lvx		v25,$x10,$key_		# pre-load round[2]
+
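+	# Precompute eight consecutive counter values, each pre-xored with
+	# round key 0 (v23); $ivec itself advances by 8 per iteration via
+	# vadduqm.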
+	vadduqm		$two,$one,$one
+	subi		$inp,$inp,15		# undo "caller"
+	$SHL		$len,$len,4
+
+	vadduqm		$out1,$ivec,$one	# counter values ...
+	vadduqm		$out2,$ivec,$two
+	vxor		$out0,$ivec,$rndkey0	# ... xored with rndkey[0]
+	 le?li		$idx,8
+	vadduqm		$out3,$out1,$two
+	vxor		$out1,$out1,$rndkey0
+	 le?lvsl	$inpperm,0,$idx
+	vadduqm		$out4,$out2,$two
+	vxor		$out2,$out2,$rndkey0
+	 le?vspltisb	$tmp,0x0f
+	vadduqm		$out5,$out3,$two
+	vxor		$out3,$out3,$rndkey0
+	 le?vxor	$inpperm,$inpperm,$tmp	# transform for lvx_u/stvx_u
+	vadduqm		$out6,$out4,$two
+	vxor		$out4,$out4,$rndkey0
+	vadduqm		$out7,$out5,$two
+	vxor		$out5,$out5,$rndkey0
+	vadduqm		$ivec,$out6,$two	# next counter value
+	vxor		$out6,$out6,$rndkey0
+	vxor		$out7,$out7,$rndkey0
+
+	mtctr		$rounds
+	b		Loop_ctr32_enc8x
+.align	5
+Loop_ctr32_enc8x:
+	vcipher 	$out0,$out0,v24
+	vcipher 	$out1,$out1,v24
+	vcipher 	$out2,$out2,v24
+	vcipher 	$out3,$out3,v24
+	vcipher 	$out4,$out4,v24
+	vcipher 	$out5,$out5,v24
+	vcipher 	$out6,$out6,v24
+	vcipher 	$out7,$out7,v24
+Loop_ctr32_enc8x_middle:
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vcipher 	$out0,$out0,v25
+	vcipher 	$out1,$out1,v25
+	vcipher 	$out2,$out2,v25
+	vcipher 	$out3,$out3,v25
+	vcipher 	$out4,$out4,v25
+	vcipher 	$out5,$out5,v25
+	vcipher 	$out6,$out6,v25
+	vcipher 	$out7,$out7,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_ctr32_enc8x
+
+	subic		r11,$len,256		# $len-256, borrow $key_
+	vcipher 	$out0,$out0,v24
+	vcipher 	$out1,$out1,v24
+	vcipher 	$out2,$out2,v24
+	vcipher 	$out3,$out3,v24
+	vcipher 	$out4,$out4,v24
+	vcipher 	$out5,$out5,v24
+	vcipher 	$out6,$out6,v24
+	vcipher 	$out7,$out7,v24
+
+	subfe		r0,r0,r0		# borrow?-1:0
+	vcipher 	$out0,$out0,v25
+	vcipher 	$out1,$out1,v25
+	vcipher 	$out2,$out2,v25
+	vcipher 	$out3,$out3,v25
+	vcipher 	$out4,$out4,v25
+	vcipher		$out5,$out5,v25
+	vcipher		$out6,$out6,v25
+	vcipher		$out7,$out7,v25
+
+	and		r0,r0,r11
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vcipher		$out0,$out0,v26
+	vcipher		$out1,$out1,v26
+	vcipher		$out2,$out2,v26
+	vcipher		$out3,$out3,v26
+	vcipher		$out4,$out4,v26
+	vcipher		$out5,$out5,v26
+	vcipher		$out6,$out6,v26
+	vcipher		$out7,$out7,v26
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	subic		$len,$len,129		# $len-=129
+	vcipher		$out0,$out0,v27
+	addi		$len,$len,1		# $len-=128 really
+	vcipher		$out1,$out1,v27
+	vcipher		$out2,$out2,v27
+	vcipher		$out3,$out3,v27
+	vcipher		$out4,$out4,v27
+	vcipher		$out5,$out5,v27
+	vcipher		$out6,$out6,v27
+	vcipher		$out7,$out7,v27
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+
+	vcipher		$out0,$out0,v28
+	 lvx_u		$in0,$x00,$inp		# load input
+	vcipher		$out1,$out1,v28
+	 lvx_u		$in1,$x10,$inp
+	vcipher		$out2,$out2,v28
+	 lvx_u		$in2,$x20,$inp
+	vcipher		$out3,$out3,v28
+	 lvx_u		$in3,$x30,$inp
+	vcipher		$out4,$out4,v28
+	 lvx_u		$in4,$x40,$inp
+	vcipher		$out5,$out5,v28
+	 lvx_u		$in5,$x50,$inp
+	vcipher		$out6,$out6,v28
+	 lvx_u		$in6,$x60,$inp
+	vcipher		$out7,$out7,v28
+	 lvx_u		$in7,$x70,$inp
+	 addi		$inp,$inp,0x80
+
+	vcipher		$out0,$out0,v29
+	 le?vperm	$in0,$in0,$in0,$inpperm
+	vcipher		$out1,$out1,v29
+	 le?vperm	$in1,$in1,$in1,$inpperm
+	vcipher		$out2,$out2,v29
+	 le?vperm	$in2,$in2,$in2,$inpperm
+	vcipher		$out3,$out3,v29
+	 le?vperm	$in3,$in3,$in3,$inpperm
+	vcipher		$out4,$out4,v29
+	 le?vperm	$in4,$in4,$in4,$inpperm
+	vcipher		$out5,$out5,v29
+	 le?vperm	$in5,$in5,$in5,$inpperm
+	vcipher		$out6,$out6,v29
+	 le?vperm	$in6,$in6,$in6,$inpperm
+	vcipher		$out7,$out7,v29
+	 le?vperm	$in7,$in7,$in7,$inpperm
+
+	add		$inp,$inp,r0		# $inp is adjusted in such
+						# a way that at exit from
+						# the loop inX-in7 are
+						# loaded with last "words"
+	subfe.		r0,r0,r0		# borrow?-1:0
+	vcipher		$out0,$out0,v30
+	 vxor		$in0,$in0,v31		# xor with last round key
+	vcipher		$out1,$out1,v30
+	 vxor		$in1,$in1,v31
+	vcipher		$out2,$out2,v30
+	 vxor		$in2,$in2,v31
+	vcipher		$out3,$out3,v30
+	 vxor		$in3,$in3,v31
+	vcipher		$out4,$out4,v30
+	 vxor		$in4,$in4,v31
+	vcipher		$out5,$out5,v30
+	 vxor		$in5,$in5,v31
+	vcipher		$out6,$out6,v30
+	 vxor		$in6,$in6,v31
+	vcipher		$out7,$out7,v30
+	 vxor		$in7,$in7,v31
+
+	bne		Lctr32_enc8x_break	# did $len-129 borrow?
+
+	vcipherlast	$in0,$out0,$in0
+	vcipherlast	$in1,$out1,$in1
+	 vadduqm	$out1,$ivec,$one	# counter values ...
+	vcipherlast	$in2,$out2,$in2
+	 vadduqm	$out2,$ivec,$two
+	 vxor		$out0,$ivec,$rndkey0	# ... xored with rndkey[0]
+	vcipherlast	$in3,$out3,$in3
+	 vadduqm	$out3,$out1,$two
+	 vxor		$out1,$out1,$rndkey0
+	vcipherlast	$in4,$out4,$in4
+	 vadduqm	$out4,$out2,$two
+	 vxor		$out2,$out2,$rndkey0
+	vcipherlast	$in5,$out5,$in5
+	 vadduqm	$out5,$out3,$two
+	 vxor		$out3,$out3,$rndkey0
+	vcipherlast	$in6,$out6,$in6
+	 vadduqm	$out6,$out4,$two
+	 vxor		$out4,$out4,$rndkey0
+	vcipherlast	$in7,$out7,$in7
+	 vadduqm	$out7,$out5,$two
+	 vxor		$out5,$out5,$rndkey0
+	le?vperm	$in0,$in0,$in0,$inpperm
+	 vadduqm	$ivec,$out6,$two	# next counter value
+	 vxor		$out6,$out6,$rndkey0
+	le?vperm	$in1,$in1,$in1,$inpperm
+	 vxor		$out7,$out7,$rndkey0
+	mtctr		$rounds
+
+	 vcipher	$out0,$out0,v24
+	stvx_u		$in0,$x00,$out
+	le?vperm	$in2,$in2,$in2,$inpperm
+	 vcipher	$out1,$out1,v24
+	stvx_u		$in1,$x10,$out
+	le?vperm	$in3,$in3,$in3,$inpperm
+	 vcipher	$out2,$out2,v24
+	stvx_u		$in2,$x20,$out
+	le?vperm	$in4,$in4,$in4,$inpperm
+	 vcipher	$out3,$out3,v24
+	stvx_u		$in3,$x30,$out
+	le?vperm	$in5,$in5,$in5,$inpperm
+	 vcipher	$out4,$out4,v24
+	stvx_u		$in4,$x40,$out
+	le?vperm	$in6,$in6,$in6,$inpperm
+	 vcipher	$out5,$out5,v24
+	stvx_u		$in5,$x50,$out
+	le?vperm	$in7,$in7,$in7,$inpperm
+	 vcipher	$out6,$out6,v24
+	stvx_u		$in6,$x60,$out
+	 vcipher	$out7,$out7,v24
+	stvx_u		$in7,$x70,$out
+	addi		$out,$out,0x80
+
+	b		Loop_ctr32_enc8x_middle
+
+.align	5
+Lctr32_enc8x_break:
+	cmpwi		$len,-0x60
+	blt		Lctr32_enc8x_one
+	nop
+	beq		Lctr32_enc8x_two
+	cmpwi		$len,-0x40
+	blt		Lctr32_enc8x_three
+	nop
+	beq		Lctr32_enc8x_four
+	cmpwi		$len,-0x20
+	blt		Lctr32_enc8x_five
+	nop
+	beq		Lctr32_enc8x_six
+	cmpwi		$len,0x00
+	blt		Lctr32_enc8x_seven
+
+Lctr32_enc8x_eight:
+	vcipherlast	$out0,$out0,$in0
+	vcipherlast	$out1,$out1,$in1
+	vcipherlast	$out2,$out2,$in2
+	vcipherlast	$out3,$out3,$in3
+	vcipherlast	$out4,$out4,$in4
+	vcipherlast	$out5,$out5,$in5
+	vcipherlast	$out6,$out6,$in6
+	vcipherlast	$out7,$out7,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x40,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x50,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x60,$out
+	stvx_u		$out7,$x70,$out
+	addi		$out,$out,0x80
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_seven:
+	vcipherlast	$out0,$out0,$in1
+	vcipherlast	$out1,$out1,$in2
+	vcipherlast	$out2,$out2,$in3
+	vcipherlast	$out3,$out3,$in4
+	vcipherlast	$out4,$out4,$in5
+	vcipherlast	$out5,$out5,$in6
+	vcipherlast	$out6,$out6,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x40,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x50,$out
+	stvx_u		$out6,$x60,$out
+	addi		$out,$out,0x70
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_six:
+	vcipherlast	$out0,$out0,$in2
+	vcipherlast	$out1,$out1,$in3
+	vcipherlast	$out2,$out2,$in4
+	vcipherlast	$out3,$out3,$in5
+	vcipherlast	$out4,$out4,$in6
+	vcipherlast	$out5,$out5,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x40,$out
+	stvx_u		$out5,$x50,$out
+	addi		$out,$out,0x60
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_five:
+	vcipherlast	$out0,$out0,$in3
+	vcipherlast	$out1,$out1,$in4
+	vcipherlast	$out2,$out2,$in5
+	vcipherlast	$out3,$out3,$in6
+	vcipherlast	$out4,$out4,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	stvx_u		$out4,$x40,$out
+	addi		$out,$out,0x50
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_four:
+	vcipherlast	$out0,$out0,$in4
+	vcipherlast	$out1,$out1,$in5
+	vcipherlast	$out2,$out2,$in6
+	vcipherlast	$out3,$out3,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	stvx_u		$out3,$x30,$out
+	addi		$out,$out,0x40
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_three:
+	vcipherlast	$out0,$out0,$in5
+	vcipherlast	$out1,$out1,$in6
+	vcipherlast	$out2,$out2,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	stvx_u		$out2,$x20,$out
+	addi		$out,$out,0x30
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_two:
+	vcipherlast	$out0,$out0,$in6
+	vcipherlast	$out1,$out1,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	stvx_u		$out1,$x10,$out
+	addi		$out,$out,0x20
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_one:
+	vcipherlast	$out0,$out0,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	stvx_u		$out0,0,$out
+	addi		$out,$out,0x10
+
+Lctr32_enc8x_done:
+	li		r10,`$FRAME+15`
+	li		r11,`$FRAME+31`
+	stvx		$inpperm,r10,$sp	# wipe copies of round keys
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+
+	mtspr		256,$vrsave
+	lvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	lvx		v21,r11,$sp
+	addi		r11,r11,32
+	lvx		v22,r10,$sp
+	addi		r10,r10,32
+	lvx		v23,r11,$sp
+	addi		r11,r11,32
+	lvx		v24,r10,$sp
+	addi		r10,r10,32
+	lvx		v25,r11,$sp
+	addi		r11,r11,32
+	lvx		v26,r10,$sp
+	addi		r10,r10,32
+	lvx		v27,r11,$sp
+	addi		r11,r11,32
+	lvx		v28,r10,$sp
+	addi		r10,r10,32
+	lvx		v29,r11,$sp
+	addi		r11,r11,32
+	lvx		v30,r10,$sp
+	lvx		v31,r11,$sp
+	$POP		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	$POP		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	$POP		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	$POP		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	$POP		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	$POP		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	addi		$sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0x80,6,6,0
+	.long		0
+.size	.${prefix}_ctr32_encrypt_blocks,.-.${prefix}_ctr32_encrypt_blocks
+___
+}}	}}}
+
+#########################################################################
+{{{	# XTS procedures						#
+# int aes_p8_xts_[en|de]crypt(const char *inp, char *out, size_t len,	#
+#                             const AES_KEY *key1, const AES_KEY *key2,	#
+#                             [const] unsigned char iv[16]);		#
+# If $key2 is NULL, a "tweak chaining" mode is engaged, in which the	#
+# input tweak value is assumed to be encrypted already, and the last	#
+# tweak value, suitable for a consecutive call on the same chunk of	#
+# data, is written back to the original buffer. In addition, in	#
+# "tweak chaining" mode only complete input blocks are processed.	#
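+# The tweak is advanced once per 16-byte block by a multiplication by	#
+# alpha in GF(2^128) (reduction polynomial x^128+x^7+x^2+x+1); see	#
+# the vsrab/vaddubm/vsldoi/vand/vxor sequences below.			#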
+
+my ($inp,$out,$len,$key1,$key2,$ivp,$rounds,$idx) =	map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout) =				map("v$_",(0..2));
+my ($output,$inptail,$inpperm,$leperm,$keyperm) =	map("v$_",(3..7));
+my ($tweak,$seven,$eighty7,$tmp,$tweak1) =		map("v$_",(8..12));
+my $taillen = $key2;
+
+   ($inp,$idx) = ($idx,$inp);				# reassign
+
+$code.=<<___;
+.globl	.${prefix}_xts_encrypt
+	mr		$inp,r3				# reassign
+	li		r3,-1
+	${UCMP}i	$len,16
+	bltlr-
+
+	lis		r0,0xfff0
+	mfspr		r12,256				# save vrsave
+	li		r11,0
+	mtspr		256,r0
+
+	vspltisb	$seven,0x07			# 0x070707..07
+	le?lvsl		$leperm,r11,r11
+	le?vspltisb	$tmp,0x0f
+	le?vxor		$leperm,$leperm,$seven
+
+	li		$idx,15
+	lvx		$tweak,0,$ivp			# load [unaligned] iv
+	lvsl		$inpperm,0,$ivp
+	lvx		$inptail,$idx,$ivp
+	le?vxor		$inpperm,$inpperm,$tmp
+	vperm		$tweak,$tweak,$inptail,$inpperm
+
+	neg		r11,$inp
+	lvsr		$inpperm,0,r11			# prepare for unaligned load
+	lvx		$inout,0,$inp
+	addi		$inp,$inp,15			# 15 is not typo
+	le?vxor		$inpperm,$inpperm,$tmp
+
+	${UCMP}i	$key2,0				# key2==NULL?
+	beq		Lxts_enc_no_key2
+
+	?lvsl		$keyperm,0,$key2		# prepare for unaligned key
+	lwz		$rounds,240($key2)
+	srwi		$rounds,$rounds,1
+	subi		$rounds,$rounds,1
+	li		$idx,16
+
+	lvx		$rndkey0,0,$key2
+	lvx		$rndkey1,$idx,$key2
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$tweak,$tweak,$rndkey0
+	lvx		$rndkey0,$idx,$key2
+	addi		$idx,$idx,16
+	mtctr		$rounds
+
+Ltweak_xts_enc:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$tweak,$tweak,$rndkey1
+	lvx		$rndkey1,$idx,$key2
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$tweak,$tweak,$rndkey0
+	lvx		$rndkey0,$idx,$key2
+	addi		$idx,$idx,16
+	bdnz		Ltweak_xts_enc
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$tweak,$tweak,$rndkey1
+	lvx		$rndkey1,$idx,$key2
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipherlast	$tweak,$tweak,$rndkey0
+
+	li		$ivp,0				# don't chain the tweak
+	b		Lxts_enc
+
+Lxts_enc_no_key2:
+	li		$idx,-16
+	and		$len,$len,$idx			# in "tweak chaining"
+							# mode only complete
+							# blocks are processed
+Lxts_enc:
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+
+	?lvsl		$keyperm,0,$key1		# prepare for unaligned key
+	lwz		$rounds,240($key1)
+	srwi		$rounds,$rounds,1
+	subi		$rounds,$rounds,1
+	li		$idx,16
+
+	vslb		$eighty7,$seven,$seven		# 0x808080..80
+	vor		$eighty7,$eighty7,$seven	# 0x878787..87
+	vspltisb	$tmp,1				# 0x010101..01
+	vsldoi		$eighty7,$eighty7,$tmp,15	# 0x870101..01
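+	# The vsrab/vaddubm/vsldoi/vand/vxor sequences that follow double the
+	# tweak in GF(2^128): vaddubm doubles every byte, and the rotated
+	# sign-bit masks ANDed with 0x870101..01 re-insert the inter-byte
+	# carries (0x01) and the reduction term (0x87).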
+
+	${UCMP}i	$len,96
+	bge		_aesp8_xts_encrypt6x
+
+	andi.		$taillen,$len,15
+	subic		r0,$len,32
+	subi		$taillen,$taillen,16
+	subfe		r0,r0,r0
+	and		r0,r0,$taillen
+	add		$inp,$inp,r0
+
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$tweak
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	mtctr		$rounds
+	b		Loop_xts_enc
+
+.align	5
+Loop_xts_enc:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	bdnz		Loop_xts_enc
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$rndkey0,$rndkey0,$tweak
+	vcipherlast	$output,$inout,$rndkey0
+
+	le?vperm	$tmp,$output,$output,$leperm
+	be?nop
+	le?stvx_u	$tmp,0,$out
+	be?stvx_u	$output,0,$out
+	addi		$out,$out,16
+
+	subic.		$len,$len,16
+	beq		Lxts_enc_done
+
+	vmr		$inout,$inptail
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+
+	subic		r0,$len,32
+	subfe		r0,r0,r0
+	and		r0,r0,$taillen
+	add		$inp,$inp,r0
+
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak,$tweak,$tmp
+
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$tweak
+	vxor		$output,$output,$rndkey0	# just in case $len<16
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+
+	mtctr		$rounds
+	${UCMP}i	$len,16
+	bge		Loop_xts_enc
+
+	vxor		$output,$output,$tweak
+	lvsr		$inpperm,0,$len			# $inpperm is no longer needed
+	vxor		$inptail,$inptail,$inptail	# $inptail is no longer needed
+	vspltisb	$tmp,-1
+	vperm		$inptail,$inptail,$tmp,$inpperm
+	vsel		$inout,$inout,$output,$inptail
+
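+	# Ciphertext stealing: copy the leading $len bytes of the block just
+	# written to the final partial position, then run Loop_xts_enc once
+	# more to encrypt the merged block over it.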
+	subi		r11,$out,17
+	subi		$out,$out,16
+	mtctr		$len
+	li		$len,16
+Loop_xts_enc_steal:
+	lbzu		r0,1(r11)
+	stb		r0,16(r11)
+	bdnz		Loop_xts_enc_steal
+
+	mtctr		$rounds
+	b		Loop_xts_enc			# one more time...
+
+Lxts_enc_done:
+	${UCMP}i	$ivp,0
+	beq		Lxts_enc_ret
+
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak,$tweak,$tmp
+
+	le?vperm	$tweak,$tweak,$tweak,$leperm
+	stvx_u		$tweak,0,$ivp
+
+Lxts_enc_ret:
+	mtspr		256,r12				# restore vrsave
+	li		r3,0
+	blr
+	.long		0
+	.byte		0,12,0x04,0,0x80,6,6,0
+	.long		0
+.size	.${prefix}_xts_encrypt,.-.${prefix}_xts_encrypt
+
+.globl	.${prefix}_xts_decrypt
+	mr		$inp,r3				# reassign
+	li		r3,-1
+	${UCMP}i	$len,16
+	bltlr-
+
+	lis		r0,0xfff8
+	mfspr		r12,256				# save vrsave
+	li		r11,0
+	mtspr		256,r0
+
+	andi.		r0,$len,15
+	neg		r0,r0
+	andi.		r0,r0,16
+	sub		$len,$len,r0
+
+	vspltisb	$seven,0x07			# 0x070707..07
+	le?lvsl		$leperm,r11,r11
+	le?vspltisb	$tmp,0x0f
+	le?vxor		$leperm,$leperm,$seven
+
+	li		$idx,15
+	lvx		$tweak,0,$ivp			# load [unaligned] iv
+	lvsl		$inpperm,0,$ivp
+	lvx		$inptail,$idx,$ivp
+	le?vxor		$inpperm,$inpperm,$tmp
+	vperm		$tweak,$tweak,$inptail,$inpperm
+
+	neg		r11,$inp
+	lvsr		$inpperm,0,r11			# prepare for unaligned load
+	lvx		$inout,0,$inp
+	addi		$inp,$inp,15			# 15 is not typo
+	le?vxor		$inpperm,$inpperm,$tmp
+
+	${UCMP}i	$key2,0				# key2==NULL?
+	beq		Lxts_dec_no_key2
+
+	?lvsl		$keyperm,0,$key2		# prepare for unaligned key
+	lwz		$rounds,240($key2)
+	srwi		$rounds,$rounds,1
+	subi		$rounds,$rounds,1
+	li		$idx,16
+
+	lvx		$rndkey0,0,$key2
+	lvx		$rndkey1,$idx,$key2
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$tweak,$tweak,$rndkey0
+	lvx		$rndkey0,$idx,$key2
+	addi		$idx,$idx,16
+	mtctr		$rounds
+
+Ltweak_xts_dec:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$tweak,$tweak,$rndkey1
+	lvx		$rndkey1,$idx,$key2
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$tweak,$tweak,$rndkey0
+	lvx		$rndkey0,$idx,$key2
+	addi		$idx,$idx,16
+	bdnz		Ltweak_xts_dec
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$tweak,$tweak,$rndkey1
+	lvx		$rndkey1,$idx,$key2
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipherlast	$tweak,$tweak,$rndkey0
+
+	li		$ivp,0				# don't chain the tweak
+	b		Lxts_dec
+
+Lxts_dec_no_key2:
+	neg		$idx,$len
+	andi.		$idx,$idx,15
+	add		$len,$len,$idx			# in "tweak chaining"
+							# mode only complete
+							# blocks are processed
+Lxts_dec:
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+
+	?lvsl		$keyperm,0,$key1		# prepare for unaligned key
+	lwz		$rounds,240($key1)
+	srwi		$rounds,$rounds,1
+	subi		$rounds,$rounds,1
+	li		$idx,16
+
+	vslb		$eighty7,$seven,$seven		# 0x808080..80
+	vor		$eighty7,$eighty7,$seven	# 0x878787..87
+	vspltisb	$tmp,1				# 0x010101..01
+	vsldoi		$eighty7,$eighty7,$tmp,15	# 0x870101..01
+
+	${UCMP}i	$len,96
+	bge		_aesp8_xts_decrypt6x
+
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$tweak
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	mtctr		$rounds
+
+	${UCMP}i	$len,16
+	blt		Ltail_xts_dec
+	be?b		Loop_xts_dec
+
+.align	5
+Loop_xts_dec:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vncipher	$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	bdnz		Loop_xts_dec
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$rndkey0,$rndkey0,$tweak
+	vncipherlast	$output,$inout,$rndkey0
+
+	le?vperm	$tmp,$output,$output,$leperm
+	be?nop
+	le?stvx_u	$tmp,0,$out
+	be?stvx_u	$output,0,$out
+	addi		$out,$out,16
+
+	subic.		$len,$len,16
+	beq		Lxts_dec_done
+
+	vmr		$inout,$inptail
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak,$tweak,$tmp
+
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$tweak
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+
+	mtctr		$rounds
+	${UCMP}i	$len,16
+	bge		Loop_xts_dec
+
+Ltail_xts_dec:
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak1,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak1,$tweak1,$tmp
+
+	subi		$inp,$inp,16
+	add		$inp,$inp,$len
+
+	vxor		$inout,$inout,$tweak		# :-(
+	vxor		$inout,$inout,$tweak1		# :-)
+
+Loop_xts_dec_short:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vncipher	$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	bdnz		Loop_xts_dec_short
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$rndkey0,$rndkey0,$tweak1
+	vncipherlast	$output,$inout,$rndkey0
+
+	le?vperm	$tmp,$output,$output,$leperm
+	be?nop
+	le?stvx_u	$tmp,0,$out
+	be?stvx_u	$output,0,$out
+
+	vmr		$inout,$inptail
+	lvx		$inptail,0,$inp
+	#addi		$inp,$inp,16
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+
+	lvsr		$inpperm,0,$len			# $inpperm is no longer needed
+	vxor		$inptail,$inptail,$inptail	# $inptail is no longer needed
+	vspltisb	$tmp,-1
+	vperm		$inptail,$inptail,$tmp,$inpperm
+	vsel		$inout,$inout,$output,$inptail
+
+	vxor		$rndkey0,$rndkey0,$tweak
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+
+	subi		r11,$out,1
+	mtctr		$len
+	li		$len,16
+Loop_xts_dec_steal:
+	lbzu		r0,1(r11)
+	stb		r0,16(r11)
+	bdnz		Loop_xts_dec_steal
+
+	mtctr		$rounds
+	b		Loop_xts_dec			# one more time...
+
+Lxts_dec_done:
+	${UCMP}i	$ivp,0
+	beq		Lxts_dec_ret
+
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak,$tweak,$tmp
+
+	le?vperm	$tweak,$tweak,$tweak,$leperm
+	stvx_u		$tweak,0,$ivp
+
+Lxts_dec_ret:
+	mtspr		256,r12				# restore vrsave
+	li		r3,0
+	blr
+	.long		0
+	.byte		0,12,0x04,0,0x80,6,6,0
+	.long		0
+.size	.${prefix}_xts_decrypt,.-.${prefix}_xts_decrypt
+___
+#########################################################################
+{{	# Optimized XTS procedures					#
+my $key_=$key2;
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,3,26..31));
+    $x00=0 if ($flavour =~ /osx/);
+my ($in0,  $in1,  $in2,  $in3,  $in4,  $in5 )=map("v$_",(0..5));
+my ($out0, $out1, $out2, $out3, $out4, $out5)=map("v$_",(7,12..16));
+my ($twk0, $twk1, $twk2, $twk3, $twk4, $twk5)=map("v$_",(17..22));
+my $rndkey0="v23";	# v24-v25 rotating buffer for first round keys
+			# v26-v31 last 6 round keys
+my ($keyperm)=($out0);	# aliases with "caller", redundant assignment
+my $taillen=$x70;
+
+$code.=<<___;
+.align	5
+_aesp8_xts_encrypt6x:
+	$STU		$sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+	mflr		r11
+	li		r7,`$FRAME+8*16+15`
+	li		r3,`$FRAME+8*16+31`
+	$PUSH		r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
+	stvx		v20,r7,$sp		# ABI says so
+	addi		r7,r7,32
+	stvx		v21,r3,$sp
+	addi		r3,r3,32
+	stvx		v22,r7,$sp
+	addi		r7,r7,32
+	stvx		v23,r3,$sp
+	addi		r3,r3,32
+	stvx		v24,r7,$sp
+	addi		r7,r7,32
+	stvx		v25,r3,$sp
+	addi		r3,r3,32
+	stvx		v26,r7,$sp
+	addi		r7,r7,32
+	stvx		v27,r3,$sp
+	addi		r3,r3,32
+	stvx		v28,r7,$sp
+	addi		r7,r7,32
+	stvx		v29,r3,$sp
+	addi		r3,r3,32
+	stvx		v30,r7,$sp
+	stvx		v31,r3,$sp
+	li		r0,-1
+	stw		$vrsave,`$FRAME+21*16-4`($sp)	# save vrsave
+	li		$x10,0x10
+	$PUSH		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	li		$x20,0x20
+	$PUSH		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	li		$x30,0x30
+	$PUSH		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	li		$x40,0x40
+	$PUSH		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	li		$x50,0x50
+	$PUSH		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	li		$x60,0x60
+	$PUSH		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	li		$x70,0x70
+	mtspr		256,r0
+
+	subi		$rounds,$rounds,3	# -4 in total
+
+	lvx		$rndkey0,$x00,$key1	# load key schedule
+	lvx		v30,$x10,$key1
+	addi		$key1,$key1,0x20
+	lvx		v31,$x00,$key1
+	?vperm		$rndkey0,$rndkey0,v30,$keyperm
+	addi		$key_,$sp,$FRAME+15
+	mtctr		$rounds
+
+Load_xts_enc_key:
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v30,$x10,$key1
+	addi		$key1,$key1,0x20
+	stvx		v24,$x00,$key_		# off-load round[1]
+	?vperm		v25,v31,v30,$keyperm
+	lvx		v31,$x00,$key1
+	stvx		v25,$x10,$key_		# off-load round[2]
+	addi		$key_,$key_,0x20
+	bdnz		Load_xts_enc_key
+
+	lvx		v26,$x10,$key1
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v27,$x20,$key1
+	stvx		v24,$x00,$key_		# off-load round[3]
+	?vperm		v25,v31,v26,$keyperm
+	lvx		v28,$x30,$key1
+	stvx		v25,$x10,$key_		# off-load round[4]
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	?vperm		v26,v26,v27,$keyperm
+	lvx		v29,$x40,$key1
+	?vperm		v27,v27,v28,$keyperm
+	lvx		v30,$x50,$key1
+	?vperm		v28,v28,v29,$keyperm
+	lvx		v31,$x60,$key1
+	?vperm		v29,v29,v30,$keyperm
+	lvx		$twk5,$x70,$key1	# borrow $twk5
+	?vperm		v30,v30,v31,$keyperm
+	lvx		v24,$x00,$key_		# pre-load round[1]
+	?vperm		v31,v31,$twk5,$keyperm
+	lvx		v25,$x10,$key_		# pre-load round[2]
+
+	 vperm		$in0,$inout,$inptail,$inpperm
+	 subi		$inp,$inp,31		# undo "caller"
+	vxor		$twk0,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out0,$in0,$twk0
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in1,$x10,$inp
+	vxor		$twk1,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in1,$in1,$in1,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out1,$in1,$twk1
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in2,$x20,$inp
+	 andi.		$taillen,$len,15
+	vxor		$twk2,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in2,$in2,$in2,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out2,$in2,$twk2
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in3,$x30,$inp
+	 sub		$len,$len,$taillen
+	vxor		$twk3,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in3,$in3,$in3,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out3,$in3,$twk3
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in4,$x40,$inp
+	 subi		$len,$len,0x60
+	vxor		$twk4,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in4,$in4,$in4,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out4,$in4,$twk4
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in5,$x50,$inp
+	 addi		$inp,$inp,0x60
+	vxor		$twk5,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in5,$in5,$in5,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out5,$in5,$twk5
+	vxor		$tweak,$tweak,$tmp
+
+	vxor		v31,v31,$rndkey0
+	mtctr		$rounds
+	b		Loop_xts_enc6x
+
+.align	5
+Loop_xts_enc6x:
+	vcipher		$out0,$out0,v24
+	vcipher		$out1,$out1,v24
+	vcipher		$out2,$out2,v24
+	vcipher		$out3,$out3,v24
+	vcipher		$out4,$out4,v24
+	vcipher		$out5,$out5,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vcipher		$out0,$out0,v25
+	vcipher		$out1,$out1,v25
+	vcipher		$out2,$out2,v25
+	vcipher		$out3,$out3,v25
+	vcipher		$out4,$out4,v25
+	vcipher		$out5,$out5,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_xts_enc6x
+
+	subic		$len,$len,96		# $len-=96
+	 vxor		$in0,$twk0,v31		# xor with last round key
+	vcipher		$out0,$out0,v24
+	vcipher		$out1,$out1,v24
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk0,$tweak,$rndkey0
+	 vaddubm	$tweak,$tweak,$tweak
+	vcipher		$out2,$out2,v24
+	vcipher		$out3,$out3,v24
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipher		$out4,$out4,v24
+	vcipher		$out5,$out5,v24
+
+	subfe.		r0,r0,r0		# borrow?-1:0
+	 vand		$tmp,$tmp,$eighty7
+	vcipher		$out0,$out0,v25
+	vcipher		$out1,$out1,v25
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out2,$out2,v25
+	vcipher		$out3,$out3,v25
+	 vxor		$in1,$twk1,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk1,$tweak,$rndkey0
+	vcipher		$out4,$out4,v25
+	vcipher		$out5,$out5,v25
+
+	and		r0,r0,$len
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipher		$out0,$out0,v26
+	vcipher		$out1,$out1,v26
+	 vand		$tmp,$tmp,$eighty7
+	vcipher		$out2,$out2,v26
+	vcipher		$out3,$out3,v26
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out4,$out4,v26
+	vcipher		$out5,$out5,v26
+
+	add		$inp,$inp,r0		# $inp is adjusted in such
+						# a way that at exit from
+						# the loop inX-in5 are
+						# loaded with last "words"
+	 vxor		$in2,$twk2,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk2,$tweak,$rndkey0
+	 vaddubm	$tweak,$tweak,$tweak
+	vcipher		$out0,$out0,v27
+	vcipher		$out1,$out1,v27
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipher		$out2,$out2,v27
+	vcipher		$out3,$out3,v27
+	 vand		$tmp,$tmp,$eighty7
+	vcipher		$out4,$out4,v27
+	vcipher		$out5,$out5,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out0,$out0,v28
+	vcipher		$out1,$out1,v28
+	 vxor		$in3,$twk3,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk3,$tweak,$rndkey0
+	vcipher		$out2,$out2,v28
+	vcipher		$out3,$out3,v28
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipher		$out4,$out4,v28
+	vcipher		$out5,$out5,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+	 vand		$tmp,$tmp,$eighty7
+
+	vcipher		$out0,$out0,v29
+	vcipher		$out1,$out1,v29
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out2,$out2,v29
+	vcipher		$out3,$out3,v29
+	 vxor		$in4,$twk4,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk4,$tweak,$rndkey0
+	vcipher		$out4,$out4,v29
+	vcipher		$out5,$out5,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+
+	vcipher		$out0,$out0,v30
+	vcipher		$out1,$out1,v30
+	 vand		$tmp,$tmp,$eighty7
+	vcipher		$out2,$out2,v30
+	vcipher		$out3,$out3,v30
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out4,$out4,v30
+	vcipher		$out5,$out5,v30
+	 vxor		$in5,$twk5,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk5,$tweak,$rndkey0
+
+	vcipherlast	$out0,$out0,$in0
+	 lvx_u		$in0,$x00,$inp		# load next input block
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipherlast	$out1,$out1,$in1
+	 lvx_u		$in1,$x10,$inp
+	vcipherlast	$out2,$out2,$in2
+	 le?vperm	$in0,$in0,$in0,$leperm
+	 lvx_u		$in2,$x20,$inp
+	 vand		$tmp,$tmp,$eighty7
+	vcipherlast	$out3,$out3,$in3
+	 le?vperm	$in1,$in1,$in1,$leperm
+	 lvx_u		$in3,$x30,$inp
+	vcipherlast	$out4,$out4,$in4
+	 le?vperm	$in2,$in2,$in2,$leperm
+	 lvx_u		$in4,$x40,$inp
+	 vxor		$tweak,$tweak,$tmp
+	vcipherlast	$tmp,$out5,$in5		# last block might be needed
+						# in stealing mode
+	 le?vperm	$in3,$in3,$in3,$leperm
+	 lvx_u		$in5,$x50,$inp
+	 addi		$inp,$inp,0x60
+	 le?vperm	$in4,$in4,$in4,$leperm
+	 le?vperm	$in5,$in5,$in5,$leperm
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	 vxor		$out0,$in0,$twk0
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	 vxor		$out1,$in1,$twk1
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	 vxor		$out2,$in2,$twk2
+	le?vperm	$out4,$out4,$out4,$leperm
+	stvx_u		$out3,$x30,$out
+	 vxor		$out3,$in3,$twk3
+	le?vperm	$out5,$tmp,$tmp,$leperm
+	stvx_u		$out4,$x40,$out
+	 vxor		$out4,$in4,$twk4
+	le?stvx_u	$out5,$x50,$out
+	be?stvx_u	$tmp, $x50,$out
+	 vxor		$out5,$in5,$twk5
+	addi		$out,$out,0x60
+
+	mtctr		$rounds
+	beq		Loop_xts_enc6x		# did $len-=96 borrow?
+
+	addic.		$len,$len,0x60
+	beq		Lxts_enc6x_zero
+	cmpwi		$len,0x20
+	blt		Lxts_enc6x_one
+	nop
+	beq		Lxts_enc6x_two
+	cmpwi		$len,0x40
+	blt		Lxts_enc6x_three
+	nop
+	beq		Lxts_enc6x_four
+
+Lxts_enc6x_five:
+	vxor		$out0,$in1,$twk0
+	vxor		$out1,$in2,$twk1
+	vxor		$out2,$in3,$twk2
+	vxor		$out3,$in4,$twk3
+	vxor		$out4,$in5,$twk4
+
+	bl		_aesp8_xts_enc5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk5		# unused tweak
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	vxor		$tmp,$out4,$twk5	# last block prep for stealing
+	le?vperm	$out4,$out4,$out4,$leperm
+	stvx_u		$out3,$x30,$out
+	stvx_u		$out4,$x40,$out
+	addi		$out,$out,0x50
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_four:
+	vxor		$out0,$in2,$twk0
+	vxor		$out1,$in3,$twk1
+	vxor		$out2,$in4,$twk2
+	vxor		$out3,$in5,$twk3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_enc5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk4		# unused tweak
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	vxor		$tmp,$out3,$twk4	# last block prep for stealing
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	stvx_u		$out3,$x30,$out
+	addi		$out,$out,0x40
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_three:
+	vxor		$out0,$in3,$twk0
+	vxor		$out1,$in4,$twk1
+	vxor		$out2,$in5,$twk2
+	vxor		$out3,$out3,$out3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_enc5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk3		# unused tweak
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$tmp,$out2,$twk3	# last block prep for stealing
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	stvx_u		$out2,$x20,$out
+	addi		$out,$out,0x30
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_two:
+	vxor		$out0,$in4,$twk0
+	vxor		$out1,$in5,$twk1
+	vxor		$out2,$out2,$out2
+	vxor		$out3,$out3,$out3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_enc5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk2		# unused tweak
+	vxor		$tmp,$out1,$twk2	# last block prep for stealing
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	stvx_u		$out1,$x10,$out
+	addi		$out,$out,0x20
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_one:
+	vxor		$out0,$in5,$twk0
+	nop
+Loop_xts_enc1x:
+	vcipher		$out0,$out0,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vcipher		$out0,$out0,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_xts_enc1x
+
+	add		$inp,$inp,$taillen
+	cmpwi		$taillen,0
+	vcipher		$out0,$out0,v24
+
+	subi		$inp,$inp,16
+	vcipher		$out0,$out0,v25
+
+	lvsr		$inpperm,0,$taillen
+	vcipher		$out0,$out0,v26
+
+	lvx_u		$in0,0,$inp
+	vcipher		$out0,$out0,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vcipher		$out0,$out0,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	vcipher		$out0,$out0,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$twk0,$twk0,v31
+
+	le?vperm	$in0,$in0,$in0,$leperm
+	vcipher		$out0,$out0,v30
+
+	vperm		$in0,$in0,$in0,$inpperm
+	vcipherlast	$out0,$out0,$twk0
+
+	vmr		$twk0,$twk1		# unused tweak
+	vxor		$tmp,$out0,$twk1	# last block prep for stealing
+	le?vperm	$out0,$out0,$out0,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	addi		$out,$out,0x10
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_zero:
+	cmpwi		$taillen,0
+	beq		Lxts_enc6x_done
+
+	add		$inp,$inp,$taillen
+	subi		$inp,$inp,16
+	lvx_u		$in0,0,$inp
+	lvsr		$inpperm,0,$taillen	# $in5 is no more
+	le?vperm	$in0,$in0,$in0,$leperm
+	vperm		$in0,$in0,$in0,$inpperm
+	vxor		$tmp,$tmp,$twk0
+Lxts_enc6x_steal:
+	vxor		$in0,$in0,$twk0
+	vxor		$out0,$out0,$out0
+	vspltisb	$out1,-1
+	vperm		$out0,$out0,$out1,$inpperm
+	vsel		$out0,$in0,$tmp,$out0	# $tmp is last block, remember?
+
+	subi		r30,$out,17
+	subi		$out,$out,16
+	mtctr		$taillen
+Loop_xts_enc6x_steal:
+	lbzu		r0,1(r30)
+	stb		r0,16(r30)
+	bdnz		Loop_xts_enc6x_steal
+
+	li		$taillen,0
+	mtctr		$rounds
+	b		Loop_xts_enc1x		# one more time...
+
+.align	4
+Lxts_enc6x_done:
+	${UCMP}i	$ivp,0
+	beq		Lxts_enc6x_ret
+
+	vxor		$tweak,$twk0,$rndkey0
+	le?vperm	$tweak,$tweak,$tweak,$leperm
+	stvx_u		$tweak,0,$ivp
+
+Lxts_enc6x_ret:
+	mtlr		r11
+	li		r10,`$FRAME+15`
+	li		r11,`$FRAME+31`
+	stvx		$seven,r10,$sp		# wipe copies of round keys
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+
+	mtspr		256,$vrsave
+	lvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	lvx		v21,r11,$sp
+	addi		r11,r11,32
+	lvx		v22,r10,$sp
+	addi		r10,r10,32
+	lvx		v23,r11,$sp
+	addi		r11,r11,32
+	lvx		v24,r10,$sp
+	addi		r10,r10,32
+	lvx		v25,r11,$sp
+	addi		r11,r11,32
+	lvx		v26,r10,$sp
+	addi		r10,r10,32
+	lvx		v27,r11,$sp
+	addi		r11,r11,32
+	lvx		v28,r10,$sp
+	addi		r10,r10,32
+	lvx		v29,r11,$sp
+	addi		r11,r11,32
+	lvx		v30,r10,$sp
+	lvx		v31,r11,$sp
+	$POP		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	$POP		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	$POP		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	$POP		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	$POP		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	$POP		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	addi		$sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+	blr
+	.long		0
+	.byte		0,12,0x04,1,0x80,6,6,0
+	.long		0
+
+.align	5
+_aesp8_xts_enc5x:
+	vcipher		$out0,$out0,v24
+	vcipher		$out1,$out1,v24
+	vcipher		$out2,$out2,v24
+	vcipher		$out3,$out3,v24
+	vcipher		$out4,$out4,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vcipher		$out0,$out0,v25
+	vcipher		$out1,$out1,v25
+	vcipher		$out2,$out2,v25
+	vcipher		$out3,$out3,v25
+	vcipher		$out4,$out4,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		_aesp8_xts_enc5x
+
+	add		$inp,$inp,$taillen
+	cmpwi		$taillen,0
+	vcipher		$out0,$out0,v24
+	vcipher		$out1,$out1,v24
+	vcipher		$out2,$out2,v24
+	vcipher		$out3,$out3,v24
+	vcipher		$out4,$out4,v24
+
+	subi		$inp,$inp,16
+	vcipher		$out0,$out0,v25
+	vcipher		$out1,$out1,v25
+	vcipher		$out2,$out2,v25
+	vcipher		$out3,$out3,v25
+	vcipher		$out4,$out4,v25
+	 vxor		$twk0,$twk0,v31
+
+	vcipher		$out0,$out0,v26
+	lvsr		$inpperm,r0,$taillen	# $in5 is no more
+	vcipher		$out1,$out1,v26
+	vcipher		$out2,$out2,v26
+	vcipher		$out3,$out3,v26
+	vcipher		$out4,$out4,v26
+	 vxor		$in1,$twk1,v31
+
+	vcipher		$out0,$out0,v27
+	lvx_u		$in0,0,$inp
+	vcipher		$out1,$out1,v27
+	vcipher		$out2,$out2,v27
+	vcipher		$out3,$out3,v27
+	vcipher		$out4,$out4,v27
+	 vxor		$in2,$twk2,v31
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vcipher		$out0,$out0,v28
+	vcipher		$out1,$out1,v28
+	vcipher		$out2,$out2,v28
+	vcipher		$out3,$out3,v28
+	vcipher		$out4,$out4,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+	 vxor		$in3,$twk3,v31
+
+	vcipher		$out0,$out0,v29
+	le?vperm	$in0,$in0,$in0,$leperm
+	vcipher		$out1,$out1,v29
+	vcipher		$out2,$out2,v29
+	vcipher		$out3,$out3,v29
+	vcipher		$out4,$out4,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$in4,$twk4,v31
+
+	vcipher		$out0,$out0,v30
+	vperm		$in0,$in0,$in0,$inpperm
+	vcipher		$out1,$out1,v30
+	vcipher		$out2,$out2,v30
+	vcipher		$out3,$out3,v30
+	vcipher		$out4,$out4,v30
+
+	vcipherlast	$out0,$out0,$twk0
+	vcipherlast	$out1,$out1,$in1
+	vcipherlast	$out2,$out2,$in2
+	vcipherlast	$out3,$out3,$in3
+	vcipherlast	$out4,$out4,$in4
+	blr
+        .long   	0
+        .byte   	0,12,0x14,0,0,0,0,0
+
+.align	5
+_aesp8_xts_decrypt6x:
+	$STU		$sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+	mflr		r11
+	li		r7,`$FRAME+8*16+15`
+	li		r3,`$FRAME+8*16+31`
+	$PUSH		r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
+	stvx		v20,r7,$sp		# ABI says so
+	addi		r7,r7,32
+	stvx		v21,r3,$sp
+	addi		r3,r3,32
+	stvx		v22,r7,$sp
+	addi		r7,r7,32
+	stvx		v23,r3,$sp
+	addi		r3,r3,32
+	stvx		v24,r7,$sp
+	addi		r7,r7,32
+	stvx		v25,r3,$sp
+	addi		r3,r3,32
+	stvx		v26,r7,$sp
+	addi		r7,r7,32
+	stvx		v27,r3,$sp
+	addi		r3,r3,32
+	stvx		v28,r7,$sp
+	addi		r7,r7,32
+	stvx		v29,r3,$sp
+	addi		r3,r3,32
+	stvx		v30,r7,$sp
+	stvx		v31,r3,$sp
+	li		r0,-1
+	stw		$vrsave,`$FRAME+21*16-4`($sp)	# save vrsave
+	li		$x10,0x10
+	$PUSH		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	li		$x20,0x20
+	$PUSH		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	li		$x30,0x30
+	$PUSH		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	li		$x40,0x40
+	$PUSH		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	li		$x50,0x50
+	$PUSH		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	li		$x60,0x60
+	$PUSH		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	li		$x70,0x70
+	mtspr		256,r0
+
+	subi		$rounds,$rounds,3	# -4 in total
+
+	lvx		$rndkey0,$x00,$key1	# load key schedule
+	lvx		v30,$x10,$key1
+	addi		$key1,$key1,0x20
+	lvx		v31,$x00,$key1
+	?vperm		$rndkey0,$rndkey0,v30,$keyperm
+	addi		$key_,$sp,$FRAME+15
+	mtctr		$rounds
+
+Load_xts_dec_key:
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v30,$x10,$key1
+	addi		$key1,$key1,0x20
+	stvx		v24,$x00,$key_		# off-load round[1]
+	?vperm		v25,v31,v30,$keyperm
+	lvx		v31,$x00,$key1
+	stvx		v25,$x10,$key_		# off-load round[2]
+	addi		$key_,$key_,0x20
+	bdnz		Load_xts_dec_key
+
+	lvx		v26,$x10,$key1
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v27,$x20,$key1
+	stvx		v24,$x00,$key_		# off-load round[3]
+	?vperm		v25,v31,v26,$keyperm
+	lvx		v28,$x30,$key1
+	stvx		v25,$x10,$key_		# off-load round[4]
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	?vperm		v26,v26,v27,$keyperm
+	lvx		v29,$x40,$key1
+	?vperm		v27,v27,v28,$keyperm
+	lvx		v30,$x50,$key1
+	?vperm		v28,v28,v29,$keyperm
+	lvx		v31,$x60,$key1
+	?vperm		v29,v29,v30,$keyperm
+	lvx		$twk5,$x70,$key1	# borrow $twk5
+	?vperm		v30,v30,v31,$keyperm
+	lvx		v24,$x00,$key_		# pre-load round[1]
+	?vperm		v31,v31,$twk5,$keyperm
+	lvx		v25,$x10,$key_		# pre-load round[2]
+
+	 vperm		$in0,$inout,$inptail,$inpperm
+	 subi		$inp,$inp,31		# undo "caller"
+	vxor		$twk0,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out0,$in0,$twk0
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in1,$x10,$inp
+	vxor		$twk1,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in1,$in1,$in1,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out1,$in1,$twk1
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in2,$x20,$inp
+	 andi.		$taillen,$len,15
+	vxor		$twk2,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in2,$in2,$in2,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out2,$in2,$twk2
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in3,$x30,$inp
+	 sub		$len,$len,$taillen
+	vxor		$twk3,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in3,$in3,$in3,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out3,$in3,$twk3
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in4,$x40,$inp
+	 subi		$len,$len,0x60
+	vxor		$twk4,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in4,$in4,$in4,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out4,$in4,$twk4
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in5,$x50,$inp
+	 addi		$inp,$inp,0x60
+	vxor		$twk5,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in5,$in5,$in5,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out5,$in5,$twk5
+	vxor		$tweak,$tweak,$tmp
+
+	vxor		v31,v31,$rndkey0
+	mtctr		$rounds
+	b		Loop_xts_dec6x
+
+.align	5
+Loop_xts_dec6x:
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_xts_dec6x
+
+	subic		$len,$len,96		# $len-=96
+	 vxor		$in0,$twk0,v31		# xor with last round key
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk0,$tweak,$rndkey0
+	 vaddubm	$tweak,$tweak,$tweak
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+
+	subfe.		r0,r0,r0		# borrow?-1:0
+	 vand		$tmp,$tmp,$eighty7
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	 vxor		$in1,$twk1,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk1,$tweak,$rndkey0
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+
+	and		r0,r0,$len
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipher	$out0,$out0,v26
+	vncipher	$out1,$out1,v26
+	 vand		$tmp,$tmp,$eighty7
+	vncipher	$out2,$out2,v26
+	vncipher	$out3,$out3,v26
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out4,$out4,v26
+	vncipher	$out5,$out5,v26
+
+	add		$inp,$inp,r0		# $inp is adjusted in such
+						# way that at exit from the
+						# loop inX-in5 are loaded
+						# with last "words"
+	 vxor		$in2,$twk2,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk2,$tweak,$rndkey0
+	 vaddubm	$tweak,$tweak,$tweak
+	vncipher	$out0,$out0,v27
+	vncipher	$out1,$out1,v27
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipher	$out2,$out2,v27
+	vncipher	$out3,$out3,v27
+	 vand		$tmp,$tmp,$eighty7
+	vncipher	$out4,$out4,v27
+	vncipher	$out5,$out5,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out0,$out0,v28
+	vncipher	$out1,$out1,v28
+	 vxor		$in3,$twk3,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk3,$tweak,$rndkey0
+	vncipher	$out2,$out2,v28
+	vncipher	$out3,$out3,v28
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipher	$out4,$out4,v28
+	vncipher	$out5,$out5,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+	 vand		$tmp,$tmp,$eighty7
+
+	vncipher	$out0,$out0,v29
+	vncipher	$out1,$out1,v29
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out2,$out2,v29
+	vncipher	$out3,$out3,v29
+	 vxor		$in4,$twk4,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk4,$tweak,$rndkey0
+	vncipher	$out4,$out4,v29
+	vncipher	$out5,$out5,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+
+	vncipher	$out0,$out0,v30
+	vncipher	$out1,$out1,v30
+	 vand		$tmp,$tmp,$eighty7
+	vncipher	$out2,$out2,v30
+	vncipher	$out3,$out3,v30
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out4,$out4,v30
+	vncipher	$out5,$out5,v30
+	 vxor		$in5,$twk5,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk5,$tweak,$rndkey0
+
+	vncipherlast	$out0,$out0,$in0
+	 lvx_u		$in0,$x00,$inp		# load next input block
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipherlast	$out1,$out1,$in1
+	 lvx_u		$in1,$x10,$inp
+	vncipherlast	$out2,$out2,$in2
+	 le?vperm	$in0,$in0,$in0,$leperm
+	 lvx_u		$in2,$x20,$inp
+	 vand		$tmp,$tmp,$eighty7
+	vncipherlast	$out3,$out3,$in3
+	 le?vperm	$in1,$in1,$in1,$leperm
+	 lvx_u		$in3,$x30,$inp
+	vncipherlast	$out4,$out4,$in4
+	 le?vperm	$in2,$in2,$in2,$leperm
+	 lvx_u		$in4,$x40,$inp
+	 vxor		$tweak,$tweak,$tmp
+	vncipherlast	$out5,$out5,$in5
+	 le?vperm	$in3,$in3,$in3,$leperm
+	 lvx_u		$in5,$x50,$inp
+	 addi		$inp,$inp,0x60
+	 le?vperm	$in4,$in4,$in4,$leperm
+	 le?vperm	$in5,$in5,$in5,$leperm
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	 vxor		$out0,$in0,$twk0
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	 vxor		$out1,$in1,$twk1
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	 vxor		$out2,$in2,$twk2
+	le?vperm	$out4,$out4,$out4,$leperm
+	stvx_u		$out3,$x30,$out
+	 vxor		$out3,$in3,$twk3
+	le?vperm	$out5,$out5,$out5,$leperm
+	stvx_u		$out4,$x40,$out
+	 vxor		$out4,$in4,$twk4
+	stvx_u		$out5,$x50,$out
+	 vxor		$out5,$in5,$twk5
+	addi		$out,$out,0x60
+
+	mtctr		$rounds
+	beq		Loop_xts_dec6x		# did $len-=96 borrow?
+
+	addic.		$len,$len,0x60
+	beq		Lxts_dec6x_zero
+	cmpwi		$len,0x20
+	blt		Lxts_dec6x_one
+	nop
+	beq		Lxts_dec6x_two
+	cmpwi		$len,0x40
+	blt		Lxts_dec6x_three
+	nop
+	beq		Lxts_dec6x_four
+
+Lxts_dec6x_five:
+	vxor		$out0,$in1,$twk0
+	vxor		$out1,$in2,$twk1
+	vxor		$out2,$in3,$twk2
+	vxor		$out3,$in4,$twk3
+	vxor		$out4,$in5,$twk4
+
+	bl		_aesp8_xts_dec5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk5		# unused tweak
+	vxor		$twk1,$tweak,$rndkey0
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$out0,$in0,$twk1
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$leperm
+	stvx_u		$out3,$x30,$out
+	stvx_u		$out4,$x40,$out
+	addi		$out,$out,0x50
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_four:
+	vxor		$out0,$in2,$twk0
+	vxor		$out1,$in3,$twk1
+	vxor		$out2,$in4,$twk2
+	vxor		$out3,$in5,$twk3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_dec5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk4		# unused tweak
+	vmr		$twk1,$twk5
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$out0,$in0,$twk5
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	stvx_u		$out3,$x30,$out
+	addi		$out,$out,0x40
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_three:
+	vxor		$out0,$in3,$twk0
+	vxor		$out1,$in4,$twk1
+	vxor		$out2,$in5,$twk2
+	vxor		$out3,$out3,$out3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_dec5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk3		# unused tweak
+	vmr		$twk1,$twk4
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$out0,$in0,$twk4
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	stvx_u		$out2,$x20,$out
+	addi		$out,$out,0x30
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_two:
+	vxor		$out0,$in4,$twk0
+	vxor		$out1,$in5,$twk1
+	vxor		$out2,$out2,$out2
+	vxor		$out3,$out3,$out3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_dec5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk2		# unused tweak
+	vmr		$twk1,$twk3
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$out0,$in0,$twk3
+	stvx_u		$out1,$x10,$out
+	addi		$out,$out,0x20
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_one:
+	vxor		$out0,$in5,$twk0
+	nop
+Loop_xts_dec1x:
+	vncipher	$out0,$out0,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_xts_dec1x
+
+	subi		r0,$taillen,1
+	vncipher	$out0,$out0,v24
+
+	andi.		r0,r0,16
+	cmpwi		$taillen,0
+	vncipher	$out0,$out0,v25
+
+	sub		$inp,$inp,r0
+	vncipher	$out0,$out0,v26
+
+	lvx_u		$in0,0,$inp
+	vncipher	$out0,$out0,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vncipher	$out0,$out0,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	vncipher	$out0,$out0,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$twk0,$twk0,v31
+
+	le?vperm	$in0,$in0,$in0,$leperm
+	vncipher	$out0,$out0,v30
+
+	mtctr		$rounds
+	vncipherlast	$out0,$out0,$twk0
+
+	vmr		$twk0,$twk1		# unused tweak
+	vmr		$twk1,$twk2
+	le?vperm	$out0,$out0,$out0,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	addi		$out,$out,0x10
+	vxor		$out0,$in0,$twk2
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_zero:
+	cmpwi		$taillen,0
+	beq		Lxts_dec6x_done
+
+	lvx_u		$in0,0,$inp
+	le?vperm	$in0,$in0,$in0,$leperm
+	vxor		$out0,$in0,$twk1
+Lxts_dec6x_steal:
+	vncipher	$out0,$out0,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Lxts_dec6x_steal
+
+	add		$inp,$inp,$taillen
+	vncipher	$out0,$out0,v24
+
+	cmpwi		$taillen,0
+	vncipher	$out0,$out0,v25
+
+	lvx_u		$in0,0,$inp
+	vncipher	$out0,$out0,v26
+
+	lvsr		$inpperm,0,$taillen	# $in5 is no more
+	vncipher	$out0,$out0,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vncipher	$out0,$out0,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	vncipher	$out0,$out0,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$twk1,$twk1,v31
+
+	le?vperm	$in0,$in0,$in0,$leperm
+	vncipher	$out0,$out0,v30
+
+	vperm		$in0,$in0,$in0,$inpperm
+	vncipherlast	$tmp,$out0,$twk1
+
+	le?vperm	$out0,$tmp,$tmp,$leperm
+	le?stvx_u	$out0,0,$out
+	be?stvx_u	$tmp,0,$out
+
+	vxor		$out0,$out0,$out0
+	vspltisb	$out1,-1
+	vperm		$out0,$out0,$out1,$inpperm
+	vsel		$out0,$in0,$tmp,$out0
+	vxor		$out0,$out0,$twk0
+
+	subi		r30,$out,1
+	mtctr		$taillen
+Loop_xts_dec6x_steal:
+	lbzu		r0,1(r30)
+	stb		r0,16(r30)
+	bdnz		Loop_xts_dec6x_steal
+
+	li		$taillen,0
+	mtctr		$rounds
+	b		Loop_xts_dec1x		# one more time...
+
+.align	4
+Lxts_dec6x_done:
+	${UCMP}i	$ivp,0
+	beq		Lxts_dec6x_ret
+
+	vxor		$tweak,$twk0,$rndkey0
+	le?vperm	$tweak,$tweak,$tweak,$leperm
+	stvx_u		$tweak,0,$ivp
+
+Lxts_dec6x_ret:
+	mtlr		r11
+	li		r10,`$FRAME+15`
+	li		r11,`$FRAME+31`
+	stvx		$seven,r10,$sp		# wipe copies of round keys
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+
+	mtspr		256,$vrsave
+	lvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	lvx		v21,r11,$sp
+	addi		r11,r11,32
+	lvx		v22,r10,$sp
+	addi		r10,r10,32
+	lvx		v23,r11,$sp
+	addi		r11,r11,32
+	lvx		v24,r10,$sp
+	addi		r10,r10,32
+	lvx		v25,r11,$sp
+	addi		r11,r11,32
+	lvx		v26,r10,$sp
+	addi		r10,r10,32
+	lvx		v27,r11,$sp
+	addi		r11,r11,32
+	lvx		v28,r10,$sp
+	addi		r10,r10,32
+	lvx		v29,r11,$sp
+	addi		r11,r11,32
+	lvx		v30,r10,$sp
+	lvx		v31,r11,$sp
+	$POP		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	$POP		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	$POP		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	$POP		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	$POP		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	$POP		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	addi		$sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+	blr
+	.long		0
+	.byte		0,12,0x04,1,0x80,6,6,0
+	.long		0
+
+.align	5
+_aesp8_xts_dec5x:
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		_aesp8_xts_dec5x
+
+	subi		r0,$taillen,1
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+
+	andi.		r0,r0,16
+	cmpwi		$taillen,0
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	 vxor		$twk0,$twk0,v31
+
+	sub		$inp,$inp,r0
+	vncipher	$out0,$out0,v26
+	vncipher	$out1,$out1,v26
+	vncipher	$out2,$out2,v26
+	vncipher	$out3,$out3,v26
+	vncipher	$out4,$out4,v26
+	 vxor		$in1,$twk1,v31
+
+	vncipher	$out0,$out0,v27
+	lvx_u		$in0,0,$inp
+	vncipher	$out1,$out1,v27
+	vncipher	$out2,$out2,v27
+	vncipher	$out3,$out3,v27
+	vncipher	$out4,$out4,v27
+	 vxor		$in2,$twk2,v31
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vncipher	$out0,$out0,v28
+	vncipher	$out1,$out1,v28
+	vncipher	$out2,$out2,v28
+	vncipher	$out3,$out3,v28
+	vncipher	$out4,$out4,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+	 vxor		$in3,$twk3,v31
+
+	vncipher	$out0,$out0,v29
+	le?vperm	$in0,$in0,$in0,$leperm
+	vncipher	$out1,$out1,v29
+	vncipher	$out2,$out2,v29
+	vncipher	$out3,$out3,v29
+	vncipher	$out4,$out4,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$in4,$twk4,v31
+
+	vncipher	$out0,$out0,v30
+	vncipher	$out1,$out1,v30
+	vncipher	$out2,$out2,v30
+	vncipher	$out3,$out3,v30
+	vncipher	$out4,$out4,v30
+
+	vncipherlast	$out0,$out0,$twk0
+	vncipherlast	$out1,$out1,$in1
+	vncipherlast	$out2,$out2,$in2
+	vncipherlast	$out3,$out3,$in3
+	vncipherlast	$out4,$out4,$in4
+	mtctr		$rounds
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,0,0
+___
+}}	}}}
+
+my $consts=1;
+foreach(split("\n",$code)) {
+        s/\`([^\`]*)\`/eval($1)/geo;
+
+	# constants table endian-specific conversion
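+	# e.g. a ".long 0x01020304 ?rev" entry is re-emitted as
+	# ".byte 0x04,0x03,0x02,0x01" on a little-endian flavour
+	# (and as ".byte 0x01,0x02,0x03,0x04" on big-endian)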
+	if ($consts && m/\.(long|byte)\s+(.+)\s+(\?[a-z]*)$/o) {
+	    my $conv=$3;
+	    my @bytes=();
+
+	    # convert to endian-agnostic format
+	    if ($1 eq "long") {
+	      foreach (split(/,\s*/,$2)) {
+		my $l = /^0/?oct:int;
+		push @bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff;
+	      }
+	    } else {
+		@bytes = map(/^0/?oct:int,split(/,\s*/,$2));
+	    }
+
+	    # little-endian conversion
+	    if ($flavour =~ /le$/o) {
+		SWITCH: for($conv)  {
+		    /\?inv/ && do   { @bytes=map($_^0xf,@bytes); last; };
+		    /\?rev/ && do   { @bytes=reverse(@bytes);    last; };
+		}
+	    }
+
+	    #emit
+	    print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n";
+	    next;
+	}
+	$consts=0 if (m/Lconsts:/o);	# end of table
+
+	# instructions prefixed with '?' are endian-specific and need
+	# to be adjusted accordingly...
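+	# For example, with the rules below a little-endian build turns
+	# "?lvsr" into lvsl, "?lvsl" into lvsr, and swaps the two middle
+	# operands of a "?vperm"; a big-endian build simply drops the '?'.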
+	if ($flavour =~ /le$/o) {	# little-endian
+	    s/le\?//o		or
+	    s/be\?/#be#/o	or
+	    s/\?lvsr/lvsl/o	or
+	    s/\?lvsl/lvsr/o	or
+	    s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or
+	    s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or
+	    s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o;
+	} else {			# big-endian
+	    s/le\?/#le#/o	or
+	    s/be\?//o		or
+	    s/\?([a-z]+)/$1/o;
+	}
+
+        print $_,"\n";
+}
+
+close STDOUT;
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
new file mode 100644
index 0000000..dd8b871
--- /dev/null
+++ b/drivers/crypto/vmx/ghash.c
@@ -0,0 +1,225 @@
+/**
+ * GHASH routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/ghash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+#include <crypto/b128ops.h>
+
+#define IN_INTERRUPT in_interrupt()
+
+void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
+void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
+void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
+		  const u8 *in, size_t len);
+
+struct p8_ghash_ctx {
+	u128 htable[16];
+	struct crypto_shash *fallback;
+};
+
+struct p8_ghash_desc_ctx {
+	u64 shash[2];
+	u8 buffer[GHASH_DIGEST_SIZE];
+	int bytes;
+	struct shash_desc fallback_desc;
+};
+
+static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
+{
+	const char *alg = "ghash-generic";
+	struct crypto_shash *fallback;
+	struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
+	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback)) {
+		printk(KERN_ERR
+		       "Failed to allocate transformation for '%s': %ld\n",
+		       alg, PTR_ERR(fallback));
+		return PTR_ERR(fallback);
+	}
+
+	crypto_shash_set_flags(fallback,
+			       crypto_shash_get_flags((struct crypto_shash
+						       *) tfm));
+
+	/* Check if the descsize defined in the algorithm is still enough. */
+	if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
+	    + crypto_shash_descsize(fallback)) {
+		printk(KERN_ERR
+		       "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
+		       alg,
+		       shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
+		       crypto_shash_descsize(fallback));
+		return -EINVAL;
+	}
+	ctx->fallback = fallback;
+
+	return 0;
+}
+
+static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (ctx->fallback) {
+		crypto_free_shash(ctx->fallback);
+		ctx->fallback = NULL;
+	}
+}
+
+static int p8_ghash_init(struct shash_desc *desc)
+{
+	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+	struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+	dctx->bytes = 0;
+	memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
+	dctx->fallback_desc.tfm = ctx->fallback;
+	dctx->fallback_desc.flags = desc->flags;
+	return crypto_shash_init(&dctx->fallback_desc);
+}
+
+static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
+
+	if (keylen != GHASH_BLOCK_SIZE)
+		return -EINVAL;
+
+	preempt_disable();
+	pagefault_disable();
+	enable_kernel_vsx();
+	gcm_init_p8(ctx->htable, (const u64 *) key);
+	disable_kernel_vsx();
+	pagefault_enable();
+	preempt_enable();
+	return crypto_shash_setkey(ctx->fallback, key, keylen);
+}
+
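+/*
+ * Update path, roughly: in interrupt context everything is deferred to
+ * the generic fallback; otherwise any buffered partial block is completed
+ * first, full 16-byte blocks are hashed by gcm_ghash_p8() with the vector
+ * unit enabled, and whatever remains is buffered for the next call.
+ */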
+static int p8_ghash_update(struct shash_desc *desc,
+			   const u8 *src, unsigned int srclen)
+{
+	unsigned int len;
+	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+	struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+	if (IN_INTERRUPT) {
+		return crypto_shash_update(&dctx->fallback_desc, src,
+					   srclen);
+	} else {
+		if (dctx->bytes) {
+			if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+				memcpy(dctx->buffer + dctx->bytes, src,
+				       srclen);
+				dctx->bytes += srclen;
+				return 0;
+			}
+			memcpy(dctx->buffer + dctx->bytes, src,
+			       GHASH_DIGEST_SIZE - dctx->bytes);
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
+			gcm_ghash_p8(dctx->shash, ctx->htable,
+				     dctx->buffer, GHASH_DIGEST_SIZE);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+			src += GHASH_DIGEST_SIZE - dctx->bytes;
+			srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+			dctx->bytes = 0;
+		}
+		len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+		if (len) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
+			gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+			src += len;
+			srclen -= len;
+		}
+		if (srclen) {
+			memcpy(dctx->buffer, src, srclen);
+			dctx->bytes = srclen;
+		}
+		return 0;
+	}
+}
+
+static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+{
+	int i;
+	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+	struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+	if (IN_INTERRUPT) {
+		return crypto_shash_final(&dctx->fallback_desc, out);
+	} else {
+		if (dctx->bytes) {
+			for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+				dctx->buffer[i] = 0;
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
+			gcm_ghash_p8(dctx->shash, ctx->htable,
+				     dctx->buffer, GHASH_DIGEST_SIZE);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+			dctx->bytes = 0;
+		}
+		memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+		return 0;
+	}
+}
+
+struct shash_alg p8_ghash_alg = {
+	.digestsize = GHASH_DIGEST_SIZE,
+	.init = p8_ghash_init,
+	.update = p8_ghash_update,
+	.final = p8_ghash_final,
+	.setkey = p8_ghash_setkey,
+	.descsize = sizeof(struct p8_ghash_desc_ctx)
+		+ sizeof(struct ghash_desc_ctx),
+	.base = {
+		 .cra_name = "ghash",
+		 .cra_driver_name = "p8_ghash",
+		 .cra_priority = 1000,
+		 .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+		 .cra_blocksize = GHASH_BLOCK_SIZE,
+		 .cra_ctxsize = sizeof(struct p8_ghash_ctx),
+		 .cra_module = THIS_MODULE,
+		 .cra_init = p8_ghash_init_tfm,
+		 .cra_exit = p8_ghash_exit_tfm,
+	},
+};
diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl
new file mode 100644
index 0000000..38b0650
--- /dev/null
+++ b/drivers/crypto/vmx/ghashp8-ppc.pl
@@ -0,0 +1,243 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+
+# This code is taken from the OpenSSL project but the author (Andy Polyakov)
+# has relicensed it under the GPLv2. Therefore this program is free software;
+# you can redistribute it and/or modify it under the terms of the GNU General
+# Public License version 2 as published by the Free Software Foundation.
+#
+# The original headers, including the original license headers, are
+# included below for completeness.
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# GHASH for PowerISA v2.07.
+#
+# July 2014
+#
+# Accurate performance measurements are problematic, because it's
+# always a virtualized setup with a possibly throttled processor.
+# Relative comparison is therefore more informative. This initial
+# version is ~2.1x slower than hardware-assisted AES-128-CTR and ~12x
+# faster than "4-bit" integer-only compiler-generated 64-bit code.
+# "Initial version" means that there is room for further improvement.
+
+$flavour=shift;
+$output =shift;
+
+if ($flavour =~ /64/) {
+	$SIZE_T=8;
+	$LRSAVE=2*$SIZE_T;
+	$STU="stdu";
+	$POP="ld";
+	$PUSH="std";
+} elsif ($flavour =~ /32/) {
+	$SIZE_T=4;
+	$LRSAVE=$SIZE_T;
+	$STU="stwu";
+	$POP="lwz";
+	$PUSH="stw";
+} else { die "nonsense $flavour"; }
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output" || die "can't call $xlate: $!";
+
+my ($Xip,$Htbl,$inp,$len)=map("r$_",(3..6));	# argument block
+
+my ($Xl,$Xm,$Xh,$IN)=map("v$_",(0..3));
+my ($zero,$t0,$t1,$t2,$xC2,$H,$Hh,$Hl,$lemask)=map("v$_",(4..12));
+my $vrsave="r12";
+
+$code=<<___;
+.machine	"any"
+
+.text
+
+.globl	.gcm_init_p8
+	lis		r0,0xfff0
+	li		r8,0x10
+	mfspr		$vrsave,256
+	li		r9,0x20
+	mtspr		256,r0
+	li		r10,0x30
+	lvx_u		$H,0,r4			# load H
+	le?xor		r7,r7,r7
+	le?addi		r7,r7,0x8		# need a vperm starting with 08
+	le?lvsr		5,0,r7
+	le?vspltisb	6,0x0f
+	le?vxor		5,5,6			# set a b-endian mask
+	le?vperm	$H,$H,$H,5
+
+	vspltisb	$xC2,-16		# 0xf0
+	vspltisb	$t0,1			# one
+	vaddubm		$xC2,$xC2,$xC2		# 0xe0
+	vxor		$zero,$zero,$zero
+	vor		$xC2,$xC2,$t0		# 0xe1
+	vsldoi		$xC2,$xC2,$zero,15	# 0xe1...
+	vsldoi		$t1,$zero,$t0,1		# ...1
+	vaddubm		$xC2,$xC2,$xC2		# 0xc2...
+	vspltisb	$t2,7
+	vor		$xC2,$xC2,$t1		# 0xc2....01
+	vspltb		$t1,$H,0		# most significant byte
+	vsl		$H,$H,$t0		# H<<=1
+	vsrab		$t1,$t1,$t2		# broadcast carry bit
+	vand		$t1,$t1,$xC2
+	vxor		$H,$H,$t1		# twisted H
+
+	vsldoi		$H,$H,$H,8		# twist even more ...
+	vsldoi		$xC2,$zero,$xC2,8	# 0xc2.0
+	vsldoi		$Hl,$zero,$H,8		# ... and split
+	vsldoi		$Hh,$H,$zero,8
+
+	stvx_u		$xC2,0,r3		# save pre-computed table
+	stvx_u		$Hl,r8,r3
+	stvx_u		$H, r9,r3
+	stvx_u		$Hh,r10,r3
+
+	mtspr		256,$vrsave
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,2,0
+	.long		0
+.size	.gcm_init_p8,.-.gcm_init_p8
+
+.globl	.gcm_gmult_p8
+	lis		r0,0xfff8
+	li		r8,0x10
+	mfspr		$vrsave,256
+	li		r9,0x20
+	mtspr		256,r0
+	li		r10,0x30
+	lvx_u		$IN,0,$Xip		# load Xi
+
+	lvx_u		$Hl,r8,$Htbl		# load pre-computed table
+	 le?lvsl	$lemask,r0,r0
+	lvx_u		$H, r9,$Htbl
+	 le?vspltisb	$t0,0x07
+	lvx_u		$Hh,r10,$Htbl
+	 le?vxor	$lemask,$lemask,$t0
+	lvx_u		$xC2,0,$Htbl
+	 le?vperm	$IN,$IN,$IN,$lemask
+	vxor		$zero,$zero,$zero
+
+	vpmsumd		$Xl,$IN,$Hl		# H.lo·Xi.lo
+	vpmsumd		$Xm,$IN,$H		# H.hi·Xi.lo+H.lo·Xi.hi
+	vpmsumd		$Xh,$IN,$Hh		# H.hi·Xi.hi
+
+	vpmsumd		$t2,$Xl,$xC2		# 1st phase
+
+	vsldoi		$t0,$Xm,$zero,8
+	vsldoi		$t1,$zero,$Xm,8
+	vxor		$Xl,$Xl,$t0
+	vxor		$Xh,$Xh,$t1
+
+	vsldoi		$Xl,$Xl,$Xl,8
+	vxor		$Xl,$Xl,$t2
+
+	vsldoi		$t1,$Xl,$Xl,8		# 2nd phase
+	vpmsumd		$Xl,$Xl,$xC2
+	vxor		$t1,$t1,$Xh
+	vxor		$Xl,$Xl,$t1
+
+	le?vperm	$Xl,$Xl,$Xl,$lemask
+	stvx_u		$Xl,0,$Xip		# write out Xi
+
+	mtspr		256,$vrsave
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,2,0
+	.long		0
+.size	.gcm_gmult_p8,.-.gcm_gmult_p8
+
+.globl	.gcm_ghash_p8
+	lis		r0,0xfff8
+	li		r8,0x10
+	mfspr		$vrsave,256
+	li		r9,0x20
+	mtspr		256,r0
+	li		r10,0x30
+	lvx_u		$Xl,0,$Xip		# load Xi
+
+	lvx_u		$Hl,r8,$Htbl		# load pre-computed table
+	 le?lvsl	$lemask,r0,r0
+	lvx_u		$H, r9,$Htbl
+	 le?vspltisb	$t0,0x07
+	lvx_u		$Hh,r10,$Htbl
+	 le?vxor	$lemask,$lemask,$t0
+	lvx_u		$xC2,0,$Htbl
+	 le?vperm	$Xl,$Xl,$Xl,$lemask
+	vxor		$zero,$zero,$zero
+
+	lvx_u		$IN,0,$inp
+	addi		$inp,$inp,16
+	subi		$len,$len,16
+	 le?vperm	$IN,$IN,$IN,$lemask
+	vxor		$IN,$IN,$Xl
+	b		Loop
+
+.align	5
+Loop:
+	 subic		$len,$len,16
+	vpmsumd		$Xl,$IN,$Hl		# H.lo·Xi.lo
+	 subfe.		r0,r0,r0		# borrow?-1:0
+	vpmsumd		$Xm,$IN,$H		# H.hi·Xi.lo+H.lo·Xi.hi
+	 and		r0,r0,$len
+	vpmsumd		$Xh,$IN,$Hh		# H.hi·Xi.hi
+	 add		$inp,$inp,r0
+
+	vpmsumd		$t2,$Xl,$xC2		# 1st phase
+
+	vsldoi		$t0,$Xm,$zero,8
+	vsldoi		$t1,$zero,$Xm,8
+	vxor		$Xl,$Xl,$t0
+	vxor		$Xh,$Xh,$t1
+
+	vsldoi		$Xl,$Xl,$Xl,8
+	vxor		$Xl,$Xl,$t2
+	 lvx_u		$IN,0,$inp
+	 addi		$inp,$inp,16
+
+	vsldoi		$t1,$Xl,$Xl,8		# 2nd phase
+	vpmsumd		$Xl,$Xl,$xC2
+	 le?vperm	$IN,$IN,$IN,$lemask
+	vxor		$t1,$t1,$Xh
+	vxor		$IN,$IN,$t1
+	vxor		$IN,$IN,$Xl
+	beq		Loop			# did $len-=16 borrow?
+
+	vxor		$Xl,$Xl,$t1
+	le?vperm	$Xl,$Xl,$Xl,$lemask
+	stvx_u		$Xl,0,$Xip		# write out Xi
+
+	mtspr		256,$vrsave
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,4,0
+	.long		0
+.size	.gcm_ghash_p8,.-.gcm_ghash_p8
+
+.asciz  "GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+.align  2
+___
+
+foreach (split("\n",$code)) {
+	if ($flavour =~ /le$/o) {	# little-endian
+	    s/le\?//o		or
+	    s/be\?/#be#/o;
+	} else {
+	    s/le\?/#le#/o	or
+	    s/be\?//o;
+	}
+	print $_,"\n";
+}
+
+close STDOUT; # enforce flush
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
new file mode 100644
index 0000000..36db2ef
--- /dev/null
+++ b/drivers/crypto/vmx/ppc-xlate.pl
@@ -0,0 +1,229 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+
+# PowerPC assembler distiller by <appro>.
+
+my $flavour = shift;
+my $output = shift;
+open STDOUT,">$output" || die "can't open $output: $!";
+
+my %GLOBALS;
+my $dotinlocallabels=($flavour=~/linux/)?1:0;
+
+################################################################
+# directives which need special treatment on different platforms
+################################################################
+my $globl = sub {
+    my $junk = shift;
+    my $name = shift;
+    my $global = \$GLOBALS{$name};
+    my $ret;
+
+    $name =~ s|^[\.\_]||;
+ 
+    SWITCH: for ($flavour) {
+	/aix/		&& do { $name = ".$name";
+				last;
+			      };
+	/osx/		&& do { $name = "_$name";
+				last;
+			      };
+	/linux/
+			&& do {	$ret = "_GLOBAL($name)";
+				last;
+			      };
+    }
+
+    $ret = ".globl	$name\nalign 5\n$name:" if (!$ret);
+    $$global = $name;
+    $ret;
+};
+my $text = sub {
+    my $ret = ($flavour =~ /aix/) ? ".csect\t.text[PR],7" : ".text";
+    $ret = ".abiversion	2\n".$ret	if ($flavour =~ /linux.*64le/);
+    $ret;
+};
+my $machine = sub {
+    my $junk = shift;
+    my $arch = shift;
+    if ($flavour =~ /osx/)
+    {	$arch =~ s/\"//g;
+	$arch = ($flavour=~/64/) ? "ppc970-64" : "ppc970" if ($arch eq "any");
+    }
+    ".machine	$arch";
+};
+my $size = sub {
+    if ($flavour =~ /linux/)
+    {	shift;
+	my $name = shift; $name =~ s|^[\.\_]||;
+	my $ret  = ".size	$name,.-".($flavour=~/64$/?".":"").$name;
+	$ret .= "\n.size	.$name,.-.$name" if ($flavour=~/64$/);
+	$ret;
+    }
+    else
+    {	"";	}
+};
+my $asciz = sub {
+    shift;
+    my $line = join(",",@_);
+    if ($line =~ /^"(.*)"$/)
+    {	".byte	" . join(",",unpack("C*",$1),0) . "\n.align	2";	}
+    else
+    {	"";	}
+};
+my $quad = sub {
+    shift;
+    my @ret;
+    my ($hi,$lo);
+    for (@_) {
+	if (/^0x([0-9a-f]*?)([0-9a-f]{1,8})$/io)
+	{  $hi=$1?"0x$1":"0"; $lo="0x$2";  }
+	elsif (/^([0-9]+)$/o)
+	{  $hi=$1>>32; $lo=$1&0xffffffff;  } # error-prone with 32-bit perl
+	else
+	{  $hi=undef; $lo=$_; }
+
+	if (defined($hi))
+	{  push(@ret,$flavour=~/le$/o?".long\t$lo,$hi":".long\t$hi,$lo");  }
+	else
+	{  push(@ret,".quad	$lo");  }
+    }
+    join("\n",@ret);
+};
+
+################################################################
+# simplified mnemonics not handled by at least one assembler
+################################################################
+my $cmplw = sub {
+    my $f = shift;
+    my $cr = 0; $cr = shift if ($#_>1);
+    # Some out-of-date 32-bit GNU assembler just can't handle cmplw...
+    ($flavour =~ /linux.*32/) ?
+	"	.long	".sprintf "0x%x",31<<26|$cr<<23|$_[0]<<16|$_[1]<<11|64 :
+	"	cmplw	".join(',',$cr,@_);
+};
+my $bdnz = sub {
+    my $f = shift;
+    my $bo = $f=~/[\+\-]/ ? 16+9 : 16;	# optional "to be taken" hint
+    "	bc	$bo,0,".shift;
+} if ($flavour!~/linux/);
+my $bltlr = sub {
+    my $f = shift;
+    my $bo = $f=~/\-/ ? 12+2 : 12;	# optional "not to be taken" hint
+    ($flavour =~ /linux/) ?		# GNU as doesn't allow most recent hints
+	"	.long	".sprintf "0x%x",19<<26|$bo<<21|16<<1 :
+	"	bclr	$bo,0";
+};
+my $bnelr = sub {
+    my $f = shift;
+    my $bo = $f=~/\-/ ? 4+2 : 4;	# optional "not to be taken" hint
+    ($flavour =~ /linux/) ?		# GNU as doesn't allow most recent hints
+	"	.long	".sprintf "0x%x",19<<26|$bo<<21|2<<16|16<<1 :
+	"	bclr	$bo,2";
+};
+my $beqlr = sub {
+    my $f = shift;
+    my $bo = $f=~/-/ ? 12+2 : 12;	# optional "not to be taken" hint
+    ($flavour =~ /linux/) ?		# GNU as doesn't allow most recent hints
+	"	.long	".sprintf "0x%X",19<<26|$bo<<21|2<<16|16<<1 :
+	"	bclr	$bo,2";
+};
+# GNU assembler can't handle extrdi rA,rS,16,48, or when sum of last two
+# arguments is 64, with "operand out of range" error.
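+# The rewrite below uses the standard equivalence extrdi rA,rS,n,b ==
+# rldicl rA,rS,b+n,64-n; e.g. extrdi r3,r4,16,48 becomes rldicl r3,r4,0,48.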
+my $extrdi = sub {
+    my ($f,$ra,$rs,$n,$b) = @_;
+    $b = ($b+$n)&63; $n = 64-$n;
+    "	rldicl	$ra,$rs,$b,$n";
+};
+my $vmr = sub {
+    my ($f,$vx,$vy) = @_;
+    "	vor	$vx,$vy,$vy";
+};
+
+# Some ABIs specify vrsave, special-purpose register #256, as reserved
+# for system use.
+my $no_vrsave = ($flavour =~ /linux-ppc64le/);
+my $mtspr = sub {
+    my ($f,$idx,$ra) = @_;
+    if ($idx == 256 && $no_vrsave) {
+	"	or	$ra,$ra,$ra";
+    } else {
+	"	mtspr	$idx,$ra";
+    }
+};
+my $mfspr = sub {
+    my ($f,$rd,$idx) = @_;
+    if ($idx == 256 && $no_vrsave) {
+	"	li	$rd,-1";
+    } else {
+	"	mfspr	$rd,$idx";
+    }
+};
+
+# PowerISA 2.06 stuff
+sub vsxmem_op {
+    my ($f, $vrt, $ra, $rb, $op) = @_;
+    "	.long	".sprintf "0x%X",(31<<26)|($vrt<<21)|($ra<<16)|($rb<<11)|($op*2+1);
+}
+# made-up unaligned memory reference AltiVec/VMX instructions
+my $lvx_u	= sub {	vsxmem_op(@_, 844); };	# lxvd2x
+my $stvx_u	= sub {	vsxmem_op(@_, 972); };	# stxvd2x
+my $lvdx_u	= sub {	vsxmem_op(@_, 588); };	# lxsdx
+my $stvdx_u	= sub {	vsxmem_op(@_, 716); };	# stxsdx
+my $lvx_4w	= sub { vsxmem_op(@_, 780); };	# lxvw4x
+my $stvx_4w	= sub { vsxmem_op(@_, 908); };	# stxvw4x
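+# For illustration, "lvx_u v0,0,r3" goes through vsxmem_op as vrt=0, ra=0,
+# rb=3, op=844 and is emitted as ".long 0x7C001E99"; the "*2+1" sets the TX
+# bit so VSX register 32+vrt (i.e. the VMX register) is addressed.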
+
+# PowerISA 2.07 stuff
+sub vcrypto_op {
+    my ($f, $vrt, $vra, $vrb, $op) = @_;
+    "	.long	".sprintf "0x%X",(4<<26)|($vrt<<21)|($vra<<16)|($vrb<<11)|$op;
+}
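+# e.g. "vpmsumd v0,v1,v2" maps to vrt=0, vra=1, vrb=2 with op 1224 and is
+# emitted as ".long 0x100114C8".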
+my $vcipher	= sub { vcrypto_op(@_, 1288); };
+my $vcipherlast	= sub { vcrypto_op(@_, 1289); };
+my $vncipher	= sub { vcrypto_op(@_, 1352); };
+my $vncipherlast= sub { vcrypto_op(@_, 1353); };
+my $vsbox	= sub { vcrypto_op(@_, 0, 1480); };
+my $vshasigmad	= sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1730); };
+my $vshasigmaw	= sub { my ($st,$six)=splice(@_,-2); vcrypto_op(@_, $st<<4|$six, 1666); };
+my $vpmsumb	= sub { vcrypto_op(@_, 1032); };
+my $vpmsumd	= sub { vcrypto_op(@_, 1224); };
+my $vpmsubh	= sub { vcrypto_op(@_, 1096); };
+my $vpmsumw	= sub { vcrypto_op(@_, 1160); };
+my $vaddudm	= sub { vcrypto_op(@_, 192);  };
+my $vadduqm	= sub { vcrypto_op(@_, 256);  };
+
+my $mtsle	= sub {
+    my ($f, $arg) = @_;
+    "	.long	".sprintf "0x%X",(31<<26)|($arg<<21)|(147*2);
+};
+
+print "#include <asm/ppc_asm.h>\n" if $flavour =~ /linux/;
+
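+# Main pass, roughly: strip asm- and C-style comments and surrounding
+# whitespace, normalise .L local labels, then peel off the leading
+# (possibly dotted) mnemonic; for instructions (not directives) the
+# r/f/cr/v/vs register prefixes are reduced to bare numbers on non-OSX
+# flavours, and the mnemonic is either rewritten by a matching perl
+# handler defined above or passed through unchanged.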
+while($line=<>) {
+
+    $line =~ s|[#!;].*$||;	# get rid of asm-style comments...
+    $line =~ s|/\*.*\*/||;	# ... and C-style comments...
+    $line =~ s|^\s+||;		# ... and skip white spaces in beginning...
+    $line =~ s|\s+$||;		# ... and at the end
+
+    {
+	$line =~ s|\b\.L(\w+)|L$1|g;	# common denominator for Locallabel
+	$line =~ s|\bL(\w+)|\.L$1|g	if ($dotinlocallabels);
+    }
+
+    {
+	$line =~ s|^\s*(\.?)(\w+)([\.\+\-]?)\s*||;
+	my $c = $1; $c = "\t" if ($c eq "");
+	my $mnemonic = $2;
+	my $f = $3;
+	my $opcode = eval("\$$mnemonic");
+	$line =~ s/\b(c?[rf]|v|vs)([0-9]+)\b/$2/g if ($c ne "." and $flavour !~ /osx/);
+	if (ref($opcode) eq 'CODE') { $line = &$opcode($f,split(',',$line)); }
+	elsif ($mnemonic)           { $line = $c.$mnemonic.$f."\t".$line; }
+    }
+
+    print $line if ($line);
+    print "\n";
+}
+
+close STDOUT;
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
new file mode 100644
index 0000000..31a98dc
--- /dev/null
+++ b/drivers/crypto/vmx/vmx.c
@@ -0,0 +1,88 @@
+/**
+ * Routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <asm/cputable.h>
+#include <crypto/internal/hash.h>
+
+extern struct shash_alg p8_ghash_alg;
+extern struct crypto_alg p8_aes_alg;
+extern struct crypto_alg p8_aes_cbc_alg;
+extern struct crypto_alg p8_aes_ctr_alg;
+extern struct crypto_alg p8_aes_xts_alg;
+static struct crypto_alg *algs[] = {
+	&p8_aes_alg,
+	&p8_aes_cbc_alg,
+	&p8_aes_ctr_alg,
+	&p8_aes_xts_alg,
+	NULL,
+};
+
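+/*
+ * Roughly: register each VMX cipher alg in turn, unwinding the ones already
+ * registered if any registration fails, then register the GHASH shash; if
+ * that fails, the cipher algs are unregistered again.
+ */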
+int __init p8_init(void)
+{
+	int ret = 0;
+	struct crypto_alg **alg_it;
+
+	for (alg_it = algs; *alg_it; alg_it++) {
+		ret = crypto_register_alg(*alg_it);
+		printk(KERN_INFO "crypto_register_alg '%s' = %d\n",
+		       (*alg_it)->cra_name, ret);
+		if (ret) {
+			for (alg_it--; alg_it >= algs; alg_it--)
+				crypto_unregister_alg(*alg_it);
+			break;
+		}
+	}
+	if (ret)
+		return ret;
+
+	ret = crypto_register_shash(&p8_ghash_alg);
+	if (ret) {
+		for (alg_it = algs; *alg_it; alg_it++)
+			crypto_unregister_alg(*alg_it);
+	}
+	return ret;
+}
+
+void __exit p8_exit(void)
+{
+	struct crypto_alg **alg_it;
+
+	for (alg_it = algs; *alg_it; alg_it++) {
+		printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name);
+		crypto_unregister_alg(*alg_it);
+	}
+	crypto_unregister_shash(&p8_ghash_alg);
+}
+
+module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, p8_init);
+module_exit(p8_exit);
+
+MODULE_AUTHOR("Marcelo Cerri<mhcerri@br.ibm.com>");
+MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions "
+		   "support on Power 8");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");