v4.19.13 snapshot.
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
new file mode 100644
index 0000000..1eb8527
--- /dev/null
+++ b/drivers/crypto/caam/Kconfig
@@ -0,0 +1,154 @@
+config CRYPTO_DEV_FSL_CAAM
+	tristate "Freescale CAAM-Multicore driver backend"
+	depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+	select SOC_BUS
+	help
+	  Enables the driver module for Freescale's Cryptographic Accelerator
+	  and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
+	  This module creates job ring devices and configures the h/w to
+	  operate as a DPAA component automatically, depending on h/w
+	  feature availability.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called caam.
+
+config CRYPTO_DEV_FSL_CAAM_JR
+	tristate "Freescale CAAM Job Ring driver backend"
+	depends on CRYPTO_DEV_FSL_CAAM
+	default y
+	help
+	  Enables the driver module for the Job Rings that are part of
+	  Freescale's Cryptographic Accelerator and Assurance Module
+	  (CAAM). This module adds a job ring operation interface.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called caam_jr.
+
+config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+	int "Job Ring size"
+	depends on CRYPTO_DEV_FSL_CAAM_JR
+	range 2 9
+	default "9"
+	help
+	  Select the size of the Job Rings as a power of 2, in the
+	  range 2-9 (i.e. ring sizes of 4-512 entries).
+	  Examples:
+		2 => 4
+		3 => 8
+		4 => 16
+		5 => 32
+		6 => 64
+		7 => 128
+		8 => 256
+		9 => 512
+
+config CRYPTO_DEV_FSL_CAAM_INTC
+	bool "Job Ring interrupt coalescing"
+	depends on CRYPTO_DEV_FSL_CAAM_JR
+	help
+	  Enable the Job Ring's interrupt coalescing feature.
+
+	  Note: the driver already provides adequate
+	  interrupt coalescing in software.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
+	int "Job Ring interrupt coalescing count threshold"
+	depends on CRYPTO_DEV_FSL_CAAM_INTC
+	range 1 255
+	default 255
+	help
+	  Select the number of descriptor completions to queue before
+	  raising an interrupt, in the range 1-255. Note that a selection
+	  of 1 functionally defeats the coalescing feature, and a selection
+	  equal to or greater than the job ring size will force timeouts.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
+	int "Job Ring interrupt coalescing timer threshold"
+	depends on CRYPTO_DEV_FSL_CAAM_INTC
+	range 1 65535
+	default 2048
+	help
+	  Select the number of bus clocks/64 after which to time out when
+	  one or more descriptor completions are queued without reaching
+	  the count threshold. Range is 1-65535.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+	tristate "Register algorithm implementations with the Crypto API"
+	depends on CRYPTO_DEV_FSL_CAAM_JR
+	default y
+	select CRYPTO_AEAD
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	help
+	  Selecting this will offload crypto for users of the
+	  scatterlist crypto API (such as the Linux-native IPsec
+	  stack) to the SEC4 via job ring.
+
+	  To compile this as a module, choose M here: the module
+	  will be called caamalg.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+	tristate "Queue Interface as Crypto API backend"
+	depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
+	default y
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	help
+	  Selecting this will use the CAAM Queue Interface (QI) for sending
+	  and receiving crypto jobs to/from CAAM. This gives better
+	  performance than the job ring interface when the number of cores
+	  is greater than the number of job rings assigned to the kernel.
+	  The number of portals assigned to the kernel should also be
+	  greater than the number of job rings.
+
+	  To compile this as a module, choose M here: the module
+	  will be called caamalg_qi.
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API
+	tristate "Register hash algorithm implementations with Crypto API"
+	depends on CRYPTO_DEV_FSL_CAAM_JR
+	default y
+	select CRYPTO_HASH
+	help
+	  Selecting this will offload ahash for users of the
+	  scatterlist crypto API to the SEC4 via job ring.
+
+	  To compile this as a module, choose M here: the module
+	  will be called caamhash.
+
+config CRYPTO_DEV_FSL_CAAM_PKC_API
+	tristate "Register public key cryptography implementations with Crypto API"
+	depends on CRYPTO_DEV_FSL_CAAM_JR
+	default y
+	select CRYPTO_RSA
+	help
+	  Selecting this will allow SEC public key support for RSA.
+	  Supported cryptographic primitives: encryption, decryption,
+	  signature and verification.
+
+	  To compile this as a module, choose M here: the module
+	  will be called caam_pkc.
+
+config CRYPTO_DEV_FSL_CAAM_RNG_API
+	tristate "Register caam device for hwrng API"
+	depends on CRYPTO_DEV_FSL_CAAM_JR
+	default y
+	select CRYPTO_RNG
+	select HW_RANDOM
+	help
+	  Selecting this will register the SEC4 hardware RNG with the
+	  hw_random API for supplying the kernel entropy pool.
+
+	  To compile this as a module, choose M here: the module
+	  will be called caamrng.
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+	bool "Enable debug output in CAAM driver"
+	depends on CRYPTO_DEV_FSL_CAAM
+	help
+	  Selecting this will enable printing of various debug
+	  information in the CAAM driver.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+	def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
+		      CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
new file mode 100644
index 0000000..cb652ee
--- /dev/null
+++ b/drivers/crypto/caam/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the CAAM backend and dependent components
+#
+ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
+	ccflags-y := -DDEBUG
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
+
+caam-objs := ctrl.o
+caam_jr-objs := jr.o key_gen.o error.o
+caam_pkc-y := caampkc.o pkc_desc.o
+ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
+	ccflags-y += -DCONFIG_CAAM_QI
+	caam-objs += qi.o
+endif
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
new file mode 100644
index 0000000..ec40f99
--- /dev/null
+++ b/drivers/crypto/caam/caamalg.c
@@ -0,0 +1,3561 @@
+/*
+ * caam - Freescale FSL CAAM support for crypto API
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
+ *
+ * Based on talitos crypto API driver.
+ *
+ * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
+ *
+ * ---------------                     ---------------
+ * | JobDesc #1  |-------------------->|  ShareDesc  |
+ * | *(packet 1) |                     |   (PDB)     |
+ * ---------------      |------------->|  (hashKey)  |
+ *       .              |              | (cipherKey) |
+ *       .              |    |-------->| (operation) |
+ * ---------------      |    |         ---------------
+ * | JobDesc #2  |------|    |
+ * | *(packet 2) |           |
+ * ---------------           |
+ *       .                   |
+ *       .                   |
+ * ---------------           |
+ * | JobDesc #3  |------------
+ * | *(packet 3) |
+ * ---------------
+ *
+ * The SharedDesc never changes for a connection unless rekeyed, but
+ * each packet will likely be in a different place. So all we need
+ * to know to process the packet is where the input is, where the
+ * output goes, and what context we want to process with. Context is
+ * in the SharedDesc, packet references in the JobDesc.
+ *
+ * So, a job desc looks like:
+ *
+ * ---------------------
+ * | Header            |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR       |
+ * | (output buffer)   |
+ * | (output length)   |
+ * | SEQ_IN_PTR        |
+ * | (input buffer)    |
+ * | (input length)    |
+ * ---------------------
+ */
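+
+/*
+ * As a minimal sketch (variable names here are illustrative), such a
+ * job descriptor is built with the desc_constr.h helpers used
+ * throughout this file; see init_aead_job() below for the real
+ * construction:
+ *
+ *	len = desc_len(sh_desc);
+ *	init_job_desc_shared(desc, sh_desc_dma, len,
+ *			     HDR_SHARE_DEFER | HDR_REVERSE);
+ *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
+ *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
+ */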
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "key_gen.h"
+#include "caamalg_desc.h"
+
+/*
+ * crypto alg
+ */
+#define CAAM_CRA_PRIORITY		3000
+/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
+#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
+					 CTR_RFC3686_NONCE_SIZE + \
+					 SHA512_DIGEST_SIZE * 2)
+
+#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
+#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
+					 CAAM_CMD_SZ * 4)
+#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
+					 CAAM_CMD_SZ * 5)
+
+#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
+#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
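+
+/*
+ * Roughly: the h/w descriptor buffer is 64 words (64 * CAAM_CMD_SZ =
+ * CAAM_DESC_BYTES_MAX bytes), so DESC_MAX_USED_BYTES is what remains
+ * for a shared descriptor once the job descriptor's header, pointers
+ * and sequence commands (DESC_JOB_IO_LEN) are accounted for.
+ */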
+
+#ifdef DEBUG
+/* for print_hex_dumps with line references */
+#define debug(format, arg...) printk(format, arg)
+#else
+#define debug(format, arg...)
+#endif
+
+static struct list_head alg_list;
+
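+/*
+ * caam_alg_entry - per-algorithm CAAM parameters
+ * @class1_alg_type: cipher (class 1 CHA) algorithm and mode
+ * @class2_alg_type: integrity (class 2 CHA) algorithm and mode
+ * @rfc3686: true if the CTR cipher is wrapped as per RFC3686
+ * @geniv: true for the IV-generating (givencrypt-style) variant
+ */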
+struct caam_alg_entry {
+	int class1_alg_type;
+	int class2_alg_type;
+	bool rfc3686;
+	bool geniv;
+};
+
+struct caam_aead_alg {
+	struct aead_alg aead;
+	struct caam_alg_entry caam;
+	bool registered;
+};
+
+/*
+ * per-session context
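+ *
+ * Shared descriptors and the key material live directly in this
+ * context; they are DMA-mapped up front and synced for device (in the
+ * ctx->dir direction) whenever their contents change.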
+ */
+struct caam_ctx {
+	u32 sh_desc_enc[DESC_MAX_USED_LEN];
+	u32 sh_desc_dec[DESC_MAX_USED_LEN];
+	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+	u8 key[CAAM_MAX_KEY_SIZE];
+	dma_addr_t sh_desc_enc_dma;
+	dma_addr_t sh_desc_dec_dma;
+	dma_addr_t sh_desc_givenc_dma;
+	dma_addr_t key_dma;
+	enum dma_data_direction dir;
+	struct device *jrdev;
+	struct alginfo adata;
+	struct alginfo cdata;
+	unsigned int authsize;
+};
+
+static int aead_null_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
+			ctx->adata.keylen_pad;
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
+		ctx->adata.key_inline = true;
+		ctx->adata.key_virt = ctx->key;
+	} else {
+		ctx->adata.key_inline = false;
+		ctx->adata.key_dma = ctx->key_dma;
+	}
+
+	/* aead_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
+				    ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
+		ctx->adata.key_inline = true;
+		ctx->adata.key_virt = ctx->key;
+	} else {
+		ctx->adata.key_inline = false;
+		ctx->adata.key_dma = ctx->key_dma;
+	}
+
+	/* aead_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
+				    ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 struct caam_aead_alg, aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	u32 ctx1_iv_off = 0;
+	u32 *desc, *nonce = NULL;
+	u32 inl_mask;
+	unsigned int data_len[2];
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = alg->caam.rfc3686;
+
+	if (!ctx->authsize)
+		return 0;
+
+	/* NULL encryption / decryption */
+	if (!ctx->cdata.keylen)
+		return aead_null_set_sh_desc(aead);
+
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 */
+	if (is_rfc3686) {
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+	}
+
+	data_len[0] = ctx->adata.keylen_pad;
+	data_len[1] = ctx->cdata.keylen;
+
+	if (alg->caam.geniv)
+		goto skip_enc;
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (desc_inline_query(DESC_AEAD_ENC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
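+	/* bit 0 of inl_mask flags the (split) auth key, bit 1 the cipher key */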
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
+
+	/* aead_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
+			       ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
+			       false, ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+skip_enc:
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (desc_inline_query(DESC_AEAD_DEC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
+
+	/* aead_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
+			       ctx->authsize, alg->caam.geniv, is_rfc3686,
+			       nonce, ctx1_iv_off, false, ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	if (!alg->caam.geniv)
+		goto skip_givenc;
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
+
+	/* aead_givencrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
+				  ctx->authsize, is_rfc3686, nonce,
+				  ctx1_iv_off, false, ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+skip_givenc:
+	return 0;
+}
+
+static int aead_setauthsize(struct crypto_aead *authenc,
+				    unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	aead_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * AES GCM encrypt shared descriptor
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_GCM_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_GCM_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	gcm_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * RFC4106 encrypt shared descriptor
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+				  false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+				  false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	rfc4106_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * RFC4543 encrypt shared descriptor
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+				  false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+				  false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	rfc4543_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int aead_setkey(struct crypto_aead *aead,
+			       const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	struct crypto_authenc_keys keys;
+	int ret = 0;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+		goto badkey;
+
+#ifdef DEBUG
+	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
+	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
+	       keys.authkeylen);
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	/*
+	 * If DKP (Derived Key Protocol) is supported (Era 6 onwards), use
+	 * it in the shared descriptor to generate the split key.
+	 */
+	if (ctrlpriv->era >= 6) {
+		ctx->adata.keylen = keys.authkeylen;
+		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+						      OP_ALG_ALGSEL_MASK);
+
+		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+			goto badkey;
+
+		memcpy(ctx->key, keys.authkey, keys.authkeylen);
+		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+		       keys.enckeylen);
+		dma_sync_single_for_device(jrdev, ctx->key_dma,
+					   ctx->adata.keylen_pad +
+					   keys.enckeylen, ctx->dir);
+		goto skip_split_key;
+	}
+
+	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
+			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
+			    keys.enckeylen);
+	if (ret)
+		goto badkey;
+
+	/* append the encryption key after the auth split key */
+	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+				   keys.enckeylen, ctx->dir);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+		       ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
+skip_split_key:
+	ctx->cdata.keylen = keys.enckeylen;
+	memzero_explicit(&keys, sizeof(keys));
+	return aead_set_sh_desc(aead);
+badkey:
+	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	memzero_explicit(&keys, sizeof(keys));
+	return -EINVAL;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+		      const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+	ctx->cdata.keylen = keylen;
+
+	return gcm_set_sh_desc(aead);
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->cdata.keylen = keylen - 4;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   ctx->dir);
+	return rfc4106_set_sh_desc(aead);
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->cdata.keylen = keylen - 4;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   ctx->dir);
+	return rfc4543_set_sh_desc(aead);
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+	const char *alg_name = crypto_tfm_alg_name(tfm);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 *desc;
+	u32 ctx1_iv_off = 0;
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = (ctr_mode &&
+				 (strstr(alg_name, "rfc3686") != NULL));
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 *	| *key = {KEY, NONCE}
+	 */
+	if (is_rfc3686) {
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+		keylen -= CTR_RFC3686_NONCE_SIZE;
+	}
+
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = key;
+	ctx->cdata.key_inline = true;
+
+	/* ablkcipher_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+				     ctx1_iv_off);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/* ablkcipher_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+				     ctx1_iv_off);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/* ablkcipher_givencrypt shared descriptor */
+	desc = ctx->sh_desc_givenc;
+	cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
+					ctx1_iv_off);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+				 const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+
+	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(ablkcipher,
+					    CRYPTO_TFM_RES_BAD_KEY_LEN);
+		dev_err(jrdev, "key size mismatch\n");
+		return -EINVAL;
+	}
+
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = key;
+	ctx->cdata.key_inline = true;
+
+	/* xts_ablkcipher_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	/* xts_ablkcipher_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @src_nents: number of segments in input s/w scatterlist
+ * @dst_nents: number of segments in output s/w scatterlist
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ */
+struct aead_edesc {
+	int src_nents;
+	int dst_nents;
+	int sec4_sg_bytes;
+	dma_addr_t sec4_sg_dma;
+	struct sec4_sg_entry *sec4_sg;
+	u32 hw_desc[];
+};
+
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input s/w scatterlist
+ * @dst_nents: number of segments in output s/w scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @iv_dir: DMA mapping direction for IV
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ *	     and IV
+ */
+struct ablkcipher_edesc {
+	int src_nents;
+	int dst_nents;
+	dma_addr_t iv_dma;
+	enum dma_data_direction iv_dir;
+	int sec4_sg_bytes;
+	dma_addr_t sec4_sg_dma;
+	struct sec4_sg_entry *sec4_sg;
+	u32 hw_desc[];
+};
+
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+		       struct scatterlist *dst, int src_nents,
+		       int dst_nents,
+		       dma_addr_t iv_dma, int ivsize,
+		       enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
+		       int sec4_sg_bytes)
+{
+	if (dst != src) {
+		if (src_nents)
+			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+	} else {
+		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+	}
+
+	if (iv_dma)
+		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
+	if (sec4_sg_bytes)
+		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
+				 DMA_TO_DEVICE);
+}
+
+static void aead_unmap(struct device *dev,
+		       struct aead_edesc *edesc,
+		       struct aead_request *req)
+{
+	caam_unmap(dev, req->src, req->dst,
+		   edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
+		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+			     struct ablkcipher_edesc *edesc,
+			     struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	caam_unmap(dev, req->src, req->dst,
+		   edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize, edesc->iv_dir,
+		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+}
+
+static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				   void *context)
+{
+	struct aead_request *req = context;
+	struct aead_edesc *edesc;
+
+#ifdef DEBUG
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
+
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	aead_unmap(jrdev, edesc, req);
+
+	kfree(edesc);
+
+	aead_request_complete(req, err);
+}
+
+static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				   void *context)
+{
+	struct aead_request *req = context;
+	struct aead_edesc *edesc;
+
+#ifdef DEBUG
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
+
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	aead_unmap(jrdev, edesc, req);
+
+	/*
+	 * verify hw auth check passed else return -EBADMSG
+	 */
+	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
+		err = -EBADMSG;
+
+	kfree(edesc);
+
+	aead_request_complete(req, err);
+}
+
+static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				   void *context)
+{
+	struct ablkcipher_request *req = context;
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+#ifdef DEBUG
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       edesc->src_nents > 1 ? 100 : ivsize, 1);
+#endif
+	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+
+	ablkcipher_unmap(jrdev, edesc, req);
+
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block. This is used e.g. by the CTS mode.
+	 */
+	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+				 ivsize, 0);
+
+	/* If an initial IV was generated, copy it into the GIVCIPHER request */
+	if (edesc->iv_dir == DMA_FROM_DEVICE) {
+		u8 *iv;
+		struct skcipher_givcrypt_request *greq;
+
+		greq = container_of(req, struct skcipher_givcrypt_request,
+				    creq);
+		iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
+		     edesc->sec4_sg_bytes;
+		memcpy(greq->giv, iv, ivsize);
+	}
+
+	kfree(edesc);
+
+	ablkcipher_request_complete(req, err);
+}
+
+static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				    void *context)
+{
+	struct ablkcipher_request *req = context;
+	struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+#endif
+	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+
+	ablkcipher_unmap(jrdev, edesc, req);
+	kfree(edesc);
+
+	ablkcipher_request_complete(req, err);
+}
+
+/*
+ * Fill in aead job descriptor
+ */
+static void init_aead_job(struct aead_request *req,
+			  struct aead_edesc *edesc,
+			  bool all_contig, bool encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	int authsize = ctx->authsize;
+	u32 *desc = edesc->hw_desc;
+	u32 out_options, in_options;
+	dma_addr_t dst_dma, src_dma;
+	int len, sec4_sg_index = 0;
+	dma_addr_t ptr;
+	u32 *sh_desc;
+
+	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
+
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+	if (all_contig) {
+		src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
+		in_options = 0;
+	} else {
+		src_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+		in_options = LDST_SGF;
+	}
+
+	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
+			  in_options);
+
+	dst_dma = src_dma;
+	out_options = in_options;
+
+	if (unlikely(req->src != req->dst)) {
+		if (edesc->dst_nents == 1) {
+			dst_dma = sg_dma_address(req->dst);
+		} else {
+			dst_dma = edesc->sec4_sg_dma +
+				  sec4_sg_index *
+				  sizeof(struct sec4_sg_entry);
+			out_options = LDST_SGF;
+		}
+	}
+
+	if (encrypt)
+		append_seq_out_ptr(desc, dst_dma,
+				   req->assoclen + req->cryptlen + authsize,
+				   out_options);
+	else
+		append_seq_out_ptr(desc, dst_dma,
+				   req->assoclen + req->cryptlen - authsize,
+				   out_options);
+}
+
+static void init_gcm_job(struct aead_request *req,
+			 struct aead_edesc *edesc,
+			 bool all_contig, bool encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 *desc = edesc->hw_desc;
+	bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
+	unsigned int last;
+
+	init_aead_job(req, edesc, all_contig, encrypt);
+	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+
+	/* BUG This should not be specific to generic GCM. */
+	last = 0;
+	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
+		last = FIFOLD_TYPE_LAST1;
+
+	/* Read GCM IV */
+	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+			 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
+			 GCM_AES_IV_SIZE | last);
+	/* Append Salt */
+	if (!generic_gcm)
+		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
+	/* Append IV */
+	append_data(desc, req->iv, ivsize);
+	/* End of blank commands */
+}
+
+static void init_authenc_job(struct aead_request *req,
+			     struct aead_edesc *edesc,
+			     bool all_contig, bool encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 struct caam_aead_alg, aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = alg->caam.rfc3686;
+	u32 *desc = edesc->hw_desc;
+	u32 ivoffset = 0;
+
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ivoffset = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 */
+	if (is_rfc3686)
+		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
+
+	init_aead_job(req, edesc, all_contig, encrypt);
+
+	/*
+	 * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
+	 * having DPOVRD as destination.
+	 */
+	if (ctrlpriv->era < 3)
+		append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+	else
+		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
+
+	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
+		append_load_as_imm(desc, req->iv, ivsize,
+				   LDST_CLASS_1_CCB |
+				   LDST_SRCDST_BYTE_CONTEXT |
+				   (ivoffset << LDST_OFFSET_SHIFT));
+}
+
+/*
+ * Fill in ablkcipher job descriptor
+ */
+static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+				struct ablkcipher_edesc *edesc,
+				struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 *desc = edesc->hw_desc;
+	u32 out_options = 0;
+	dma_addr_t dst_dma;
+	int len;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+	pr_err("asked=%d, nbytes%d\n",
+	       (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
+#endif
+	caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+	append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
+			  LDST_SGF);
+
+	if (likely(req->src == req->dst)) {
+		dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
+		out_options = LDST_SGF;
+	} else {
+		if (edesc->dst_nents == 1) {
+			dst_dma = sg_dma_address(req->dst);
+		} else {
+			dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
+				  sizeof(struct sec4_sg_entry);
+			out_options = LDST_SGF;
+		}
+	}
+	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
+}
+
+/*
+ * Fill in ablkcipher givencrypt job descriptor
+ */
+static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+				    struct ablkcipher_edesc *edesc,
+				    struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 *desc = edesc->hw_desc;
+	u32 in_options;
+	dma_addr_t dst_dma, src_dma;
+	int len, sec4_sg_index = 0;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+#endif
+	caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+	if (edesc->src_nents == 1) {
+		src_dma = sg_dma_address(req->src);
+		in_options = 0;
+	} else {
+		src_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+		in_options = LDST_SGF;
+	}
+	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
+
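+	/*
+	 * The output sequence runs through the link table (nbytes + ivsize)
+	 * so that the device-generated IV lands in the IV slot just ahead
+	 * of the payload.
+	 */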
+	dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+		  sizeof(struct sec4_sg_entry);
+	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
+}
+
+/*
+ * allocate and map the aead extended descriptor
+ */
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+					   int desc_bytes, bool *all_contig_ptr,
+					   bool encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	struct aead_edesc *edesc;
+	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
+	unsigned int authsize = ctx->authsize;
+
+	if (unlikely(req->dst != req->src)) {
+		src_nents = sg_nents_for_len(req->src, req->assoclen +
+					     req->cryptlen);
+		if (unlikely(src_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+				req->assoclen + req->cryptlen);
+			return ERR_PTR(src_nents);
+		}
+
+		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+					     req->cryptlen +
+						(encrypt ? authsize :
+							   (-authsize)));
+		if (unlikely(dst_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+				req->assoclen + req->cryptlen +
+				(encrypt ? authsize : (-authsize)));
+			return ERR_PTR(dst_nents);
+		}
+	} else {
+		src_nents = sg_nents_for_len(req->src, req->assoclen +
+					     req->cryptlen +
+					     (encrypt ? authsize : 0));
+		if (unlikely(src_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+				req->assoclen + req->cryptlen +
+				(encrypt ? authsize : 0));
+			return ERR_PTR(src_nents);
+		}
+	}
+
+	if (likely(req->src == req->dst)) {
+		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	} else {
+		/* Also cover the case of null (zero-length) input data */
+		if (src_nents) {
+			mapped_src_nents = dma_map_sg(jrdev, req->src,
+						      src_nents, DMA_TO_DEVICE);
+			if (unlikely(!mapped_src_nents)) {
+				dev_err(jrdev, "unable to map source\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		} else {
+			mapped_src_nents = 0;
+		}
+
+		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(jrdev, "unable to map destination\n");
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
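+	/* a single-segment src or dst is referenced directly, no table entry */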
+	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
+	sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+			GFP_DMA | flags);
+	if (!edesc) {
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, DMA_NONE, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+			 desc_bytes;
+	*all_contig_ptr = !(mapped_src_nents > 1);
+
+	sec4_sg_index = 0;
+	if (mapped_src_nents > 1) {
+		sg_to_sec4_sg_last(req->src, mapped_src_nents,
+				   edesc->sec4_sg + sec4_sg_index, 0);
+		sec4_sg_index += mapped_src_nents;
+	}
+	if (mapped_dst_nents > 1) {
+		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+				   edesc->sec4_sg + sec4_sg_index, 0);
+	}
+
+	if (!sec4_sg_bytes)
+		return edesc;
+
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+	return edesc;
+}
+
+static int gcm_encrypt(struct aead_request *req)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool all_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	init_gcm_job(req, edesc, all_contig, true);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_encrypt(req);
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool all_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+				 &all_contig, true);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	init_authenc_job(req, edesc, all_contig, true);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int gcm_decrypt(struct aead_request *req)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool all_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor*/
+	init_gcm_job(req, edesc, all_contig, false);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_decrypt(req);
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool all_contig;
+	u32 *desc;
+	int ret = 0;
+
+	caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+		     req->assoclen + req->cryptlen, 1);
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+				 &all_contig, false);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor*/
+	init_authenc_job(req, edesc, all_contig, false);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+/*
+ * allocate and map the ablkcipher extended descriptor
+ */
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+						       *req, int desc_bytes)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma;
+	u8 *iv;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (unlikely(src_nents < 0)) {
+		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+			req->nbytes);
+		return ERR_PTR(src_nents);
+	}
+
+	if (req->dst != req->src) {
+		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+		if (unlikely(dst_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+				req->nbytes);
+			return ERR_PTR(dst_nents);
+		}
+	}
+
+	if (likely(req->src == req->dst)) {
+		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	} else {
+		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+					      DMA_TO_DEVICE);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(jrdev, "unable to map destination\n");
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
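+	/*
+	 * The link table starts with the IV entry, followed by the source
+	 * segments; the destination gets entries of its own only when it
+	 * is scattered.
+	 */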
+	sec4_sg_ents = 1 + mapped_src_nents;
+	dst_sg_idx = sec4_sg_ents;
+	sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+
+	/*
+	 * allocate space for base edesc and hw desc commands, link tables, IV
+	 */
+	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
+			GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(jrdev, "could not allocate extended descriptor\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, DMA_NONE, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
+	edesc->iv_dir = DMA_TO_DEVICE;
+
+	/* Make sure IV is located in a DMAable area */
+	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+	memcpy(iv, req->info, ivsize);
+
+	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, DMA_NONE, 0, 0);
+		kfree(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+	sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
+
+	if (mapped_dst_nents > 1) {
+		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+				   edesc->sec4_sg + dst_sg_idx, 0);
+	}
+
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
+		kfree(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+		       sec4_sg_bytes, 1);
+#endif
+
+	return edesc;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor*/
+	init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block.
+	 */
+	scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+				 ivsize, 0);
+
+	/* Create and submit job descriptor*/
+	init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
+	desc = edesc->hw_desc;
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+/*
+ * allocate and map the ablkcipher extended descriptor
+ * for ablkcipher givencrypt
+ */
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+				struct skcipher_givcrypt_request *greq,
+				int desc_bytes)
+{
+	struct ablkcipher_request *req = &greq->creq;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma;
+	u8 *iv;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (unlikely(src_nents < 0)) {
+		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+			req->nbytes);
+		return ERR_PTR(src_nents);
+	}
+
+	if (likely(req->src == req->dst)) {
+		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		dst_nents = src_nents;
+		mapped_dst_nents = src_nents;
+	} else {
+		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+					      DMA_TO_DEVICE);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+		if (unlikely(dst_nents < 0)) {
+			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+				req->nbytes);
+			return ERR_PTR(dst_nents);
+		}
+
+		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(jrdev, "unable to map destination\n");
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
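+	/*
+	 * sec4_sg table layout for givencrypt: the source entries (present
+	 * only when the mapped source has more than one entry), then one
+	 * entry for the IV, then the destination entries; dst_sg_idx marks
+	 * where the IV entry goes.
+	 */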
+	sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+	dst_sg_idx = sec4_sg_ents;
+	sec4_sg_ents += 1 + mapped_dst_nents;
+
+	/*
+	 * allocate space for base edesc and hw desc commands, link tables, IV
+	 */
+	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
+			GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(jrdev, "could not allocate extended descriptor\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, DMA_NONE, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
+	edesc->iv_dir = DMA_FROM_DEVICE;
+
+	/* Make sure IV is located in a DMAable area */
+	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, DMA_NONE, 0, 0);
+		kfree(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (mapped_src_nents > 1)
+		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
+				   0);
+
+	dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
+	sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
+			   dst_sg_idx + 1, 0);
+
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
+		kfree(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+	edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+		       sec4_sg_bytes, 1);
+#endif
+
+	return edesc;
+}
+
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+{
+	struct ablkcipher_request *req = &creq->creq;
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
+				edesc, req);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
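+
+/*
+ * Note: ablkcipher_givencrypt() differs from ablkcipher_encrypt() only in
+ * that the IV is generated by the device (ablkcipher_giv_edesc_alloc() maps
+ * it DMA_FROM_DEVICE) and the givenc shared descriptor is used; completion
+ * still goes through the same ablkcipher_encrypt_done() callback.
+ */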
+
+#define template_aead		template_u.aead
+#define template_ablkcipher	template_u.ablkcipher
+struct caam_alg_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	u32 type;
+	union {
+		struct ablkcipher_alg ablkcipher;
+	} template_u;
+	u32 class1_alg_type;
+	u32 class2_alg_type;
+};
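+
+/*
+ * Illustrative example: once an entry such as "cbc(aes)" below has been
+ * passed through caam_alg_alloc() and crypto_register_alg(), it can be
+ * instantiated through the regular crypto API, e.g.:
+ *
+ *	struct crypto_ablkcipher *tfm =
+ *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+ *
+ * with "cbc-aes-caam" showing up as the driver name in /proc/crypto.
+ */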
+
+static struct caam_alg_template driver_algs[] = {
+	/* ablkcipher descriptor */
+	{
+		.name = "cbc(aes)",
+		.driver_name = "cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des3_ede)",
+		.driver_name = "cbc-3des-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des)",
+		.driver_name = "cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "ctr(aes)",
+		.driver_name = "ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "chainiv",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.name = "rfc3686(ctr(aes))",
+		.driver_name = "rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = AES_MIN_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.name = "xts(aes)",
+		.driver_name = "xts-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = xts_ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "eseqiv",
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+	},
+};
+
+static struct caam_aead_alg driver_aeads[] = {
+	{
+		.aead = {
+			.base = {
+				.cra_name = "rfc4106(gcm(aes))",
+				.cra_driver_name = "rfc4106-gcm-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc4106_setkey,
+			.setauthsize = rfc4106_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = GCM_RFC4106_IV_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "rfc4543(gcm(aes))",
+				.cra_driver_name = "rfc4543-gcm-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc4543_setkey,
+			.setauthsize = rfc4543_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = GCM_RFC4543_IV_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	/* Galois Counter Mode */
+	{
+		.aead = {
+			.base = {
+				.cra_name = "gcm(aes)",
+				.cra_driver_name = "gcm-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = gcm_setkey,
+			.setauthsize = gcm_setauthsize,
+			.encrypt = gcm_encrypt,
+			.decrypt = gcm_decrypt,
+			.ivsize = GCM_AES_IV_SIZE,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	/* single-pass ipsec_esp descriptor */
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc("
+					    "hmac(md5),rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-md5-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc("
+					    "hmac(sha1),rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc("
+					    "hmac(sha224),rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc(hmac(sha256),"
+					    "rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc(hmac(sha384),"
+					    "rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc(hmac(sha512),"
+					    "rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+};
+
+struct caam_crypto_alg {
+	struct crypto_alg crypto_alg;
+	struct list_head entry;
+	struct caam_alg_entry caam;
+};
+
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+			    bool uses_dkp)
+{
+	dma_addr_t dma_addr;
+	struct caam_drv_private *priv;
+
+	ctx->jrdev = caam_jr_alloc();
+	if (IS_ERR(ctx->jrdev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(ctx->jrdev);
+	}
+
+	priv = dev_get_drvdata(ctx->jrdev->parent);
+	if (priv->era >= 6 && uses_dkp)
+		ctx->dir = DMA_BIDIRECTIONAL;
+	else
+		ctx->dir = DMA_TO_DEVICE;
+
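+	/*
+	 * The shared descriptors and the key sit contiguously at the start of
+	 * struct caam_ctx, so a single mapping of
+	 * offsetof(struct caam_ctx, sh_desc_enc_dma) bytes covers them all;
+	 * the individual DMA addresses are derived below via offsetof().
+	 */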
+	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
+					offsetof(struct caam_ctx,
+						 sh_desc_enc_dma),
+					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+		dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
+		caam_jr_free(ctx->jrdev);
+		return -ENOMEM;
+	}
+
+	ctx->sh_desc_enc_dma = dma_addr;
+	ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
+						   sh_desc_dec);
+	ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
+						      sh_desc_givenc);
+	ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
+
+	/* copy descriptor header template value */
+	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+
+	return 0;
+}
+
+static int caam_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct caam_crypto_alg *caam_alg =
+		 container_of(alg, struct caam_crypto_alg, crypto_alg);
+	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	return caam_init_common(ctx, &caam_alg->caam, false);
+}
+
+static int caam_aead_init(struct crypto_aead *tfm)
+{
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct caam_aead_alg *caam_alg =
+		 container_of(alg, struct caam_aead_alg, aead);
+	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+
+	return caam_init_common(ctx, &caam_alg->caam,
+				alg->setkey == aead_setkey);
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
+	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
+			       offsetof(struct caam_ctx, sh_desc_enc_dma),
+			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+	caam_jr_free(ctx->jrdev);
+}
+
+static void caam_cra_exit(struct crypto_tfm *tfm)
+{
+	caam_exit_common(crypto_tfm_ctx(tfm));
+}
+
+static void caam_aead_exit(struct crypto_aead *tfm)
+{
+	caam_exit_common(crypto_aead_ctx(tfm));
+}
+
+static void __exit caam_algapi_exit(void)
+{
+	struct caam_crypto_alg *t_alg, *n;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+		struct caam_aead_alg *t_alg = driver_aeads + i;
+
+		if (t_alg->registered)
+			crypto_unregister_aead(&t_alg->aead);
+	}
+
+	if (!alg_list.next)
+		return;
+
+	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
+		crypto_unregister_alg(&t_alg->crypto_alg);
+		list_del(&t_alg->entry);
+		kfree(t_alg);
+	}
+}
+
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
+					      *template)
+{
+	struct caam_crypto_alg *t_alg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg) {
+		pr_err("failed to allocate t_alg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	alg = &t_alg->crypto_alg;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 template->driver_name);
+	alg->cra_module = THIS_MODULE;
+	alg->cra_init = caam_cra_init;
+	alg->cra_exit = caam_cra_exit;
+	alg->cra_priority = CAAM_CRA_PRIORITY;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_ctxsize = sizeof(struct caam_ctx);
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+			 template->type;
+	switch (template->type) {
+	case CRYPTO_ALG_TYPE_GIVCIPHER:
+		alg->cra_type = &crypto_givcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg->cra_type = &crypto_ablkcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
+	}
+
+	t_alg->caam.class1_alg_type = template->class1_alg_type;
+	t_alg->caam.class2_alg_type = template->class2_alg_type;
+
+	return t_alg;
+}
+
+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
+{
+	struct aead_alg *alg = &t_alg->aead;
+
+	alg->base.cra_module = THIS_MODULE;
+	alg->base.cra_priority = CAAM_CRA_PRIORITY;
+	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+	alg->init = caam_aead_init;
+	alg->exit = caam_aead_exit;
+}
+
+static int __init caam_algapi_init(void)
+{
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	struct caam_drv_private *priv;
+	int i = 0, err = 0;
+	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+	unsigned int md_limit = SHA512_DIGEST_SIZE;
+	bool registered = false;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
+	INIT_LIST_HEAD(&alg_list);
+
+	/*
+	 * Register crypto algorithms the device supports.
+	 * First, detect presence and attributes of DES, AES, and MD blocks.
+	 */
+	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+	/*
+	 * If MD is present, limit the digest size: LP256 MDHA blocks support
+	 * at most SHA-256, so larger digests (SHA-384/512) are skipped below.
+	 */
+	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+		md_limit = SHA256_DIGEST_SIZE;
+
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct caam_crypto_alg *t_alg;
+		struct caam_alg_template *alg = driver_algs + i;
+		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		/*
+		 * Check support for AES modes not available
+		 * on LP devices.
+		 */
+		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+			if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+			     OP_ALG_AAI_XTS)
+				continue;
+
+		t_alg = caam_alg_alloc(alg);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			pr_warn("%s alg allocation failed\n", alg->driver_name);
+			continue;
+		}
+
+		err = crypto_register_alg(&t_alg->crypto_alg);
+		if (err) {
+			pr_warn("%s alg registration failed\n",
+				t_alg->crypto_alg.cra_driver_name);
+			kfree(t_alg);
+			continue;
+		}
+
+		list_add_tail(&t_alg->entry, &alg_list);
+		registered = true;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+		struct caam_aead_alg *t_alg = driver_aeads + i;
+		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		/*
+		 * Check support for AES algorithms not available
+		 * on LP devices.
+		 */
+		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+			if (alg_aai == OP_ALG_AAI_GCM)
+				continue;
+
+		/*
+		 * Skip algorithms requiring message digests
+		 * if MD or MD size is not supported by device.
+		 */
+		if (c2_alg_sel &&
+		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
+			continue;
+
+		caam_aead_alg_init(t_alg);
+
+		err = crypto_register_aead(&t_alg->aead);
+		if (err) {
+			pr_warn("%s alg registration failed\n",
+				t_alg->aead.base.cra_driver_name);
+			continue;
+		}
+
+		t_alg->registered = true;
+		registered = true;
+	}
+
+	if (registered)
+		pr_info("caam algorithms registered in /proc/crypto\n");
+
+	return err;
+}
+
+module_init(caam_algapi_init);
+module_exit(caam_algapi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for crypto API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
new file mode 100644
index 0000000..a408edd
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -0,0 +1,1547 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamalg_desc.h"
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+}
+
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+	u32 *jump_cmd, *uncond_jump_cmd;
+
+	/* DK bit is valid only for AES */
+	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+		append_operation(desc, type | OP_ALG_AS_INITFINAL |
+				 OP_ALG_DECRYPT);
+		return;
+	}
+
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+	append_operation(desc, type | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT);
+	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
+	append_operation(desc, type | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+	set_jump_tgt_here(desc, uncond_jump_cmd);
+}
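+
+/*
+ * The sequence emitted by append_dec_op1() for AES is effectively:
+ *
+ *	jump to the DK variant if the descriptor is shared
+ *	OPERATION (decrypt)
+ *	jump over the DK variant
+ *	OPERATION (decrypt | DK)	<- target of the shared jump
+ *
+ * so the Decrypt-Key (DK) form is only used when entering via a shared
+ * descriptor.
+ */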
+
+/**
+ * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
+ *                               (non-protocol) with no (null) encryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ *         A split key is required for SEC Era < 6; the size of the split key
+ *         is specified in this case. Valid algorithm values - one of
+ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
+ *         with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize, int era)
+{
+	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (era < 6) {
+		if (adata->key_inline)
+			append_key_as_imm(desc, adata->key_virt,
+					  adata->keylen_pad, adata->keylen,
+					  CLASS_2 | KEY_DEST_MDHA_SPLIT |
+					  KEY_ENC);
+		else
+			append_key(desc, adata->key_dma, adata->keylen,
+				   CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	} else {
+		append_proto_dkp(desc, adata);
+	}
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* assoclen + cryptlen = seqinlen */
+	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Prepare to read and write cryptlen + assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * so the descriptor buffer is self-patched at run time instead.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+				    MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+				     MOVE_DEST_DESCBUF |
+				     MOVE_WAITCOMP |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Class 2 operation */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Read and write cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+		    MOVE_AUX_LS);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "aead null enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
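+
+/*
+ * A minimal usage sketch (illustrative only, following the pattern of the
+ * set_sh_desc routines in caamalg.c): build the descriptor into the context
+ * buffer, then hand it to the device:
+ *
+ *	cnstr_shdsc_aead_null_encap(ctx->sh_desc_enc, &ctx->adata,
+ *				    ctx->authsize, ctrlpriv->era);
+ *	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ *				   desc_bytes(ctx->sh_desc_enc), ctx->dir);
+ */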
+
+/**
+ * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
+ *                               (non-protocol) with no (null) decryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ *         A split key is required for SEC Era < 6; the size of the split key
+ *         is specified in this case. Valid algorithm values - one of
+ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
+ *         with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize, int era)
+{
+	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (era < 6) {
+		if (adata->key_inline)
+			append_key_as_imm(desc, adata->key_virt,
+					  adata->keylen_pad, adata->keylen,
+					  CLASS_2 | KEY_DEST_MDHA_SPLIT |
+					  KEY_ENC);
+		else
+			append_key(desc, adata->key_dma, adata->keylen,
+				   CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	} else {
+		append_proto_dkp(desc, adata);
+	}
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 2 operation */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* assoclen + cryptlen = seqoutlen */
+	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* Prepare to read and write cryptlen + assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * so the descriptor buffer is self-patched at run time instead.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+				    MOVE_DEST_MATH2 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+				     MOVE_DEST_DESCBUF |
+				     MOVE_WAITCOMP |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Read and write cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/*
+	 * Insert a NOP here, since we need at least 4 instructions between
+	 * code patching the descriptor buffer and the location being patched.
+	 */
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+		    MOVE_AUX_LS);
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Load ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "aead null dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
+
+static void init_sh_desc_key_aead(u32 * const desc,
+				  struct alginfo * const cdata,
+				  struct alginfo * const adata,
+				  const bool is_rfc3686, u32 *nonce, int era)
+{
+	u32 *key_jump_cmd;
+	unsigned int enckeylen = cdata->keylen;
+
+	/* Note: Context registers are saved. */
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/*
+	 * RFC3686 specific:
+	 *	| key = {AUTH_KEY, ENC_KEY, NONCE}
+	 *	| enckeylen = encryption key size + nonce size
+	 */
+	if (is_rfc3686)
+		enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
+	if (era < 6) {
+		if (adata->key_inline)
+			append_key_as_imm(desc, adata->key_virt,
+					  adata->keylen_pad, adata->keylen,
+					  CLASS_2 | KEY_DEST_MDHA_SPLIT |
+					  KEY_ENC);
+		else
+			append_key(desc, adata->key_dma, adata->keylen,
+				   CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	} else {
+		append_proto_dkp(desc, adata);
+	}
+
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, enckeylen,
+				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686) {
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc,
+			    MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX |
+			    (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+}
+
+/**
+ * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
+ *                          (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions.
+ *         A split key is required for SEC Era < 6; the size of the split key
+ *         is specified in this case. Valid algorithm values - one of
+ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
+ *         with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
+ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int ivsize,
+			    unsigned int icvsize, const bool is_rfc3686,
+			    u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
+			    int era)
+{
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
+
+	/* Class 2 operation */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
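+		/*
+		 * Wait for the assoclen transfer above to settle before
+		 * loading the IV.
+		 */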
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+				LDST_SRCDST_BYTE_CONTEXT |
+				(ctx1_iv_off << LDST_OFFSET_SHIFT));
+	}
+
+	/* Read and write assoclen bytes */
+	if (is_qi || era < 3) {
+		append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+	} else {
+		append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+		append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+	}
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+				      FIFOLDST_VLF);
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
+
+/**
+ * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
+ *                          (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions.
+ *         A split key is required for SEC Era < 6; the size of the split key
+ *         is specified in this case. Valid algorithm values - one of
+ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
+ *         with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
+ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int ivsize,
+			    unsigned int icvsize, const bool geniv,
+			    const bool is_rfc3686, u32 *nonce,
+			    const u32 ctx1_iv_off, const bool is_qi, int era)
+{
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
+
+	/* Class 2 operation */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		if (!geniv)
+			append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+					LDST_SRCDST_BYTE_CONTEXT |
+					(ctx1_iv_off << LDST_OFFSET_SHIFT));
+	}
+
+	/* Read and write assoclen bytes (output also covers the IV if geniv) */
+	if (is_qi || era < 3) {
+		append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+		if (geniv)
+			append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
+						ivsize);
+		else
+			append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
+					CAAM_CMD_SZ);
+	} else {
+		append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+		if (geniv)
+			append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
+						ivsize);
+		else
+			append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
+					CAAM_CMD_SZ);
+	}
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	if (geniv) {
+		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+				LDST_SRCDST_BYTE_CONTEXT |
+				(ctx1_iv_off << LDST_OFFSET_SHIFT));
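+		/* The IV is covered by the ICV, so feed it to class 2 too */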
+		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+	}
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Choose operation */
+	if (ctx1_iv_off)
+		append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+				 OP_ALG_DECRYPT);
+	else
+		append_dec_op1(desc, cdata->algtype);
+
+	/* Read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+	/* Load ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
+
+/**
+ * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
+ *                             (non-protocol) with HW-generated initialization
+ *                             vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions.
+ *         A split key is required for SEC Era < 6; the size of the split key
+ *         is specified in this case. Valid algorithm values - one of
+ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ORed
+ *         with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
+ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+			       struct alginfo *adata, unsigned int ivsize,
+			       unsigned int icvsize, const bool is_rfc3686,
+			       u32 *nonce, const u32 ctx1_iv_off,
+			       const bool is_qi, int era)
+{
+	u32 geniv, moveiv;
+
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+	}
+
+	if (is_rfc3686) {
+		if (is_qi)
+			append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+					LDST_SRCDST_BYTE_CONTEXT |
+					(ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+		goto copy_iv;
+	}
+
+	/* Generate IV: ivsize random bytes via the NFIFO padding engine */
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_WAITCOMP |
+		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+		    (ivsize << MOVE_LEN_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+copy_iv:
+	/* Copy IV from class 1 context to OFIFO, to be written to output */
+	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+		    (ivsize << MOVE_LEN_SHIFT));
+
+	/* Class 2 operation - resume the normal encryption flow */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Read and write assoclen bytes */
+	if (is_qi || era < 3) {
+		append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+	} else {
+		append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+		append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+	}
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	/* Copy IV from OFIFO to the class 2 input FIFO */
+	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
+			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Will write ivsize + cryptlen */
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* No need to reload the IV - skip it in the input sequence */
+	append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
+
+	/* Will read cryptlen */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
+			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "aead givenc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
+
+/**
+ * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int ivsize, unsigned int icvsize,
+			   const bool is_qi)
+{
+	u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
+	    *zero_assoc_jump_cmd2;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
+					ivsize);
+	} else {
+		append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
+				CAAM_CMD_SZ);
+	}
+
+	/* if assoclen + cryptlen is ZERO, skip to ICV write */
+	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+						 JUMP_COND_MATH_Z);
+
+	if (is_qi)
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+	/* if assoclen is ZERO, skip reading the assoc data */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+					   JUMP_COND_MATH_Z);
+
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* cryptlen = seqinlen - assoclen */
+	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+	/* if cryptlen is ZERO, jump to zero-payload commands */
+	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+					    JUMP_COND_MATH_Z);
+
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
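+	/* Will read the remaining input, i.e. cryptlen bytes */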
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* write encrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+	/* jump to ICV writing (the immediate is the skip offset, in words) */
+	if (is_qi)
+		append_jump(desc, JUMP_TEST_ALL | 4);
+	else
+		append_jump(desc, JUMP_TEST_ALL | 2);
+
+	/* zero-payload commands */
+	set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+	if (is_qi)
+		/* jump to ICV writing */
+		append_jump(desc, JUMP_TEST_ALL | 2);
+
+	/* There is no input data (assoclen + cryptlen = 0) */
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+	if (is_qi)
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
+				     FIFOLD_TYPE_LAST1);
+
+	/* write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
+
+/**
+ * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int ivsize, unsigned int icvsize,
+			   const bool is_qi)
+{
+	u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL |
+				   JUMP_TEST_ALL | JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
+	/* if assoclen is ZERO, skip reading the assoc data */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+						 JUMP_COND_MATH_Z);
+
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+	/* cryptlen = seqoutlen - assoclen */
+	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* jump to zero-payload command if cryptlen is zero */
+	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+					    JUMP_COND_MATH_Z);
+
+	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* store encrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/* zero-payload command */
+	set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+	/* read ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
+
+/**
+ * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
+ *                             (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		/* Read salt and IV */
+		append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+					cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+					FIFOLD_TYPE_IV);
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
+	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+	/* Skip IV */
+	append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
+
+	/* Will read cryptlen bytes */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* cryptlen = seqoutlen - assoclen */
+	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Write encrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* Read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
+
+/**
+ * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
+ *                             (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 |
+				  KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	if (is_qi) {
+		u32 *wait_load_cmd;
+
+		/* REG3 = assoclen */
+		append_seq_load(desc, 4, LDST_CLASS_DECO |
+				LDST_SRCDST_WORD_DECO_MATH3 |
+				(4 << LDST_OFFSET_SHIFT));
+
+		wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_CALM | JUMP_COND_NCP |
+					    JUMP_COND_NOP | JUMP_COND_NIP |
+					    JUMP_COND_NIFP);
+		set_jump_tgt_here(desc, wait_load_cmd);
+
+		/* Read salt and IV */
+		append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+					cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+					FIFOLD_TYPE_IV);
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
+	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+	/* Skip IV */
+	append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
+
+	/* Will read cryptlen bytes */
+	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* Will write cryptlen bytes */
+	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* Store payload data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* Read encrypted data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/* Read ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
+
+/**
+ * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
+ *                             (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi)
+{
+	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	if (is_qi) {
+		/* assoclen is not needed, skip it */
+		append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
+
+		/* Read salt and IV */
+		append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+					cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+					FIFOLD_TYPE_IV);
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
+	/* assoclen + cryptlen = seqinlen */
+	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/*
+	 * The MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * so self-patch the descriptor buffer, as done in
+	 * cnstr_shdsc_aead_null_decap() above.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+				     (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
+
+	/* Will read assoclen + cryptlen bytes */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Will write assoclen + cryptlen bytes */
+	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Read and write assoclen + cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	/* Move payload data to OFIFO */
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
+
+/**
+ * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
+ *                             (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi)
+{
+	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	if (is_qi) {
+		/* assoclen is not needed, skip it */
+		append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
+
+		/* Read salt and IV */
+		append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+					cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+					FIFOLD_TYPE_IV);
+		append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+				     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+	}
+
+	/* assoclen + cryptlen = seqoutlen */
+	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/*
+	 * The MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * so self-patch the descriptor buffer, as done in
+	 * cnstr_shdsc_aead_null_decap() above.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+				     (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
+
+	/* Will read assoclen + cryptlen bytes */
+	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* Will write assoclen + cryptlen bytes */
+	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* Store payload data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* In-snoop assoclen + cryptlen data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	/* Move payload data to OFIFO */
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Read ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/**
+ * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Load iv */
+	append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Load operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
+
+/**
+ * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* load IV */
+	append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Choose operation */
+	if (ctx1_iv_off)
+		append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+				 OP_ALG_DECRYPT);
+	else
+		append_dec_op1(desc, cdata->algtype);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
+
+/**
+ * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
+ *                                   with HW-generated initialization vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
+ *         with OP_ALG_AAI_CBC.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+				     unsigned int ivsize, const bool is_rfc3686,
+				     const u32 ctx1_iv_off)
+{
+	u32 *key_jump_cmd, geniv;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load Nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Generate IV */
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
+		(ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
+		    MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Copy generated IV to memory */
+	append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+			 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	if (ctx1_iv_off)
+		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+			    (1 << JUMP_OFFSET_SHIFT));
+
+	/* Load operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
+ *                                    descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+{
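+	/* The XTS sector size is hard-coded to 512 bytes */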
+	__be64 sector_size = cpu_to_be64(512);
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 keys only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load sector size with index 40 bytes (0x28) */
+	append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+			   LDST_SRCDST_BYTE_CONTEXT |
+			   (0x28 << LDST_OFFSET_SHIFT));
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/*
+	 * Create the sequence for loading the sector index:
+	 * the upper 8 bytes of the IV are used as the sector index,
+	 * the lower 8 bytes are discarded.
+	 */
+	append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			(0x20 << LDST_OFFSET_SHIFT));
+	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+	/* Load operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
+ *                                    descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+{
+	__be64 sector_size = cpu_to_be64(512);
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load sector size with index 40 bytes (0x28) */
+	append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+			   LDST_SRCDST_BYTE_CONTEXT |
+			   (0x28 << LDST_OFFSET_SHIFT));
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/*
+	 * Create the sequence for loading the sector index:
+	 * the upper 8 bytes of the IV are used as the sector index,
+	 * the lower 8 bytes are discarded.
+	 */
+	append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			(0x20 << LDST_OFFSET_SHIFT));
+	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+	/* Load operation */
+	append_dec_op1(desc, cdata->algtype);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM descriptor support");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
new file mode 100644
index 0000000..a917af5
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#ifndef _CAAMALG_DESC_H_
+#define _CAAMALG_DESC_H_
+
+/* lengths of the shared descriptor command sequences, in bytes */
+#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_QI_AEAD_ENC_LEN		(DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
+#define DESC_QI_AEAD_DEC_LEN		(DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
+#define DESC_QI_AEAD_GIVENC_LEN		(DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
+
+/* Note: Nonce is counted in cdata.keylen */
+#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)
+
+#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_QI_GCM_ENC_LEN		(DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
+#define DESC_QI_GCM_DEC_LEN		(DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4106_ENC_LEN		(DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4106_DEC_LEN		(DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4543_ENC_LEN		(DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4543_DEC_LEN		(DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 15 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize, int era);
+
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize, int era);
+
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int ivsize,
+			    unsigned int icvsize, const bool is_rfc3686,
+			    u32 *nonce, const u32 ctx1_iv_off,
+			    const bool is_qi, int era);
+
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int ivsize,
+			    unsigned int icvsize, const bool geniv,
+			    const bool is_rfc3686, u32 *nonce,
+			    const u32 ctx1_iv_off, const bool is_qi, int era);
+
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+			       struct alginfo *adata, unsigned int ivsize,
+			       unsigned int icvsize, const bool is_rfc3686,
+			       u32 *nonce, const u32 ctx1_iv_off,
+			       const bool is_qi, int era);
+
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int ivsize, unsigned int icvsize,
+			   const bool is_qi);
+
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int ivsize, unsigned int icvsize,
+			   const bool is_qi);
+
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi);
+
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi);
+
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi);
+
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int ivsize, unsigned int icvsize,
+			       const bool is_qi);
+
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+				     unsigned int ivsize, const bool is_rfc3686,
+				     const u32 ctx1_iv_off);
+
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
+
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
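+
+/*
+ * Illustrative usage sketch (hypothetical caller; "key_buf" below is an
+ * assumed caller-provided key buffer - real users live in caamalg.c and
+ * caamalg_qi.c): fill in the transform definitions, then hand a constructor
+ * a buffer large enough for the commands plus any inlined key material;
+ * CAAM_DESC_BYTES_MAX is the h/w upper bound:
+ *
+ *	u32 shdesc[CAAM_DESC_BYTES_MAX / CAAM_CMD_SZ];
+ *	struct alginfo cdata = {
+ *		.algtype = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ *		.keylen = AES_MIN_KEY_SIZE,
+ *		.key_virt = key_buf,
+ *	};
+ *
+ *	cnstr_shdsc_ablkcipher_encap(shdesc, &cdata, AES_BLOCK_SIZE,
+ *				     false, 0);
+ *
+ * The finished descriptor is then DMA-mapped and referenced from job
+ * descriptors or a QI driver context.
+ */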
+
+#endif /* _CAAMALG_DESC_H_ */
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
new file mode 100644
index 0000000..d7aa7d7
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -0,0 +1,2846 @@
+/*
+ * Freescale CAAM support for the crypto API over the QI backend.
+ * Based on caamalg.c
+ *
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ */
+
+#include "compat.h"
+#include "ctrl.h"
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "error.h"
+#include "sg_sw_qm.h"
+#include "key_gen.h"
+#include "qi.h"
+#include "jr.h"
+#include "caamalg_desc.h"
+
+/*
+ * crypto alg
+ */
+#define CAAM_CRA_PRIORITY		2000
+/* max key size: AES_MAX_KEY_SIZE plus the maximum split key size */
+#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
+					 SHA512_DIGEST_SIZE * 2)
+
+#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
+					 CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
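+/* DESC_MAX_USED_LEN is in 4-byte CAAM command words, hence the division */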
+
+struct caam_alg_entry {
+	int class1_alg_type;
+	int class2_alg_type;
+	bool rfc3686;
+	bool geniv;
+};
+
+struct caam_aead_alg {
+	struct aead_alg aead;
+	struct caam_alg_entry caam;
+	bool registered;
+};
+
+/*
+ * per-session context
+ */
+struct caam_ctx {
+	struct device *jrdev;
+	u32 sh_desc_enc[DESC_MAX_USED_LEN];
+	u32 sh_desc_dec[DESC_MAX_USED_LEN];
+	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+	u8 key[CAAM_MAX_KEY_SIZE];
+	dma_addr_t key_dma;
+	enum dma_data_direction dir;
+	struct alginfo adata;
+	struct alginfo cdata;
+	unsigned int authsize;
+	struct device *qidev;
+	spinlock_t lock;	/* Protects against concurrent init of driver contexts */
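+	/* One QI driver context per operation type */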
+	struct caam_drv_ctx *drv_ctx[NUM_OP];
+};
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 typeof(*alg), aead);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 ctx1_iv_off = 0;
+	u32 *nonce = NULL;
+	unsigned int data_len[2];
+	u32 inl_mask;
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = alg->caam.rfc3686;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * AES-CTR needs to load the IV into the CONTEXT1 reg
+	 * at an offset of 128 bits (16 bytes):
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 */
+	if (is_rfc3686) {
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
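+		/* The nonce trails the enc key (counted in cdata.keylen) */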
+		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+	}
+
+	data_len[0] = ctx->adata.keylen_pad;
+	data_len[1] = ctx->cdata.keylen;
+
+	if (alg->caam.geniv)
+		goto skip_enc;
+
+	/* aead_encrypt shared descriptor */
+	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
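+	/* Bit n of inl_mask set: data_len[n] fits inline in the descriptor */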
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
+
+	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+			       ivsize, ctx->authsize, is_rfc3686, nonce,
+			       ctx1_iv_off, true, ctrlpriv->era);
+
+skip_enc:
+	/* aead_decrypt shared descriptor */
+	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
+
+	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
+			       ivsize, ctx->authsize, alg->caam.geniv,
+			       is_rfc3686, nonce, ctx1_iv_off, true,
+			       ctrlpriv->era);
+
+	if (!alg->caam.geniv)
+		goto skip_givenc;
+
+	/* aead_givencrypt shared descriptor */
+	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
+
+	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+				  ivsize, ctx->authsize, is_rfc3686, nonce,
+				  ctx1_iv_off, true, ctrlpriv->era);
+
+skip_givenc:
+	return 0;
+}
+
+static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	aead_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+		       unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	struct crypto_authenc_keys keys;
+	int ret = 0;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+		goto badkey;
+
+#ifdef DEBUG
+	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+		keys.authkeylen + keys.enckeylen, keys.enckeylen,
+		keys.authkeylen);
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	/*
+	 * If DKP is supported, use it in the shared descriptor to generate
+	 * the split key.
+	 */
+	if (ctrlpriv->era >= 6) {
+		ctx->adata.keylen = keys.authkeylen;
+		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+						      OP_ALG_ALGSEL_MASK);
+
+		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+			goto badkey;
+
+		memcpy(ctx->key, keys.authkey, keys.authkeylen);
+		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+		       keys.enckeylen);
+		dma_sync_single_for_device(jrdev, ctx->key_dma,
+					   ctx->adata.keylen_pad +
+					   keys.enckeylen, ctx->dir);
+		goto skip_split_key;
+	}
+
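+	/*
+	 * Pre-era-6 h/w: compute the HMAC split key (ipad/opad hashes)
+	 * now, so the shared descriptor can load it directly.
+	 */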
+	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
+			    keys.enckeylen);
+	if (ret)
+		goto badkey;
+
+	/* append the encryption key to the auth split key */
+	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+				   keys.enckeylen, ctx->dir);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+		       ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
+skip_split_key:
+	ctx->cdata.keylen = keys.enckeylen;
+
+	ret = aead_set_sh_desc(aead);
+	if (ret)
+		goto badkey;
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			goto badkey;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			goto badkey;
+		}
+	}
+
+	memzero_explicit(&keys, sizeof(keys));
+	return ret;
+badkey:
+	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	memzero_explicit(&keys, sizeof(keys));
+	return -EINVAL;
+}
+
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
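+	/*
+	 * rem_bytes: descriptor buffer space left for the shared descriptor
+	 * once the job descriptor and an inlined key are accounted for
+	 */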
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+			      ctx->authsize, true);
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+			      ctx->authsize, true);
+
+	return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	gcm_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+		      const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	int ret;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+	ctx->cdata.keylen = keylen;
+
+	ret = gcm_set_sh_desc(aead);
+	if (ret)
+		return ret;
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			return ret;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	ctx->cdata.key_virt = ctx->key;
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+				  ctx->authsize, true);
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+				  ctx->authsize, true);
+
+	return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	rfc4106_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	int ret;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->cdata.keylen = keylen - 4;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   ctx->dir);
+
+	ret = rfc4106_set_sh_desc(aead);
+	if (ret)
+		return ret;
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			return ret;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	ctx->cdata.key_virt = ctx->key;
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+				  ctx->authsize, true);
+
+	/*
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
+
+	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+				  ctx->authsize, true);
+
+	return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	rfc4543_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	int ret;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->cdata.keylen = keylen - 4;
+	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+				   ctx->dir);
+
+	ret = rfc4543_set_sh_desc(aead);
+	if (ret)
+		return ret;
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			return ret;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+	const char *alg_name = crypto_tfm_alg_name(tfm);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 ctx1_iv_off = 0;
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+	int ret = 0;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+	/*
+	 * AES-CTR needs to load the IV in the CONTEXT1 reg
+	 * at an offset of 128 bits (16 bytes):
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 *	*key = {KEY, NONCE}
+	 */
+	if (is_rfc3686) {
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+		keylen -= CTR_RFC3686_NONCE_SIZE;
+	}
+
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = key;
+	ctx->cdata.key_inline = true;
+
+	/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
+	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+				     is_rfc3686, ctx1_iv_off);
+	cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+				     is_rfc3686, ctx1_iv_off);
+	cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
+					ivsize, is_rfc3686, ctx1_iv_off);
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			goto badkey;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			goto badkey;
+		}
+	}
+
+	if (ctx->drv_ctx[GIVENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
+					  ctx->sh_desc_givenc);
+		if (ret) {
+			dev_err(jrdev, "driver givenc context update failed\n");
+			goto badkey;
+		}
+	}
+
+	return ret;
+badkey:
+	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+				 const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	int ret = 0;
+
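+	/* XTS expects two concatenated, equal-size AES keys (data + tweak) */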
+	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+		dev_err(jrdev, "key size mismatch\n");
+		goto badkey;
+	}
+
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = key;
+	ctx->cdata.key_inline = true;
+
+	/* xts ablkcipher encrypt, decrypt shared descriptors */
+	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
+	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
+
+	/* Now update the driver contexts with the new shared descriptor */
+	if (ctx->drv_ctx[ENCRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+					  ctx->sh_desc_enc);
+		if (ret) {
+			dev_err(jrdev, "driver enc context update failed\n");
+			goto badkey;
+		}
+	}
+
+	if (ctx->drv_ctx[DECRYPT]) {
+		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+					  ctx->sh_desc_dec);
+		if (ret) {
+			dev_err(jrdev, "driver dec context update failed\n");
+			goto badkey;
+		}
+	}
+
+	return ret;
+badkey:
+	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @assoclen: associated data length, in CAAM endianness
+ * @assoclen_dma: bus physical mapped address of req->assoclen
+ * @drv_req: driver-specific request structure
+ * @sgt: the h/w link table, followed by IV
+ */
+struct aead_edesc {
+	int src_nents;
+	int dst_nents;
+	dma_addr_t iv_dma;
+	int qm_sg_bytes;
+	dma_addr_t qm_sg_dma;
+	unsigned int assoclen;
+	dma_addr_t assoclen_dma;
+	struct caam_drv_req drv_req;
+	struct qm_sg_entry sgt[0];
+};
+
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @drv_req: driver-specific request structure
+ * @sgt: the h/w link table, followed by IV
+ */
+struct ablkcipher_edesc {
+	int src_nents;
+	int dst_nents;
+	dma_addr_t iv_dma;
+	int qm_sg_bytes;
+	dma_addr_t qm_sg_dma;
+	struct caam_drv_req drv_req;
+	struct qm_sg_entry sgt[0];
+};
+
+static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
+					enum optype type)
+{
+	/*
+	 * This function is called on the fast path with values of 'type'
+	 * known at compile time. Invalid arguments are not expected and
+	 * thus no checks are made.
+	 */
+	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
+	u32 *desc;
+
+	if (unlikely(!drv_ctx)) {
+		spin_lock(&ctx->lock);
+
+		/* Re-read under the lock: another core may have set drv_ctx */
+		drv_ctx = ctx->drv_ctx[type];
+		if (!drv_ctx) {
+			int cpu;
+
+			if (type == ENCRYPT)
+				desc = ctx->sh_desc_enc;
+			else if (type == DECRYPT)
+				desc = ctx->sh_desc_dec;
+			else /* (type == GIVENCRYPT) */
+				desc = ctx->sh_desc_givenc;
+
+			cpu = smp_processor_id();
+			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
+			if (likely(!IS_ERR_OR_NULL(drv_ctx)))
+				drv_ctx->op_type = type;
+
+			ctx->drv_ctx[type] = drv_ctx;
+		}
+
+		spin_unlock(&ctx->lock);
+	}
+
+	return drv_ctx;
+}
+
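+/*
+ * Undo the DMA mappings done at extended descriptor allocation time:
+ * source/destination scatterlists, IV and h/w S/G table.
+ */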
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+		       struct scatterlist *dst, int src_nents,
+		       int dst_nents, dma_addr_t iv_dma, int ivsize,
+		       enum optype op_type, dma_addr_t qm_sg_dma,
+		       int qm_sg_bytes)
+{
+	if (dst != src) {
+		if (src_nents)
+			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+	} else {
+		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+	}
+
+	if (iv_dma)
+		dma_unmap_single(dev, iv_dma, ivsize,
+				 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
+							 DMA_TO_DEVICE);
+	if (qm_sg_bytes)
+		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
+}
+
+static void aead_unmap(struct device *dev,
+		       struct aead_edesc *edesc,
+		       struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	int ivsize = crypto_aead_ivsize(aead);
+
+	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
+		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
+	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+			     struct ablkcipher_edesc *edesc,
+			     struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
+		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
+static void aead_done(struct caam_drv_req *drv_req, u32 status)
+{
+	struct device *qidev;
+	struct aead_edesc *edesc;
+	struct aead_request *aead_req = drv_req->app_ctx;
+	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+	int ecode = 0;
+
+	qidev = caam_ctx->qidev;
+
+	if (unlikely(status)) {
+		u32 ssrc = status & JRSTA_SSRC_MASK;
+		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+
+		caam_jr_strstatus(qidev, status);
+		/*
+		 * If the h/w ICV (integrity) check failed, report -EBADMSG,
+		 * otherwise map the error to -EIO
+		 */
+		if (ssrc == JRSTA_SSRC_CCB_ERROR &&
+		    err_id == JRSTA_CCBERR_ERRID_ICVCHK)
+			ecode = -EBADMSG;
+		else
+			ecode = -EIO;
+	}
+
+	edesc = container_of(drv_req, typeof(*edesc), drv_req);
+	aead_unmap(qidev, edesc, aead_req);
+
+	aead_request_complete(aead_req, ecode);
+	qi_cache_free(edesc);
+}
+
+/*
+ * allocate and map the aead extended descriptor
+ */
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+					   bool encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 typeof(*alg), aead);
+	struct device *qidev = ctx->qidev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	struct aead_edesc *edesc;
+	dma_addr_t qm_sg_dma, iv_dma = 0;
+	int ivsize = 0;
+	unsigned int authsize = ctx->authsize;
+	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
+	int in_len, out_len;
+	struct qm_sg_entry *sg_table, *fd_sgt;
+	struct caam_drv_ctx *drv_ctx;
+	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+
+	drv_ctx = get_drv_ctx(ctx, op_type);
+	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+		return (struct aead_edesc *)drv_ctx;
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = qi_cache_alloc(GFP_DMA | flags);
+	if (unlikely(!edesc)) {
+		dev_err(qidev, "could not allocate extended descriptor\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (likely(req->src == req->dst)) {
+		src_nents = sg_nents_for_len(req->src, req->assoclen +
+					     req->cryptlen +
+					     (encrypt ? authsize : 0));
+		if (unlikely(src_nents < 0)) {
+			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+				req->assoclen + req->cryptlen +
+				(encrypt ? authsize : 0));
+			qi_cache_free(edesc);
+			return ERR_PTR(src_nents);
+		}
+
+		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(qidev, "unable to map source\n");
+			qi_cache_free(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
+	} else {
+		src_nents = sg_nents_for_len(req->src, req->assoclen +
+					     req->cryptlen);
+		if (unlikely(src_nents < 0)) {
+			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+				req->assoclen + req->cryptlen);
+			qi_cache_free(edesc);
+			return ERR_PTR(src_nents);
+		}
+
+		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+					     req->cryptlen +
+					     (encrypt ? authsize :
+							(-authsize)));
+		if (unlikely(dst_nents < 0)) {
+			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+				req->assoclen + req->cryptlen +
+				(encrypt ? authsize : (-authsize)));
+			qi_cache_free(edesc);
+			return ERR_PTR(dst_nents);
+		}
+
+		if (src_nents) {
+			mapped_src_nents = dma_map_sg(qidev, req->src,
+						      src_nents, DMA_TO_DEVICE);
+			if (unlikely(!mapped_src_nents)) {
+				dev_err(qidev, "unable to map source\n");
+				qi_cache_free(edesc);
+				return ERR_PTR(-ENOMEM);
+			}
+		} else {
+			mapped_src_nents = 0;
+		}
+
+		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(qidev, "unable to map destination\n");
+			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+			qi_cache_free(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
+		ivsize = crypto_aead_ivsize(aead);
+
+	/*
+	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+	 * The input is never contiguous (assoclen sits in its own buffer),
+	 * so an input S/G table is always needed.
+	 */
+	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
+		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+	sg_table = &edesc->sgt[0];
+	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
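+	/*
+	 * The edesc, the h/w S/G table and the IV share a single qi_cache
+	 * allocation, so together they must fit in CAAM_QI_MEMCACHE_SIZE.
+	 */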
+	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+		     CAAM_QI_MEMCACHE_SIZE)) {
+		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+			qm_sg_ents, ivsize);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (ivsize) {
+		u8 *iv = (u8 *)(sg_table + qm_sg_ents);
+
+		/* Make sure IV is located in a DMAable area */
+		memcpy(iv, req->iv, ivsize);
+
+		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+		if (dma_mapping_error(qidev, iv_dma)) {
+			dev_err(qidev, "unable to map IV\n");
+			caam_unmap(qidev, req->src, req->dst, src_nents,
+				   dst_nents, 0, 0, 0, 0, 0);
+			qi_cache_free(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->iv_dma = iv_dma;
+	edesc->drv_req.app_ctx = req;
+	edesc->drv_req.cbk = aead_done;
+	edesc->drv_req.drv_ctx = drv_ctx;
+
+	edesc->assoclen = cpu_to_caam32(req->assoclen);
+	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
+					     DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
+		dev_err(qidev, "unable to map assoclen\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, op_type, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
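+	/*
+	 * Fill the input S/G table: assoclen first, then the IV (if any),
+	 * then the source entries; destination entries follow only when
+	 * dst is a multi-segment scatterlist.
+	 */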
+	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
+	qm_sg_index++;
+	if (ivsize) {
+		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
+		qm_sg_index++;
+	}
+	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+	qm_sg_index += mapped_src_nents;
+
+	if (mapped_dst_nents > 1)
+		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+				 qm_sg_index, 0);
+
+	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, qm_sg_dma)) {
+		dev_err(qidev, "unable to map S/G table\n");
+		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, op_type, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->qm_sg_dma = qm_sg_dma;
+	edesc->qm_sg_bytes = qm_sg_bytes;
+
+	out_len = req->assoclen + req->cryptlen +
+		  (encrypt ? ctx->authsize : (-ctx->authsize));
+	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
+
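+	/*
+	 * Fill the frame descriptor S/G pair: fd_sgt[1] describes the job
+	 * input, fd_sgt[0] the output.
+	 */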
+	fd_sgt = &edesc->drv_req.fd_sgt[0];
+	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
+	if (req->dst == req->src) {
+		if (mapped_src_nents == 1)
+			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
+					 out_len, 0);
+		else
+			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
+					     (1 + !!ivsize) * sizeof(*sg_table),
+					     out_len, 0);
+	} else if (mapped_dst_nents == 1) {
+		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
+				 0);
+	} else {
+		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
+				     qm_sg_index, out_len, 0);
+	}
+
+	return edesc;
+}
+
+static inline int aead_crypt(struct aead_request *req, bool encrypt)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	int ret;
+
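+	/* QI congestion entered: push back, the caller is expected to retry */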
+	if (unlikely(caam_congested))
+		return -EAGAIN;
+
+	/* allocate extended descriptor */
+	edesc = aead_edesc_alloc(req, encrypt);
+	if (IS_ERR_OR_NULL(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(ctx->qidev, edesc, req);
+		qi_cache_free(edesc);
+	}
+
+	return ret;
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+	return aead_crypt(req, true);
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+	return aead_crypt(req, false);
+}
+
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return aead_crypt(req, true);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return aead_crypt(req, false);
+}
+
+static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+{
+	struct ablkcipher_edesc *edesc;
+	struct ablkcipher_request *req = drv_req->app_ctx;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *qidev = caam_ctx->qidev;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+#ifdef DEBUG
+	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
+#endif
+
+	edesc = container_of(drv_req, typeof(*edesc), drv_req);
+
+	if (status)
+		caam_jr_strstatus(qidev, status);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       edesc->src_nents > 1 ? 100 : ivsize, 1);
+	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
+		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+	ablkcipher_unmap(qidev, edesc, req);
+
+	/* If an initial IV was generated, copy it into the GIVCIPHER request */
+	if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
+		u8 *iv;
+		struct skcipher_givcrypt_request *greq;
+
+		greq = container_of(req, struct skcipher_givcrypt_request,
+				    creq);
+		iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
+		memcpy(greq->giv, iv, ivsize);
+	}
+
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block. This is used e.g. by the CTS mode.
+	 */
+	if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
+		scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+					 ivsize, ivsize, 0);
+
+	qi_cache_free(edesc);
+	ablkcipher_request_complete(req, status);
+}
+
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+						       *req, bool encrypt)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *qidev = ctx->qidev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma;
+	u8 *iv;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+	struct qm_sg_entry *sg_table, *fd_sgt;
+	struct caam_drv_ctx *drv_ctx;
+	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+
+	drv_ctx = get_drv_ctx(ctx, op_type);
+	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+		return (struct ablkcipher_edesc *)drv_ctx;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (unlikely(src_nents < 0)) {
+		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+			req->nbytes);
+		return ERR_PTR(src_nents);
+	}
+
+	if (unlikely(req->src != req->dst)) {
+		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+		if (unlikely(dst_nents < 0)) {
+			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+				req->nbytes);
+			return ERR_PTR(dst_nents);
+		}
+
+		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+					      DMA_TO_DEVICE);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(qidev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(qidev, "unable to map destination\n");
+			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
+	} else {
+		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(qidev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
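+	/*
+	 * Input S/G table layout: [IV, src entries]; dst entries are
+	 * appended at dst_sg_idx only when dst spans multiple segments.
+	 */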
+	qm_sg_ents = 1 + mapped_src_nents;
+	dst_sg_idx = qm_sg_ents;
+
+	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+			qm_sg_ents, ivsize);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* allocate space for base edesc, link tables and IV */
+	edesc = qi_cache_alloc(GFP_DMA | flags);
+	if (unlikely(!edesc)) {
+		dev_err(qidev, "could not allocate extended descriptor\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Make sure IV is located in a DMAable area */
+	sg_table = &edesc->sgt[0];
+	iv = (u8 *)(sg_table + qm_sg_ents);
+	memcpy(iv, req->info, ivsize);
+
+	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, iv_dma)) {
+		dev_err(qidev, "unable to map IV\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->iv_dma = iv_dma;
+	edesc->qm_sg_bytes = qm_sg_bytes;
+	edesc->drv_req.app_ctx = req;
+	edesc->drv_req.cbk = ablkcipher_done;
+	edesc->drv_req.drv_ctx = drv_ctx;
+
+	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+
+	if (mapped_dst_nents > 1)
+		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+				 dst_sg_idx, 0);
+
+	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
+		dev_err(qidev, "unable to map S/G table\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, op_type, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fd_sgt = &edesc->drv_req.fd_sgt[0];
+
+	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
+				  ivsize + req->nbytes, 0);
+
+	if (req->src == req->dst) {
+		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
+				     sizeof(*sg_table), req->nbytes, 0);
+	} else if (mapped_dst_nents > 1) {
+		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+				     sizeof(*sg_table), req->nbytes, 0);
+	} else {
+		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
+				 req->nbytes, 0);
+	}
+
+	return edesc;
+}
+
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+	struct skcipher_givcrypt_request *creq)
+{
+	struct ablkcipher_request *req = &creq->creq;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *qidev = ctx->qidev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma;
+	u8 *iv;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	struct qm_sg_entry *sg_table, *fd_sgt;
+	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+	struct caam_drv_ctx *drv_ctx;
+
+	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
+	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+		return (struct ablkcipher_edesc *)drv_ctx;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (unlikely(src_nents < 0)) {
+		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+			req->nbytes);
+		return ERR_PTR(src_nents);
+	}
+
+	if (unlikely(req->src != req->dst)) {
+		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+		if (unlikely(dst_nents < 0)) {
+			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+				req->nbytes);
+			return ERR_PTR(dst_nents);
+		}
+
+		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+					      DMA_TO_DEVICE);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(qidev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+					      DMA_FROM_DEVICE);
+		if (unlikely(!mapped_dst_nents)) {
+			dev_err(qidev, "unable to map destination\n");
+			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
+	} else {
+		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+					      DMA_BIDIRECTIONAL);
+		if (unlikely(!mapped_src_nents)) {
+			dev_err(qidev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		dst_nents = src_nents;
+		mapped_dst_nents = src_nents;
+	}
+
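+	/*
+	 * Unlike plain encrypt, the h/w writes the generated IV here, so
+	 * the IV slot is part of the output S/G table: [src][IV, dst].
+	 */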
+	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+	dst_sg_idx = qm_sg_ents;
+
+	qm_sg_ents += 1 + mapped_dst_nents;
+	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+			qm_sg_ents, ivsize);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* allocate space for base edesc, link tables and IV */
+	edesc = qi_cache_alloc(GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(qidev, "could not allocate extended descriptor\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Make sure IV is located in a DMAable area */
+	sg_table = &edesc->sgt[0];
+	iv = (u8 *)(sg_table + qm_sg_ents);
+	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
+	if (dma_mapping_error(qidev, iv_dma)) {
+		dev_err(qidev, "unable to map IV\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->iv_dma = iv_dma;
+	edesc->qm_sg_bytes = qm_sg_bytes;
+	edesc->drv_req.app_ctx = req;
+	edesc->drv_req.cbk = ablkcipher_done;
+	edesc->drv_req.drv_ctx = drv_ctx;
+
+	if (mapped_src_nents > 1)
+		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
+
+	dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+	sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
+			 0);
+
+	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
+		dev_err(qidev, "unable to map S/G table\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
+		qi_cache_free(edesc);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fd_sgt = &edesc->drv_req.fd_sgt[0];
+
+	if (mapped_src_nents > 1)
+		dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
+				     0);
+	else
+		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
+				 req->nbytes, 0);
+
+	dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+			     sizeof(*sg_table), ivsize + req->nbytes, 0);
+
+	return edesc;
+}
+
+static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	int ret;
+
+	if (unlikely(caam_congested))
+		return -EAGAIN;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(req, encrypt);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block.
+	 */
+	if (!encrypt)
+		scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+					 ivsize, ivsize, 0);
+
+	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(ctx->qidev, edesc, req);
+		qi_cache_free(edesc);
+	}
+
+	return ret;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	return ablkcipher_crypt(req, true);
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	return ablkcipher_crypt(req, false);
+}
+
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+{
+	struct ablkcipher_request *req = &creq->creq;
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	int ret;
+
+	if (unlikely(caam_congested))
+		return -EAGAIN;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_giv_edesc_alloc(creq);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(ctx->qidev, edesc, req);
+		qi_cache_free(edesc);
+	}
+
+	return ret;
+}
+
+#define template_ablkcipher	template_u.ablkcipher
+struct caam_alg_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	u32 type;
+	union {
+		struct ablkcipher_alg ablkcipher;
+	} template_u;
+	u32 class1_alg_type;
+	u32 class2_alg_type;
+};
+
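+/*
+ * Modes where the driver generates the IV (cbc, rfc3686) are registered
+ * as GIVCIPHER and provide .givencrypt; ctr(aes) and xts(aes) take a
+ * caller-provided IV and remain plain ABLKCIPHER.
+ */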
+static struct caam_alg_template driver_algs[] = {
+	/* ablkcipher descriptor */
+	{
+		.name = "cbc(aes)",
+		.driver_name = "cbc-aes-caam-qi",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des3_ede)",
+		.driver_name = "cbc-3des-caam-qi",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "cbc(des)",
+		.driver_name = "cbc-des-caam-qi",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "ctr(aes)",
+		.driver_name = "ctr-aes-caam-qi",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "chainiv",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.name = "rfc3686(ctr(aes))",
+		.driver_name = "rfc3686-ctr-aes-caam-qi",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = AES_MIN_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.name = "xts(aes)",
+		.driver_name = "xts-aes-caam-qi",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = xts_ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "eseqiv",
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+	},
+};
+
+static struct caam_aead_alg driver_aeads[] = {
+	{
+		.aead = {
+			.base = {
+				.cra_name = "rfc4106(gcm(aes))",
+				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc4106_setkey,
+			.setauthsize = rfc4106_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = 8,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "rfc4543(gcm(aes))",
+				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
+				.cra_blocksize = 1,
+			},
+			.setkey = rfc4543_setkey,
+			.setauthsize = rfc4543_setauthsize,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
+			.ivsize = 8,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		},
+	},
+	/* Galois Counter Mode */
+	{
+		.aead = {
+			.base = {
+				.cra_name = "gcm(aes)",
+				.cra_driver_name = "gcm-aes-caam-qi",
+				.cra_blocksize = 1,
+			},
+			.setkey = gcm_setkey,
+			.setauthsize = gcm_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = 12,
+			.maxauthsize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		}
+	},
+	/* single-pass ipsec_esp descriptor */
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-cbc-aes-"
+						   "caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-cbc-aes-"
+						   "caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-aes-caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-cbc-aes-"
+						   "caam-qi",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-"
+						   "cbc-des3_ede-caam-qi",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-cbc-des-"
+						   "caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-cbc-des-"
+						   "caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-cbc-des-"
+						   "caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-des-caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-cbc-des-"
+						   "caam-qi",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.geniv = true,
+		},
+	},
+};
+
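+/*
+ * Note: each authenc(hmac(X),cbc(Y)) template above appears twice - once
+ * plain and once wrapped in echainiv() with .geniv = true - so IV
+ * generation can also be handled through the driver's GIVENCRYPT path.
+ */
+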
+struct caam_crypto_alg {
+	struct list_head entry;
+	struct crypto_alg crypto_alg;
+	struct caam_alg_entry caam;
+};
+
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+			    bool uses_dkp)
+{
+	struct caam_drv_private *priv;
+
+	/*
+	 * distribute tfms across job rings to ensure in-order
+	 * crypto request processing per tfm
+	 */
+	ctx->jrdev = caam_jr_alloc();
+	if (IS_ERR(ctx->jrdev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(ctx->jrdev);
+	}
+
+	priv = dev_get_drvdata(ctx->jrdev->parent);
+	if (priv->era >= 6 && uses_dkp)
+		ctx->dir = DMA_BIDIRECTIONAL;
+	else
+		ctx->dir = DMA_TO_DEVICE;
+
+	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
+				      ctx->dir);
+	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
+		dev_err(ctx->jrdev, "unable to map key\n");
+		caam_jr_free(ctx->jrdev);
+		return -ENOMEM;
+	}
+
+	/* copy descriptor header template value */
+	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+
+	ctx->qidev = priv->qidev;
+
+	spin_lock_init(&ctx->lock);
+	ctx->drv_ctx[ENCRYPT] = NULL;
+	ctx->drv_ctx[DECRYPT] = NULL;
+	ctx->drv_ctx[GIVENCRYPT] = NULL;
+
+	return 0;
+}
+
+static int caam_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+							crypto_alg);
+	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	return caam_init_common(ctx, &caam_alg->caam, false);
+}
+
+static int caam_aead_init(struct crypto_aead *tfm)
+{
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+						      aead);
+	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+
+	return caam_init_common(ctx, &caam_alg->caam,
+				alg->setkey == aead_setkey);
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
+	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
+	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
+	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
+
+	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
+
+	caam_jr_free(ctx->jrdev);
+}
+
+static void caam_cra_exit(struct crypto_tfm *tfm)
+{
+	caam_exit_common(crypto_tfm_ctx(tfm));
+}
+
+static void caam_aead_exit(struct crypto_aead *tfm)
+{
+	caam_exit_common(crypto_aead_ctx(tfm));
+}
+
+static struct list_head alg_list;
+
+static void __exit caam_qi_algapi_exit(void)
+{
+	struct caam_crypto_alg *t_alg, *n;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+		struct caam_aead_alg *t_alg = driver_aeads + i;
+
+		if (t_alg->registered)
+			crypto_unregister_aead(&t_alg->aead);
+	}
+
+	if (!alg_list.next)
+		return;
+
+	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
+		crypto_unregister_alg(&t_alg->crypto_alg);
+		list_del(&t_alg->entry);
+		kfree(t_alg);
+	}
+}
+
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
+					      *template)
+{
+	struct caam_crypto_alg *t_alg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg)
+		return ERR_PTR(-ENOMEM);
+
+	alg = &t_alg->crypto_alg;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 template->driver_name);
+	alg->cra_module = THIS_MODULE;
+	alg->cra_init = caam_cra_init;
+	alg->cra_exit = caam_cra_exit;
+	alg->cra_priority = CAAM_CRA_PRIORITY;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_ctxsize = sizeof(struct caam_ctx);
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+			 template->type;
+	switch (template->type) {
+	case CRYPTO_ALG_TYPE_GIVCIPHER:
+		alg->cra_type = &crypto_givcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg->cra_type = &crypto_ablkcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
+	}
+
+	t_alg->caam.class1_alg_type = template->class1_alg_type;
+	t_alg->caam.class2_alg_type = template->class2_alg_type;
+
+	return t_alg;
+}
+
+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
+{
+	struct aead_alg *alg = &t_alg->aead;
+
+	alg->base.cra_module = THIS_MODULE;
+	alg->base.cra_priority = CAAM_CRA_PRIORITY;
+	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+	alg->init = caam_aead_init;
+	alg->exit = caam_aead_exit;
+}
+
+static int __init caam_qi_algapi_init(void)
+{
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	struct caam_drv_private *priv;
+	int i = 0, err = 0;
+	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+	unsigned int md_limit = SHA512_DIGEST_SIZE;
+	bool registered = false;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	of_node_put(dev_node);
+	if (!pdev)
+		return -ENODEV;
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv || !priv->qi_present)
+		return -ENODEV;
+
+	if (caam_dpaa2) {
+		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
+		return -ENODEV;
+	}
+
+	INIT_LIST_HEAD(&alg_list);
+
+	/*
+	 * Register crypto algorithms the device supports.
+	 * First, detect presence and attributes of DES, AES, and MD blocks.
+	 */
+	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+	/*
+	 * If MD is present, limit the supported digest size: LP256
+	 * (low-power) MDHA blocks only handle digests up to SHA-256,
+	 * so larger-digest algorithms are skipped below.
+	 */
+	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+		md_limit = SHA256_DIGEST_SIZE;
+
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct caam_crypto_alg *t_alg;
+		struct caam_alg_template *alg = driver_algs + i;
+		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		t_alg = caam_alg_alloc(alg);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			dev_warn(priv->qidev, "%s alg allocation failed\n",
+				 alg->driver_name);
+			continue;
+		}
+
+		err = crypto_register_alg(&t_alg->crypto_alg);
+		if (err) {
+			dev_warn(priv->qidev, "%s alg registration failed\n",
+				 t_alg->crypto_alg.cra_driver_name);
+			kfree(t_alg);
+			continue;
+		}
+
+		list_add_tail(&t_alg->entry, &alg_list);
+		registered = true;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+		struct caam_aead_alg *t_alg = driver_aeads + i;
+		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		/*
+		 * Check support for AES algorithms not available
+		 * on LP devices.
+		 */
+		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
+		    (alg_aai == OP_ALG_AAI_GCM))
+			continue;
+
+		/*
+		 * Skip algorithms requiring message digests
+		 * if MD or MD size is not supported by device.
+		 */
+		if (c2_alg_sel &&
+		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
+			continue;
+
+		caam_aead_alg_init(t_alg);
+
+		err = crypto_register_aead(&t_alg->aead);
+		if (err) {
+			pr_warn("%s alg registration failed\n",
+				t_alg->aead.base.cra_driver_name);
+			continue;
+		}
+
+		t_alg->registered = true;
+		registered = true;
+	}
+
+	if (registered)
+		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
+
+	return err;
+}
+
+module_init(caam_qi_algapi_init);
+module_exit(caam_qi_algapi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
+MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
new file mode 100644
index 0000000..43975ab
--- /dev/null
+++ b/drivers/crypto/caam/caamhash.c
@@ -0,0 +1,1962 @@
+/*
+ * caam - Freescale FSL CAAM support for ahash functions of crypto API
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+ * relationship of digest job descriptor or first job descriptor after init to
+ * shared descriptors:
+ *
+ * ---------------                     ---------------
+ * | JobDesc #1  |-------------------->|  ShareDesc  |
+ * | *(packet 1) |                     |  (hashKey)  |
+ * ---------------                     | (operation) |
+ *                                     ---------------
+ *
+ * relationship of subsequent job descriptors to shared descriptors:
+ *
+ * ---------------                     ---------------
+ * | JobDesc #2  |-------------------->|  ShareDesc  |
+ * | *(packet 2) |      |------------->|  (hashKey)  |
+ * ---------------      |    |-------->| (operation) |
+ *       .              |    |         | (load ctx2) |
+ *       .              |    |         ---------------
+ * ---------------      |    |
+ * | JobDesc #3  |------|    |
+ * | *(packet 3) |           |
+ * ---------------           |
+ *       .                   |
+ *       .                   |
+ * ---------------           |
+ * | JobDesc #4  |------------
+ * | *(packet 4) |
+ * ---------------
+ *
+ * The SharedDesc never changes for a connection unless rekeyed, but
+ * each packet will likely be in a different place. So all we need
+ * to know to process the packet is where the input is, where the
+ * output goes, and what context we want to process with. Context is
+ * in the SharedDesc, packet references in the JobDesc.
+ *
+ * So, a job desc looks like:
+ *
+ * ---------------------
+ * | Header            |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR       |
+ * | (output buffer)   |
+ * | (output length)   |
+ * | SEQ_IN_PTR        |
+ * | (input buffer)    |
+ * | (input length)    |
+ * ---------------------
+ */
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "key_gen.h"
+
+#define CAAM_CRA_PRIORITY		3000
+
+/* max hash key is max split key size */
+#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)
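+/*
+ * For example, an HMAC-SHA512 split key holds the precomputed inner- and
+ * outer-pad hash states, i.e. 2 * SHA512_DIGEST_SIZE = 128 bytes, which is
+ * the bound above.
+ */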
+
+#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
+#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
+
+/* length of descriptors text */
+#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+
+#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
+					 CAAM_MAX_HASH_KEY_SIZE)
+#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
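+/*
+ * Worked example, assuming CAAM_CMD_SZ is 4 bytes (one 32-bit command
+ * word): DESC_AHASH_FINAL_LEN = (3 + 5) * 4 = 32 bytes, so
+ * DESC_HASH_MAX_USED_BYTES = 32 + 128 = 160 bytes, i.e.
+ * DESC_HASH_MAX_USED_LEN = 40 command words.
+ */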
+
+/* caam context sizes for hashes: running digest + 8 */
+#define HASH_MSG_LEN			8
+#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
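+/*
+ * E.g. for SHA-256 the running context is HASH_MSG_LEN +
+ * SHA256_DIGEST_SIZE = 40 bytes; the extra 8 bytes hold the 64-bit
+ * running message length maintained alongside the digest.
+ */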
+
+#ifdef DEBUG
+/* for print_hex_dumps with line references */
+#define debug(format, arg...) printk(format, arg)
+#else
+#define debug(format, arg...)
+#endif
+
+static struct list_head hash_list;
+
+/* ahash per-session context */
+struct caam_hash_ctx {
+	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
+	dma_addr_t sh_desc_update_first_dma;
+	dma_addr_t sh_desc_fin_dma;
+	dma_addr_t sh_desc_digest_dma;
+	enum dma_data_direction dir;
+	struct device *jrdev;
+	u8 key[CAAM_MAX_HASH_KEY_SIZE];
+	int ctx_len;
+	struct alginfo adata;
+};
+
+/* ahash state */
+struct caam_hash_state {
+	dma_addr_t buf_dma;
+	dma_addr_t ctx_dma;
+	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+	int buflen_0;
+	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+	int buflen_1;
+	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
+	int (*update)(struct ahash_request *req);
+	int (*final)(struct ahash_request *req);
+	int (*finup)(struct ahash_request *req);
+	int current_buf;
+};
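+
+/*
+ * The update/final/finup hooks form a small state machine: they point at
+ * the no-context variants until a first job has produced a running context
+ * in caam_ctx, after which ahash_update_first()/ahash_update_no_ctx()
+ * switch them to the _ctx variants.
+ */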
+
+struct caam_export_state {
+	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
+	u8 caam_ctx[MAX_CTX_LEN];
+	int buflen;
+	int (*update)(struct ahash_request *req);
+	int (*final)(struct ahash_request *req);
+	int (*finup)(struct ahash_request *req);
+};
+
+static inline void switch_buf(struct caam_hash_state *state)
+{
+	state->current_buf ^= 1;
+}
+
+static inline u8 *current_buf(struct caam_hash_state *state)
+{
+	return state->current_buf ? state->buf_1 : state->buf_0;
+}
+
+static inline u8 *alt_buf(struct caam_hash_state *state)
+{
+	return state->current_buf ? state->buf_0 : state->buf_1;
+}
+
+static inline int *current_buflen(struct caam_hash_state *state)
+{
+	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
+}
+
+static inline int *alt_buflen(struct caam_hash_state *state)
+{
+	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
+}
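+
+/*
+ * Illustrative ping-pong usage (a sketch, not a verbatim excerpt; offset
+ * stands for where the residue starts in req->src): an update stages the
+ * sub-block residue in the alternate buffer, then the completion callback
+ * flips the buffer roles:
+ *
+ *	scatterwalk_map_and_copy(alt_buf(state), req->src, offset,
+ *				 *alt_buflen(state), 0);
+ *	... enqueue job to CAAM ...
+ *	switch_buf(state);		(in the ahash_done_bi() callback)
+ */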
+
+/* Common job descriptor seq in/out ptr routines */
+
+/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
+static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
+				      struct caam_hash_state *state,
+				      int ctx_len)
+{
+	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
+					ctx_len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, state->ctx_dma)) {
+		dev_err(jrdev, "unable to map ctx\n");
+		state->ctx_dma = 0;
+		return -ENOMEM;
+	}
+
+	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
+
+	return 0;
+}
+
+/* Map req->result, and append seq_out_ptr command that points to it */
+static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
+						u8 *result, int digestsize)
+{
+	dma_addr_t dst_dma;
+
+	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
+	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+
+	return dst_dma;
+}
+
+/* Map current buffer in state (if length > 0) and put it in link table */
+static inline int buf_map_to_sec4_sg(struct device *jrdev,
+				     struct sec4_sg_entry *sec4_sg,
+				     struct caam_hash_state *state)
+{
+	int buflen = *current_buflen(state);
+
+	if (!buflen)
+		return 0;
+
+	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
+					DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, state->buf_dma)) {
+		dev_err(jrdev, "unable to map buf\n");
+		state->buf_dma = 0;
+		return -ENOMEM;
+	}
+
+	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
+
+	return 0;
+}
+
+/* Map state->caam_ctx, and add it to link table */
+static inline int ctx_map_to_sec4_sg(struct device *jrdev,
+				     struct caam_hash_state *state, int ctx_len,
+				     struct sec4_sg_entry *sec4_sg, u32 flag)
+{
+	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+	if (dma_mapping_error(jrdev, state->ctx_dma)) {
+		dev_err(jrdev, "unable to map ctx\n");
+		state->ctx_dma = 0;
+		return -ENOMEM;
+	}
+
+	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
+
+	return 0;
+}
+
+/*
+ * For ahash update, final and finup (import_ctx = true)
+ *     import context, read and write to seqout
+ * For ahash update_first and digest (import_ctx = false)
+ *     read and write to seqout
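+ *
+ * The four shared descriptors built by ahash_set_sh_desc() below use:
+ *     update       -> OP_ALG_AS_UPDATE,    import_ctx = true
+ *     update_first -> OP_ALG_AS_INIT,      import_ctx = false
+ *     fin          -> OP_ALG_AS_FINALIZE,  import_ctx = true
+ *     digest       -> OP_ALG_AS_INITFINAL, import_ctx = false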
+ */
+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
+				     struct caam_hash_ctx *ctx, bool import_ctx,
+				     int era)
+{
+	u32 op = ctx->adata.algtype;
+	u32 *skip_key_load;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Append key if it has been set; ahash update excluded */
+	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
+		/* Skip key loading if already shared */
+		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_SHRD);
+
+		if (era < 6)
+			append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+					  ctx->adata.keylen, CLASS_2 |
+					  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+		else
+			append_proto_dkp(desc, &ctx->adata);
+
+		set_jump_tgt_here(desc, skip_key_load);
+
+		op |= OP_ALG_AAI_HMAC_PRECOMP;
+	}
+
+	/* If needed, import context from software */
+	if (import_ctx)
+		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
+				LDST_SRCDST_BYTE_CONTEXT);
+
+	/* Class 2 operation */
+	append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+	/*
+	 * Load from buf and/or src and write to req->result or state->context
+	 * Calculate remaining bytes to read
+	 */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	/* Read remaining bytes */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+			     FIFOLD_TYPE_MSG | KEY_VLF);
+	/* Store class2 context bytes */
+	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+}
+
+static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+	u32 *desc;
+
+	ctx->adata.key_virt = ctx->key;
+
+	/* ahash_update shared descriptor */
+	desc = ctx->sh_desc_update;
+	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
+			  ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+				   desc_bytes(desc), ctx->dir);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ahash update shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	/* ahash_update_first shared descriptor */
+	desc = ctx->sh_desc_update_first;
+	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
+			  ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+				   desc_bytes(desc), ctx->dir);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ahash update first shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	/* ahash_final shared descriptor */
+	desc = ctx->sh_desc_fin;
+	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
+			  ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+				   desc_bytes(desc), ctx->dir);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/* ahash_digest shared descriptor */
+	desc = ctx->sh_desc_digest;
+	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
+			  ctrlpriv->era);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+				   desc_bytes(desc), ctx->dir);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ahash digest shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	return 0;
+}
+
+/* Pre-digest the key when it is longer than the algorithm's block size */
+static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+			   u32 *keylen, u8 *key_out, u32 digestsize)
+{
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+	struct split_key_result result;
+	dma_addr_t src_dma, dst_dma;
+	int ret;
+
+	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+	if (!desc) {
+		dev_err(jrdev, "unable to allocate key input memory\n");
+		return -ENOMEM;
+	}
+
+	init_job_desc(desc, 0);
+
+	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
+				 DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, src_dma)) {
+		dev_err(jrdev, "unable to map key input memory\n");
+		kfree(desc);
+		return -ENOMEM;
+	}
+	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
+				 DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, dst_dma)) {
+		dev_err(jrdev, "unable to map key output memory\n");
+		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+		kfree(desc);
+		return -ENOMEM;
+	}
+
+	/* Job descriptor to perform unkeyed hash on key_in */
+	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
+			 OP_ALG_AS_INITFINAL);
+	append_seq_in_ptr(desc, src_dma, *keylen, 0);
+	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
+			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
+	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	result.err = 0;
+	init_completion(&result.completion);
+
+	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+	if (!ret) {
+		/* in progress */
+		wait_for_completion(&result.completion);
+		ret = result.err;
+#ifdef DEBUG
+		print_hex_dump(KERN_ERR,
+			       "digested key@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+			       digestsize, 1);
+#endif
+	}
+	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+
+	*keylen = digestsize;
+
+	kfree(desc);
+
+	return ret;
+}
+
+static int ahash_setkey(struct crypto_ahash *ahash,
+			const u8 *key, unsigned int keylen)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+	int ret;
+	u8 *hashed_key = NULL;
+
+#ifdef DEBUG
+	printk(KERN_ERR "keylen %d\n", keylen);
+#endif
+
+	if (keylen > blocksize) {
+		hashed_key = kmalloc_array(digestsize,
+					   sizeof(*hashed_key),
+					   GFP_KERNEL | GFP_DMA);
+		if (!hashed_key)
+			return -ENOMEM;
+		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
+				      digestsize);
+		if (ret)
+			goto bad_free_key;
+		key = hashed_key;
+	}
+
+	/*
+	 * If DKP is supported, use it in the shared descriptor to generate
+	 * the split key.
+	 */
+	if (ctrlpriv->era >= 6) {
+		ctx->adata.key_inline = true;
+		ctx->adata.keylen = keylen;
+		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+						      OP_ALG_ALGSEL_MASK);
+
+		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
+			goto bad_free_key;
+
+		memcpy(ctx->key, key, keylen);
+	} else {
+		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
+				    keylen, CAAM_MAX_HASH_KEY_SIZE);
+		if (ret)
+			goto bad_free_key;
+	}
+
+	kfree(hashed_key);
+	return ahash_set_sh_desc(ahash);
+ bad_free_key:
+	kfree(hashed_key);
+	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
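+
+/*
+ * Illustrative caller flow reaching ahash_setkey() via the generic crypto
+ * API (a sketch, not part of this driver):
+ *
+ *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
+ *
+ *	crypto_ahash_setkey(tfm, key, keylen);
+ *
+ * A key longer than the block size is pre-digested by hash_digest_key()
+ * before the (split) key is generated.
+ */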
+
+/*
+ * ahash_edesc - s/w-extended ahash descriptor
+ * @dst_dma: physical mapped address of req->result
+ * @sec4_sg_dma: physical mapped address of h/w link table
+ * @src_nents: number of segments in input scatterlist
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ * @sec4_sg: h/w link table
+ */
+struct ahash_edesc {
+	dma_addr_t dst_dma;
+	dma_addr_t sec4_sg_dma;
+	int src_nents;
+	int sec4_sg_bytes;
+	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
+	struct sec4_sg_entry sec4_sg[0];
+};
+
+static inline void ahash_unmap(struct device *dev,
+			struct ahash_edesc *edesc,
+			struct ahash_request *req, int dst_len)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	if (edesc->src_nents)
+		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+	if (edesc->dst_dma)
+		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
+
+	if (edesc->sec4_sg_bytes)
+		dma_unmap_single(dev, edesc->sec4_sg_dma,
+				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
+
+	if (state->buf_dma) {
+		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+				 DMA_TO_DEVICE);
+		state->buf_dma = 0;
+	}
+}
+
+static inline void ahash_unmap_ctx(struct device *dev,
+			struct ahash_edesc *edesc,
+			struct ahash_request *req, int dst_len, u32 flag)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	if (state->ctx_dma) {
+		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+		state->ctx_dma = 0;
+	}
+	ahash_unmap(dev, edesc, req, dst_len);
+}
+
+static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
+		       void *context)
+{
+	struct ahash_request *req = context;
+	struct ahash_edesc *edesc;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	int digestsize = crypto_ahash_digestsize(ahash);
+#ifdef DEBUG
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	ahash_unmap(jrdev, edesc, req, digestsize);
+	kfree(edesc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+		       ctx->ctx_len, 1);
+	if (req->result)
+		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+			       digestsize, 1);
+#endif
+
+	req->base.complete(&req->base, err);
+}
+
+static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
+			    void *context)
+{
+	struct ahash_request *req = context;
+	struct ahash_edesc *edesc;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+#ifdef DEBUG
+	int digestsize = crypto_ahash_digestsize(ahash);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+	switch_buf(state);
+	kfree(edesc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+		       ctx->ctx_len, 1);
+	if (req->result)
+		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+			       digestsize, 1);
+#endif
+
+	req->base.complete(&req->base, err);
+}
+
+static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
+			       void *context)
+{
+	struct ahash_request *req = context;
+	struct ahash_edesc *edesc;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	int digestsize = crypto_ahash_digestsize(ahash);
+#ifdef DEBUG
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
+	kfree(edesc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+		       ctx->ctx_len, 1);
+	if (req->result)
+		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+			       digestsize, 1);
+#endif
+
+	req->base.complete(&req->base, err);
+}
+
+static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
+			       void *context)
+{
+	struct ahash_request *req = context;
+	struct ahash_edesc *edesc;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+#ifdef DEBUG
+	int digestsize = crypto_ahash_digestsize(ahash);
+
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+	switch_buf(state);
+	kfree(edesc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+		       ctx->ctx_len, 1);
+	if (req->result)
+		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+			       digestsize, 1);
+#endif
+
+	req->base.complete(&req->base, err);
+}
+
+/*
+ * Allocate an extended descriptor, which contains the hardware job
+ * descriptor and space for a hardware scatter table with sg_num entries.
+ */
+static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
+					     int sg_num, u32 *sh_desc,
+					     dma_addr_t sh_desc_dma,
+					     gfp_t flags)
+{
+	struct ahash_edesc *edesc;
+	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
+
+	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
+		return NULL;
+	}
+
+	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
+			     HDR_SHARE_DEFER | HDR_REVERSE);
+
+	return edesc;
+}
+
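+/*
+ * Append the SEQ IN PTR command for the request source to the job
+ * descriptor. A single mapped segment with no prepended entries is
+ * referenced directly; otherwise a sec4 S/G table is used, whose first
+ * first_sg entries (running context and/or buffered data) have already
+ * been populated by the caller. first_bytes is the byte count covered by
+ * those prepended entries.
+ */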
+static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
+			       struct ahash_edesc *edesc,
+			       struct ahash_request *req, int nents,
+			       unsigned int first_sg,
+			       unsigned int first_bytes, size_t to_hash)
+{
+	dma_addr_t src_dma;
+	u32 options;
+
+	if (nents > 1 || first_sg) {
+		struct sec4_sg_entry *sg = edesc->sec4_sg;
+		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
+
+		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+
+		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
+		if (dma_mapping_error(ctx->jrdev, src_dma)) {
+			dev_err(ctx->jrdev, "unable to map S/G table\n");
+			return -ENOMEM;
+		}
+
+		edesc->sec4_sg_bytes = sgsize;
+		edesc->sec4_sg_dma = src_dma;
+		options = LDST_SGF;
+	} else {
+		src_dma = sg_dma_address(req->src);
+		options = 0;
+	}
+
+	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
+			  options);
+
+	return 0;
+}
+
+/* submit update job descriptor */
+static int ahash_update_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	u8 *buf = current_buf(state);
+	int *buflen = current_buflen(state);
+	u8 *next_buf = alt_buf(state);
+	int *next_buflen = alt_buflen(state), last_buflen;
+	int in_len = *buflen + req->nbytes, to_hash;
+	u32 *desc;
+	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
+	struct ahash_edesc *edesc;
+	int ret = 0;
+
+	last_buflen = *next_buflen;
+	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+	to_hash = in_len - *next_buflen;
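+	/*
+	 * Worked example (illustrative): with a 64-byte block size,
+	 * *buflen = 10 and req->nbytes = 100, in_len = 110, so
+	 * *next_buflen = 110 & 63 = 46 and to_hash = 64: only whole
+	 * blocks go to the engine, the residue is buffered for the next
+	 * update/final.
+	 */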
+
+	if (to_hash) {
+		src_nents = sg_nents_for_len(req->src,
+					     req->nbytes - (*next_buflen));
+		if (src_nents < 0) {
+			dev_err(jrdev, "Invalid number of src SG.\n");
+			return src_nents;
+		}
+
+		if (src_nents) {
+			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+						  DMA_TO_DEVICE);
+			if (!mapped_nents) {
+				dev_err(jrdev, "unable to DMA map source\n");
+				return -ENOMEM;
+			}
+		} else {
+			mapped_nents = 0;
+		}
+
+		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
+		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
+				 sizeof(struct sec4_sg_entry);
+
+		/*
+		 * allocate space for base edesc and hw desc commands,
+		 * link tables
+		 */
+		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+					  ctx->sh_desc_update,
+					  ctx->sh_desc_update_dma, flags);
+		if (!edesc) {
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+			return -ENOMEM;
+		}
+
+		edesc->src_nents = src_nents;
+		edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
+		if (ret)
+			goto unmap_ctx;
+
+		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+		if (ret)
+			goto unmap_ctx;
+
+		if (mapped_nents) {
+			sg_to_sec4_sg_last(req->src, mapped_nents,
+					   edesc->sec4_sg + sec4_sg_src_index,
+					   0);
+			if (*next_buflen)
+				scatterwalk_map_and_copy(next_buf, req->src,
+							 to_hash - *buflen,
+							 *next_buflen, 0);
+		} else {
+			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
+					    1);
+		}
+
+		desc = edesc->hw_desc;
+
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						     sec4_sg_bytes,
+						     DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+			dev_err(jrdev, "unable to map S/G table\n");
+			ret = -ENOMEM;
+			goto unmap_ctx;
+		}
+
+		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
+				       to_hash, LDST_SGF);
+
+		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
+
+#ifdef DEBUG
+		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			       desc_bytes(desc), 1);
+#endif
+
+		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
+		if (ret)
+			goto unmap_ctx;
+
+		ret = -EINPROGRESS;
+	} else if (*next_buflen) {
+		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+					 req->nbytes, 0);
+		*buflen = *next_buflen;
+		*next_buflen = last_buflen;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+		       *next_buflen, 1);
+#endif
+
+	return ret;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+	kfree(edesc);
+	return ret;
+}
+
+static int ahash_final_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int buflen = *current_buflen(state);
+	u32 *desc;
+	int sec4_sg_bytes, sec4_sg_src_index;
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct ahash_edesc *edesc;
+	int ret;
+
+	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
+				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
+				  flags);
+	if (!edesc)
+		return -ENOMEM;
+
+	desc = edesc->hw_desc;
+
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+				 edesc->sec4_sg, DMA_TO_DEVICE);
+	if (ret)
+		goto unmap_ctx;
+
+	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+	if (ret)
+		goto unmap_ctx;
+
+	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
+
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		ret = -ENOMEM;
+		goto unmap_ctx;
+	}
+
+	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
+			  LDST_SGF);
+
+	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		ret = -ENOMEM;
+		goto unmap_ctx;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
+	if (ret)
+		goto unmap_ctx;
+
+	return -EINPROGRESS;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	kfree(edesc);
+	return ret;
+}
+
+static int ahash_finup_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int buflen = *current_buflen(state);
+	u32 *desc;
+	int sec4_sg_src_index;
+	int src_nents, mapped_nents;
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct ahash_edesc *edesc;
+	int ret;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (src_nents < 0) {
+		dev_err(jrdev, "Invalid number of src SG.\n");
+		return src_nents;
+	}
+
+	if (src_nents) {
+		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+					  DMA_TO_DEVICE);
+		if (!mapped_nents) {
+			dev_err(jrdev, "unable to DMA map source\n");
+			return -ENOMEM;
+		}
+	} else {
+		mapped_nents = 0;
+	}
+
+	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
+				  flags);
+	if (!edesc) {
+		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	desc = edesc->hw_desc;
+
+	edesc->src_nents = src_nents;
+
+	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+				 edesc->sec4_sg, DMA_TO_DEVICE);
+	if (ret)
+		goto unmap_ctx;
+
+	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+	if (ret)
+		goto unmap_ctx;
+
+	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
+				  sec4_sg_src_index, ctx->ctx_len + buflen,
+				  req->nbytes);
+	if (ret)
+		goto unmap_ctx;
+
+	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		ret = -ENOMEM;
+		goto unmap_ctx;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
+	if (ret)
+		goto unmap_ctx;
+
+	return -EINPROGRESS;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	kfree(edesc);
+	return ret;
+}
+
+static int ahash_digest(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	u32 *desc;
+	int digestsize = crypto_ahash_digestsize(ahash);
+	int src_nents, mapped_nents;
+	struct ahash_edesc *edesc;
+	int ret;
+
+	state->buf_dma = 0;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (src_nents < 0) {
+		dev_err(jrdev, "Invalid number of src SG.\n");
+		return src_nents;
+	}
+
+	if (src_nents) {
+		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+					  DMA_TO_DEVICE);
+		if (!mapped_nents) {
+			dev_err(jrdev, "unable to map source for DMA\n");
+			return -ENOMEM;
+		}
+	} else {
+		mapped_nents = 0;
+	}
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
+				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
+				  flags);
+	if (!edesc) {
+		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	edesc->src_nents = src_nents;
+
+	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
+				  req->nbytes);
+	if (ret) {
+		ahash_unmap(jrdev, edesc, req, digestsize);
+		kfree(edesc);
+		return ret;
+	}
+
+	desc = edesc->hw_desc;
+
+	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		ahash_unmap(jrdev, edesc, req, digestsize);
+		kfree(edesc);
+		return -ENOMEM;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ahash_unmap(jrdev, edesc, req, digestsize);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+/* submit ahash final if it is the first job descriptor */
+static int ahash_final_no_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	u8 *buf = current_buf(state);
+	int buflen = *current_buflen(state);
+	u32 *desc;
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct ahash_edesc *edesc;
+	int ret;
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
+				  ctx->sh_desc_digest_dma, flags);
+	if (!edesc)
+		return -ENOMEM;
+
+	desc = edesc->hw_desc;
+
+	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, state->buf_dma)) {
+		dev_err(jrdev, "unable to map src\n");
+		goto unmap;
+	}
+
+	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+
+	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		goto unmap;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ahash_unmap(jrdev, edesc, req, digestsize);
+		kfree(edesc);
+	}
+
+	return ret;
+ unmap:
+	ahash_unmap(jrdev, edesc, req, digestsize);
+	kfree(edesc);
+	return -ENOMEM;
+}
+
+/* submit ahash update if it is the first job descriptor after update */
+static int ahash_update_no_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	u8 *buf = current_buf(state);
+	int *buflen = current_buflen(state);
+	u8 *next_buf = alt_buf(state);
+	int *next_buflen = alt_buflen(state);
+	int in_len = *buflen + req->nbytes, to_hash;
+	int sec4_sg_bytes, src_nents, mapped_nents;
+	struct ahash_edesc *edesc;
+	u32 *desc;
+	int ret = 0;
+
+	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+	to_hash = in_len - *next_buflen;
+
+	if (to_hash) {
+		src_nents = sg_nents_for_len(req->src,
+					     req->nbytes - *next_buflen);
+		if (src_nents < 0) {
+			dev_err(jrdev, "Invalid number of src SG.\n");
+			return src_nents;
+		}
+
+		if (src_nents) {
+			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+						  DMA_TO_DEVICE);
+			if (!mapped_nents) {
+				dev_err(jrdev, "unable to DMA map source\n");
+				return -ENOMEM;
+			}
+		} else {
+			mapped_nents = 0;
+		}
+
+		sec4_sg_bytes = (1 + mapped_nents) *
+				sizeof(struct sec4_sg_entry);
+
+		/*
+		 * allocate space for base edesc and hw desc commands,
+		 * link tables
+		 */
+		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
+					  ctx->sh_desc_update_first,
+					  ctx->sh_desc_update_first_dma,
+					  flags);
+		if (!edesc) {
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+			return -ENOMEM;
+		}
+
+		edesc->src_nents = src_nents;
+		edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
+		if (ret)
+			goto unmap_ctx;
+
+		sg_to_sec4_sg_last(req->src, mapped_nents,
+				   edesc->sec4_sg + 1, 0);
+
+		if (*next_buflen) {
+			scatterwalk_map_and_copy(next_buf, req->src,
+						 to_hash - *buflen,
+						 *next_buflen, 0);
+		}
+
+		desc = edesc->hw_desc;
+
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						    sec4_sg_bytes,
+						    DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+			dev_err(jrdev, "unable to map S/G table\n");
+			ret = -ENOMEM;
+			goto unmap_ctx;
+		}
+
+		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
+
+		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+		if (ret)
+			goto unmap_ctx;
+
+#ifdef DEBUG
+		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			       desc_bytes(desc), 1);
+#endif
+
+		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
+		if (ret)
+			goto unmap_ctx;
+
+		ret = -EINPROGRESS;
+		state->update = ahash_update_ctx;
+		state->finup = ahash_finup_ctx;
+		state->final = ahash_final_ctx;
+	} else if (*next_buflen) {
+		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+					 req->nbytes, 0);
+		*buflen = *next_buflen;
+		*next_buflen = 0;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+		       *next_buflen, 1);
+#endif
+
+	return ret;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	kfree(edesc);
+	return ret;
+}
+
+/* submit ahash finup if it is the first job descriptor after update */
+static int ahash_finup_no_ctx(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int buflen = *current_buflen(state);
+	u32 *desc;
+	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct ahash_edesc *edesc;
+	int ret;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	if (src_nents < 0) {
+		dev_err(jrdev, "Invalid number of src SG.\n");
+		return src_nents;
+	}
+
+	if (src_nents) {
+		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+					  DMA_TO_DEVICE);
+		if (!mapped_nents) {
+			dev_err(jrdev, "unable to DMA map source\n");
+			return -ENOMEM;
+		}
+	} else {
+		mapped_nents = 0;
+	}
+
+	sec4_sg_src_index = 2;
+	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
+			 sizeof(struct sec4_sg_entry);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
+				  flags);
+	if (!edesc) {
+		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	desc = edesc->hw_desc;
+
+	edesc->src_nents = src_nents;
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
+	if (ret)
+		goto unmap;
+
+	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
+				  req->nbytes);
+	if (ret) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		goto unmap;
+	}
+
+	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+						digestsize);
+	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+		dev_err(jrdev, "unable to map dst\n");
+		goto unmap;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ahash_unmap(jrdev, edesc, req, digestsize);
+		kfree(edesc);
+	}
+
+	return ret;
+ unmap:
+	ahash_unmap(jrdev, edesc, req, digestsize);
+	kfree(edesc);
+	return -ENOMEM;
+}
+
+/* submit first update job descriptor after init */
+static int ahash_update_first(struct ahash_request *req)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	u8 *next_buf = alt_buf(state);
+	int *next_buflen = alt_buflen(state);
+	int to_hash;
+	u32 *desc;
+	int src_nents, mapped_nents;
+	struct ahash_edesc *edesc;
+	int ret = 0;
+
+	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
+				      1);
+	to_hash = req->nbytes - *next_buflen;
+
+	if (to_hash) {
+		src_nents = sg_nents_for_len(req->src,
+					     req->nbytes - *next_buflen);
+		if (src_nents < 0) {
+			dev_err(jrdev, "Invalid number of src SG.\n");
+			return src_nents;
+		}
+
+		if (src_nents) {
+			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+						  DMA_TO_DEVICE);
+			if (!mapped_nents) {
+				dev_err(jrdev, "unable to map source for DMA\n");
+				return -ENOMEM;
+			}
+		} else {
+			mapped_nents = 0;
+		}
+
+		/*
+		 * allocate space for base edesc and hw desc commands,
+		 * link tables
+		 */
+		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
+					  mapped_nents : 0,
+					  ctx->sh_desc_update_first,
+					  ctx->sh_desc_update_first_dma,
+					  flags);
+		if (!edesc) {
+			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+			return -ENOMEM;
+		}
+
+		edesc->src_nents = src_nents;
+
+		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
+					  to_hash);
+		if (ret)
+			goto unmap_ctx;
+
+		if (*next_buflen)
+			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+						 *next_buflen, 0);
+
+		desc = edesc->hw_desc;
+
+		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+		if (ret)
+			goto unmap_ctx;
+
+#ifdef DEBUG
+		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			       desc_bytes(desc), 1);
+#endif
+
+		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
+		if (ret)
+			goto unmap_ctx;
+
+		ret = -EINPROGRESS;
+		state->update = ahash_update_ctx;
+		state->finup = ahash_finup_ctx;
+		state->final = ahash_final_ctx;
+	} else if (*next_buflen) {
+		state->update = ahash_update_no_ctx;
+		state->finup = ahash_finup_no_ctx;
+		state->final = ahash_final_no_ctx;
+		scatterwalk_map_and_copy(next_buf, req->src, 0,
+					 req->nbytes, 0);
+		switch_buf(state);
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+		       *next_buflen, 1);
+#endif
+
+	return ret;
+ unmap_ctx:
+	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	kfree(edesc);
+	return ret;
+}
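A minimal sketch (editorial; split_update() and its names are illustrative,
not driver symbols) of the split ahash_update_first() performs above: the
block-multiple prefix is sent to the CAAM, while the sub-block tail is staged
in next_buf for the following call. Assumes a power-of-two block size, which
holds for MD5 and the SHA family:

static inline unsigned int split_update(unsigned int nbytes,
					unsigned int blocksize,
					unsigned int *tail)
{
	*tail = nbytes & (blocksize - 1);	/* staged in next_buf */
	return nbytes - *tail;			/* block multiple for the CAAM */
}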
+
+static int ahash_finup_first(struct ahash_request *req)
+{
+	return ahash_digest(req);
+}
+
+static int ahash_init(struct ahash_request *req)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	state->update = ahash_update_first;
+	state->finup = ahash_finup_first;
+	state->final = ahash_final_no_ctx;
+
+	state->ctx_dma = 0;
+	state->current_buf = 0;
+	state->buf_dma = 0;
+	state->buflen_0 = 0;
+	state->buflen_1 = 0;
+
+	return 0;
+}
+
+static int ahash_update(struct ahash_request *req)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	return state->update(req);
+}
+
+static int ahash_finup(struct ahash_request *req)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	return state->finup(req);
+}
+
+static int ahash_final(struct ahash_request *req)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+
+	return state->final(req);
+}
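The four dispatchers above route each call through function pointers held in
the per-request state, and earlier handlers rewire those pointers as the hash
progresses (e.g. ahash_update_first() installs ahash_update_ctx()). A
compressed model of the pattern, with hypothetical names:

struct st;
static int upd_ctx(struct st *s);

struct st {
	int (*update)(struct st *s);	/* next handler for this request */
};

static int upd_first(struct st *s)
{
	/* submit the first job, then hand later calls to the ctx path */
	s->update = upd_ctx;
	return 0;
}

static int upd_ctx(struct st *s)
{
	return 0;	/* context-carrying updates from here on */
}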
+
+static int ahash_export(struct ahash_request *req, void *out)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	struct caam_export_state *export = out;
+	int len;
+	u8 *buf;
+
+	if (state->current_buf) {
+		buf = state->buf_1;
+		len = state->buflen_1;
+	} else {
+		buf = state->buf_0;
+		len = state->buflen_0;
+	}
+
+	memcpy(export->buf, buf, len);
+	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
+	export->buflen = len;
+	export->update = state->update;
+	export->final = state->final;
+	export->finup = state->finup;
+
+	return 0;
+}
+
+static int ahash_import(struct ahash_request *req, const void *in)
+{
+	struct caam_hash_state *state = ahash_request_ctx(req);
+	const struct caam_export_state *export = in;
+
+	memset(state, 0, sizeof(*state));
+	memcpy(state->buf_0, export->buf, export->buflen);
+	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
+	state->buflen_0 = export->buflen;
+	state->update = export->update;
+	state->final = export->final;
+	state->finup = export->finup;
+
+	return 0;
+}
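ahash_export()/ahash_import() serialize only the software-visible state (the
bounce buffer, the CAAM context image and the three handler pointers), which
is what allows a hash in progress to be checkpointed and resumed. A hedged
usage sketch through the generic crypto API (req and req2 are assumed
pre-built ahash requests backed by this driver):

char st[sizeof(struct caam_export_state)];

crypto_ahash_export(req, st);		/* snapshot the running hash */
/* ... later, possibly on a different request ... */
crypto_ahash_import(req2, st);		/* resume from the snapshot */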
+
+struct caam_hash_template {
+	char name[CRYPTO_MAX_ALG_NAME];
+	char driver_name[CRYPTO_MAX_ALG_NAME];
+	char hmac_name[CRYPTO_MAX_ALG_NAME];
+	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int blocksize;
+	struct ahash_alg template_ahash;
+	u32 alg_type;
+};
+
+/* ahash descriptors */
+static struct caam_hash_template driver_hash[] = {
+	{
+		.name = "sha1",
+		.driver_name = "sha1-caam",
+		.hmac_name = "hmac(sha1)",
+		.hmac_driver_name = "hmac-sha1-caam",
+		.blocksize = SHA1_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA1_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA1,
+	}, {
+		.name = "sha224",
+		.driver_name = "sha224-caam",
+		.hmac_name = "hmac(sha224)",
+		.hmac_driver_name = "hmac-sha224-caam",
+		.blocksize = SHA224_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA224_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA224,
+	}, {
+		.name = "sha256",
+		.driver_name = "sha256-caam",
+		.hmac_name = "hmac(sha256)",
+		.hmac_driver_name = "hmac-sha256-caam",
+		.blocksize = SHA256_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA256_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA256,
+	}, {
+		.name = "sha384",
+		.driver_name = "sha384-caam",
+		.hmac_name = "hmac(sha384)",
+		.hmac_driver_name = "hmac-sha384-caam",
+		.blocksize = SHA384_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA384_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA384,
+	}, {
+		.name = "sha512",
+		.driver_name = "sha512-caam",
+		.hmac_name = "hmac(sha512)",
+		.hmac_driver_name = "hmac-sha512-caam",
+		.blocksize = SHA512_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = SHA512_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_SHA512,
+	}, {
+		.name = "md5",
+		.driver_name = "md5-caam",
+		.hmac_name = "hmac(md5)",
+		.hmac_driver_name = "hmac-md5-caam",
+		.blocksize = MD5_BLOCK_WORDS * 4,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = ahash_setkey,
+			.halg = {
+				.digestsize = MD5_DIGEST_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_MD5,
+	},
+};
+
+struct caam_hash_alg {
+	struct list_head entry;
+	int alg_type;
+	struct ahash_alg ahash_alg;
+};
+
+static int caam_hash_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct crypto_alg *base = tfm->__crt_alg;
+	struct hash_alg_common *halg =
+		 container_of(base, struct hash_alg_common, base);
+	struct ahash_alg *alg =
+		 container_of(halg, struct ahash_alg, halg);
+	struct caam_hash_alg *caam_hash =
+		 container_of(alg, struct caam_hash_alg, ahash_alg);
+	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
+	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
+					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
+					 /* SHA-224 carries SHA-256-sized state */
+					 HASH_MSG_LEN + 32,
+					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
+					 /* SHA-384 carries SHA-512-sized state */
+					 HASH_MSG_LEN + 64,
+					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+	dma_addr_t dma_addr;
+	struct caam_drv_private *priv;
+
+	/*
+	 * Get a job ring from the Job Ring driver to ensure in-order
+	 * crypto request processing per tfm
+	 */
+	ctx->jrdev = caam_jr_alloc();
+	if (IS_ERR(ctx->jrdev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(ctx->jrdev);
+	}
+
+	priv = dev_get_drvdata(ctx->jrdev->parent);
+	/* era >= 6 parts rewrite the key in-place (DKP), hence bidirectional */
+	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
+					offsetof(struct caam_hash_ctx,
+						 sh_desc_update_dma),
+					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
+		caam_jr_free(ctx->jrdev);
+		return -ENOMEM;
+	}
+
+	ctx->sh_desc_update_dma = dma_addr;
+	ctx->sh_desc_update_first_dma = dma_addr +
+					offsetof(struct caam_hash_ctx,
+						 sh_desc_update_first);
+	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
+						   sh_desc_fin);
+	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
+						      sh_desc_digest);
+
+	/* copy descriptor header template value */
+	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+
+	ctx->ctx_len = runninglen[(ctx->adata.algtype &
+				   OP_ALG_ALGSEL_SUBMASK) >>
+				  OP_ALG_ALGSEL_SHIFT];
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct caam_hash_state));
+	return ahash_set_sh_desc(ahash);
+}
+
+static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
+			       offsetof(struct caam_hash_ctx,
+					sh_desc_update_dma),
+			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+	caam_jr_free(ctx->jrdev);
+}
+
+static void __exit caam_algapi_hash_exit(void)
+{
+	struct caam_hash_alg *t_alg, *n;
+
+	if (!hash_list.next)
+		return;
+
+	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
+		crypto_unregister_ahash(&t_alg->ahash_alg);
+		list_del(&t_alg->entry);
+		kfree(t_alg);
+	}
+}
+
+static struct caam_hash_alg *
+caam_hash_alloc(struct caam_hash_template *template,
+		bool keyed)
+{
+	struct caam_hash_alg *t_alg;
+	struct ahash_alg *halg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg) {
+		pr_err("failed to allocate t_alg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	t_alg->ahash_alg = template->template_ahash;
+	halg = &t_alg->ahash_alg;
+	alg = &halg->halg.base;
+
+	if (keyed) {
+		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->hmac_name);
+		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->hmac_driver_name);
+	} else {
+		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->name);
+		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+			 template->driver_name);
+		t_alg->ahash_alg.setkey = NULL;
+	}
+	alg->cra_module = THIS_MODULE;
+	alg->cra_init = caam_hash_cra_init;
+	alg->cra_exit = caam_hash_cra_exit;
+	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+	alg->cra_priority = CAAM_CRA_PRIORITY;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_flags = CRYPTO_ALG_ASYNC;
+
+	t_alg->alg_type = template->alg_type;
+
+	return t_alg;
+}
+
+static int __init caam_algapi_hash_init(void)
+{
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	int i = 0, err = 0;
+	struct caam_drv_private *priv;
+	unsigned int md_limit = SHA512_DIGEST_SIZE;
+	u32 cha_inst, cha_vid;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
+	/*
+	 * Register crypto algorithms the device supports.  First, identify
+	 * presence and attributes of MD block.
+	 */
+	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+
+	/*
+	 * Skip registration of any hashing algorithms if MD block
+	 * is not present.
+	 */
+	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
+		return -ENODEV;
+
+	/* Limit digest size based on LP256 */
+	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
+		md_limit = SHA256_DIGEST_SIZE;
+
+	INIT_LIST_HEAD(&hash_list);
+
+	/* register crypto algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
+		struct caam_hash_alg *t_alg;
+		struct caam_hash_template *alg = driver_hash + i;
+
+		/* If MD size is not supported by device, skip registration */
+		if (alg->template_ahash.halg.digestsize > md_limit)
+			continue;
+
+		/* register hmac version */
+		t_alg = caam_hash_alloc(alg, true);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			pr_warn("%s alg allocation failed\n", alg->driver_name);
+			continue;
+		}
+
+		err = crypto_register_ahash(&t_alg->ahash_alg);
+		if (err) {
+			pr_warn("%s alg registration failed: %d\n",
+				t_alg->ahash_alg.halg.base.cra_driver_name,
+				err);
+			kfree(t_alg);
+		} else {
+			list_add_tail(&t_alg->entry, &hash_list);
+		}
+
+		/* register unkeyed version */
+		t_alg = caam_hash_alloc(alg, false);
+		if (IS_ERR(t_alg)) {
+			err = PTR_ERR(t_alg);
+			pr_warn("%s alg allocation failed\n", alg->driver_name);
+			continue;
+		}
+
+		err = crypto_register_ahash(&t_alg->ahash_alg);
+		if (err) {
+			pr_warn("%s alg registration failed: %d\n",
+				t_alg->ahash_alg.halg.base.cra_driver_name,
+				err);
+			kfree(t_alg);
+		} else {
+			list_add_tail(&t_alg->entry, &hash_list);
+		}
+	}
+
+	return err;
+}
+
+module_init(caam_algapi_hash_init);
+module_exit(caam_algapi_hash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG");
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
new file mode 100644
index 0000000..f26d62e
--- /dev/null
+++ b/drivers/crypto/caam/caampkc.c
@@ -0,0 +1,1074 @@
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+ */
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "jr.h"
+#include "error.h"
+#include "desc_constr.h"
+#include "sg_sw_sec4.h"
+#include "caampkc.h"
+
+#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
+#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
+				 sizeof(struct rsa_priv_f1_pdb))
+#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
+				 sizeof(struct rsa_priv_f2_pdb))
+#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
+				 sizeof(struct rsa_priv_f3_pdb))
+
+static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
+			 struct akcipher_request *req)
+{
+	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
+	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+
+	if (edesc->sec4_sg_bytes)
+		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
+				 DMA_TO_DEVICE);
+}
+
+static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
+			  struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
+
+	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
+}
+
+static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
+			      struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
+
+	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+}
+
+static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
+			      struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+	size_t p_sz = key->p_sz;
+	size_t q_sz = key->q_sz;
+
+	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
+}
+
+static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
+			      struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
+	size_t p_sz = key->p_sz;
+	size_t q_sz = key->q_sz;
+
+	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
+}
+
+/* RSA Job Completion handler */
+static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
+{
+	struct akcipher_request *req = context;
+	struct rsa_edesc *edesc;
+
+	if (err)
+		caam_jr_strstatus(dev, err);
+
+	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+	rsa_pub_unmap(dev, edesc, req);
+	rsa_io_unmap(dev, edesc, req);
+	kfree(edesc);
+
+	akcipher_request_complete(req, err);
+}
+
+static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
+			     void *context)
+{
+	struct akcipher_request *req = context;
+	struct rsa_edesc *edesc;
+
+	if (err)
+		caam_jr_strstatus(dev, err);
+
+	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+	rsa_priv_f1_unmap(dev, edesc, req);
+	rsa_io_unmap(dev, edesc, req);
+	kfree(edesc);
+
+	akcipher_request_complete(req, err);
+}
+
+static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
+			     void *context)
+{
+	struct akcipher_request *req = context;
+	struct rsa_edesc *edesc;
+
+	if (err)
+		caam_jr_strstatus(dev, err);
+
+	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+	rsa_priv_f2_unmap(dev, edesc, req);
+	rsa_io_unmap(dev, edesc, req);
+	kfree(edesc);
+
+	akcipher_request_complete(req, err);
+}
+
+static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
+			     void *context)
+{
+	struct akcipher_request *req = context;
+	struct rsa_edesc *edesc;
+
+	if (err)
+		caam_jr_strstatus(dev, err);
+
+	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+	rsa_priv_f3_unmap(dev, edesc, req);
+	rsa_io_unmap(dev, edesc, req);
+	kfree(edesc);
+
+	akcipher_request_complete(req, err);
+}
+
+static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
+					unsigned int nbytes,
+					unsigned int flags)
+{
+	struct sg_mapping_iter miter;
+	int lzeros, ents;
+	unsigned int len;
+	unsigned int tbytes = nbytes;
+	const u8 *buff;
+
+	ents = sg_nents_for_len(sgl, nbytes);
+	if (ents < 0)
+		return ents;
+
+	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);
+
+	lzeros = 0;
+	len = 0;
+	while (nbytes > 0) {
+		while (len && !*buff) {
+			lzeros++;
+			len--;
+			buff++;
+		}
+
+		if (len && *buff)
+			break;
+
+		sg_miter_next(&miter);
+		buff = miter.addr;
+		len = miter.length;
+
+		nbytes -= lzeros;
+		lzeros = 0;
+	}
+
+	miter.consumed = lzeros;
+	sg_miter_stop(&miter);
+	nbytes -= lzeros;
+
+	return tbytes - nbytes;
+}
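For reference, caam_rsa_count_leading_zeros() computes, across a possibly
non-contiguous scatterlist via sg_miter, what this flat-buffer sketch
(editorial, illustrative only) does in one pass:

static size_t count_leading_zeros_flat(const u8 *buf, size_t nbytes)
{
	size_t n = 0;

	while (n < nbytes && !buf[n])
		n++;
	return n;	/* number of leading 0x00 bytes */
}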
+
+static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
+					 size_t desclen)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *dev = ctx->dev;
+	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+	struct rsa_edesc *edesc;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
+	int sgc;
+	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+	int src_nents, dst_nents;
+	int lzeros;
+
+	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
+	if (lzeros < 0)
+		return ERR_PTR(lzeros);
+
+	req->src_len -= lzeros;
+	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
+
+	src_nents = sg_nents_for_len(req->src, req->src_len);
+	dst_nents = sg_nents_for_len(req->dst, req->dst_len);
+
+	if (src_nents > 1)
+		sec4_sg_len = src_nents;
+	if (dst_nents > 1)
+		sec4_sg_len += dst_nents;
+
+	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+	/* allocate space for base edesc, hw desc commands and link tables */
+	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
+			GFP_DMA | flags);
+	if (!edesc)
+		return ERR_PTR(-ENOMEM);
+
+	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+	if (unlikely(!sgc)) {
+		dev_err(dev, "unable to map source\n");
+		goto src_fail;
+	}
+
+	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+	if (unlikely(!sgc)) {
+		dev_err(dev, "unable to map destination\n");
+		goto dst_fail;
+	}
+
+	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
+
+	sec4_sg_index = 0;
+	if (src_nents > 1) {
+		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+		sec4_sg_index += src_nents;
+	}
+	if (dst_nents > 1)
+		sg_to_sec4_sg_last(req->dst, dst_nents,
+				   edesc->sec4_sg + sec4_sg_index, 0);
+
+	/* Save nents for later use in Job Descriptor */
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+
+	if (!sec4_sg_bytes)
+		return edesc;
+
+	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
+		dev_err(dev, "unable to map S/G table\n");
+		goto sec4_sg_fail;
+	}
+
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+	return edesc;
+
+sec4_sg_fail:
+	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+dst_fail:
+	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+src_fail:
+	kfree(edesc);
+	return ERR_PTR(-ENOMEM);
+}
+
+static int set_rsa_pub_pdb(struct akcipher_request *req,
+			   struct rsa_edesc *edesc)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct device *dev = ctx->dev;
+	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
+	int sec4_sg_index = 0;
+
+	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->n_dma)) {
+		dev_err(dev, "Unable to map RSA modulus memory\n");
+		return -ENOMEM;
+	}
+
+	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->e_dma)) {
+		dev_err(dev, "Unable to map RSA public exponent memory\n");
+		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	if (edesc->src_nents > 1) {
+		pdb->sgf |= RSA_PDB_SGF_F;
+		pdb->f_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+	} else {
+		pdb->f_dma = sg_dma_address(req->src);
+	}
+
+	if (edesc->dst_nents > 1) {
+		pdb->sgf |= RSA_PDB_SGF_G;
+		pdb->g_dma = edesc->sec4_sg_dma +
+			     sec4_sg_index * sizeof(struct sec4_sg_entry);
+	} else {
+		pdb->g_dma = sg_dma_address(req->dst);
+	}
+
+	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
+	pdb->f_len = req->src_len;
+
+	return 0;
+}
+
+static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
+			       struct rsa_edesc *edesc)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct device *dev = ctx->dev;
+	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
+	int sec4_sg_index = 0;
+
+	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->n_dma)) {
+		dev_err(dev, "Unable to map modulus memory\n");
+		return -ENOMEM;
+	}
+
+	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->d_dma)) {
+		dev_err(dev, "Unable to map RSA private exponent memory\n");
+		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	if (edesc->src_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+		pdb->g_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+	} else {
+		pdb->g_dma = sg_dma_address(req->src);
+	}
+
+	if (edesc->dst_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+		pdb->f_dma = edesc->sec4_sg_dma +
+			     sec4_sg_index * sizeof(struct sec4_sg_entry);
+	} else {
+		pdb->f_dma = sg_dma_address(req->dst);
+	}
+
+	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
+
+	return 0;
+}
+
+static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
+			       struct rsa_edesc *edesc)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct device *dev = ctx->dev;
+	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+	int sec4_sg_index = 0;
+	size_t p_sz = key->p_sz;
+	size_t q_sz = key->q_sz;
+
+	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->d_dma)) {
+		dev_err(dev, "Unable to map RSA private exponent memory\n");
+		return -ENOMEM;
+	}
+
+	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->p_dma)) {
+		dev_err(dev, "Unable to map RSA prime factor p memory\n");
+		goto unmap_d;
+	}
+
+	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->q_dma)) {
+		dev_err(dev, "Unable to map RSA prime factor q memory\n");
+		goto unmap_p;
+	}
+
+	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
+		dev_err(dev, "Unable to map RSA tmp1 memory\n");
+		goto unmap_q;
+	}
+
+	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
+		dev_err(dev, "Unable to map RSA tmp2 memory\n");
+		goto unmap_tmp1;
+	}
+
+	if (edesc->src_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+		pdb->g_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+	} else {
+		pdb->g_dma = sg_dma_address(req->src);
+	}
+
+	if (edesc->dst_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+		pdb->f_dma = edesc->sec4_sg_dma +
+			     sec4_sg_index * sizeof(struct sec4_sg_entry);
+	} else {
+		pdb->f_dma = sg_dma_address(req->dst);
+	}
+
+	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
+	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
+
+	return 0;
+
+unmap_tmp1:
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+unmap_q:
+	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+unmap_p:
+	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+unmap_d:
+	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+
+	return -ENOMEM;
+}
+
+static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
+			       struct rsa_edesc *edesc)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct device *dev = ctx->dev;
+	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
+	int sec4_sg_index = 0;
+	size_t p_sz = key->p_sz;
+	size_t q_sz = key->q_sz;
+
+	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->p_dma)) {
+		dev_err(dev, "Unable to map RSA prime factor p memory\n");
+		return -ENOMEM;
+	}
+
+	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->q_dma)) {
+		dev_err(dev, "Unable to map RSA prime factor q memory\n");
+		goto unmap_p;
+	}
+
+	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->dp_dma)) {
+		dev_err(dev, "Unable to map RSA exponent dp memory\n");
+		goto unmap_q;
+	}
+
+	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->dq_dma)) {
+		dev_err(dev, "Unable to map RSA exponent dq memory\n");
+		goto unmap_dp;
+	}
+
+	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->c_dma)) {
+		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
+		goto unmap_dq;
+	}
+
+	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
+		dev_err(dev, "Unable to map RSA tmp1 memory\n");
+		goto unmap_qinv;
+	}
+
+	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
+		dev_err(dev, "Unable to map RSA tmp2 memory\n");
+		goto unmap_tmp1;
+	}
+
+	if (edesc->src_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+		pdb->g_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+	} else {
+		pdb->g_dma = sg_dma_address(req->src);
+	}
+
+	if (edesc->dst_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+		pdb->f_dma = edesc->sec4_sg_dma +
+			     sec4_sg_index * sizeof(struct sec4_sg_entry);
+	} else {
+		pdb->f_dma = sg_dma_address(req->dst);
+	}
+
+	pdb->sgf |= key->n_sz;
+	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
+
+	return 0;
+
+unmap_tmp1:
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+unmap_qinv:
+	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
+unmap_dq:
+	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
+unmap_dp:
+	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
+unmap_q:
+	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+unmap_p:
+	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+
+	return -ENOMEM;
+}
+
+static int caam_rsa_enc(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct device *jrdev = ctx->dev;
+	struct rsa_edesc *edesc;
+	int ret;
+
+	if (unlikely(!key->n || !key->e))
+		return -EINVAL;
+
+	if (req->dst_len < key->n_sz) {
+		req->dst_len = key->n_sz;
+		dev_err(jrdev, "Output buffer length less than parameter n\n");
+		return -EOVERFLOW;
+	}
+
+	/* Allocate extended descriptor */
+	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Set RSA Encrypt Protocol Data Block */
+	ret = set_rsa_pub_pdb(req, edesc);
+	if (ret)
+		goto init_fail;
+
+	/* Initialize Job Descriptor */
+	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
+
+	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
+	if (!ret)
+		return -EINPROGRESS;
+
+	rsa_pub_unmap(jrdev, edesc, req);
+
+init_fail:
+	rsa_io_unmap(jrdev, edesc, req);
+	kfree(edesc);
+	return ret;
+}
+
+static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *jrdev = ctx->dev;
+	struct rsa_edesc *edesc;
+	int ret;
+
+	/* Allocate extended descriptor */
+	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
+	ret = set_rsa_priv_f1_pdb(req, edesc);
+	if (ret)
+		goto init_fail;
+
+	/* Initialize Job Descriptor */
+	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
+
+	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
+	if (!ret)
+		return -EINPROGRESS;
+
+	rsa_priv_f1_unmap(jrdev, edesc, req);
+
+init_fail:
+	rsa_io_unmap(jrdev, edesc, req);
+	kfree(edesc);
+	return ret;
+}
+
+static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *jrdev = ctx->dev;
+	struct rsa_edesc *edesc;
+	int ret;
+
+	/* Allocate extended descriptor */
+	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
+	ret = set_rsa_priv_f2_pdb(req, edesc);
+	if (ret)
+		goto init_fail;
+
+	/* Initialize Job Descriptor */
+	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
+
+	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
+	if (!ret)
+		return -EINPROGRESS;
+
+	rsa_priv_f2_unmap(jrdev, edesc, req);
+
+init_fail:
+	rsa_io_unmap(jrdev, edesc, req);
+	kfree(edesc);
+	return ret;
+}
+
+static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *jrdev = ctx->dev;
+	struct rsa_edesc *edesc;
+	int ret;
+
+	/* Allocate extended descriptor */
+	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
+	ret = set_rsa_priv_f3_pdb(req, edesc);
+	if (ret)
+		goto init_fail;
+
+	/* Initialize Job Descriptor */
+	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
+
+	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
+	if (!ret)
+		return -EINPROGRESS;
+
+	rsa_priv_f3_unmap(jrdev, edesc, req);
+
+init_fail:
+	rsa_io_unmap(jrdev, edesc, req);
+	kfree(edesc);
+	return ret;
+}
+
+static int caam_rsa_dec(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	int ret;
+
+	if (unlikely(!key->n || !key->d))
+		return -EINVAL;
+
+	if (req->dst_len < key->n_sz) {
+		req->dst_len = key->n_sz;
+		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
+		return -EOVERFLOW;
+	}
+
+	if (key->priv_form == FORM3)
+		ret = caam_rsa_dec_priv_f3(req);
+	else if (key->priv_form == FORM2)
+		ret = caam_rsa_dec_priv_f2(req);
+	else
+		ret = caam_rsa_dec_priv_f1(req);
+
+	return ret;
+}
+
+static void caam_rsa_free_key(struct caam_rsa_key *key)
+{
+	kzfree(key->d);
+	kzfree(key->p);
+	kzfree(key->q);
+	kzfree(key->dp);
+	kzfree(key->dq);
+	kzfree(key->qinv);
+	kzfree(key->tmp1);
+	kzfree(key->tmp2);
+	kfree(key->e);
+	kfree(key->n);
+	memset(key, 0, sizeof(*key));
+}
+
+static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
+{
+	while (!**ptr && *nbytes) {
+		(*ptr)++;
+		(*nbytes)--;
+	}
+}
+
+/**
+ * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
+ * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q
+ * length, as the BER encoding requires that the minimum number of bytes be
+ * used to encode the integer. Decoded dP, dQ, qInv values therefore have to
+ * be zero-padded to the appropriate length.
+ *
+ * @ptr   : pointer to {dP, dQ, qInv} CRT member
+ * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
+ * @dstlen: length in bytes of corresponding p or q prime factor
+ */
+static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
+{
+	u8 *dst;
+
+	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
+	if (!nbytes)
+		return NULL;
+
+	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
+	if (!dst)
+		return NULL;
+
+	memcpy(dst + (dstlen - nbytes), ptr, nbytes);
+
+	return dst;
+}
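A worked example with assumed sizes: for a 32-byte prime p, a dP that
BER-decodes to only 31 bytes comes back left-padded to the operand width the
CAAM expects:

u8 *dp = caam_read_rsa_crt(raw_dp, 31, 32);	/* raw_dp: hypothetical input */
/* dp[0] == 0x00; dp[1..31] hold the 31 decoded bytes */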
+
+/**
+ * caam_read_raw_data - Read a raw byte stream as a positive integer.
+ * The function skips the buffer's leading zeros, copies the remaining data
+ * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
+ * the address of the new buffer.
+ *
+ * @buf   : The data to read
+ * @nbytes: The amount of data to read
+ */
+static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
+{
+	caam_rsa_drop_leading_zeros(&buf, nbytes);
+	if (!*nbytes)
+		return NULL;
+
+	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
+}
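A worked example of the in/out contract, with hypothetical values:

const u8 raw[4] = { 0x00, 0x00, 0x01, 0x02 };
size_t n = sizeof(raw);
u8 *val = caam_read_raw_data(raw, &n);
/* val is a 2-byte GFP_DMA copy { 0x01, 0x02 } and n == 2 */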
+
+static int caam_rsa_check_key_length(unsigned int len)
+{
+	if (len > 4096)
+		return -EINVAL;
+	return 0;
+}
+
+static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+				unsigned int keylen)
+{
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct rsa_key raw_key = {NULL};
+	struct caam_rsa_key *rsa_key = &ctx->key;
+	int ret;
+
+	/* Free the old RSA key if any */
+	caam_rsa_free_key(rsa_key);
+
+	ret = rsa_parse_pub_key(&raw_key, key, keylen);
+	if (ret)
+		return ret;
+
+	/* Copy key in DMA zone */
+	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+	if (!rsa_key->e)
+		goto err;
+
+	/*
+	 * Skip leading zeros and copy the positive integer to a buffer
+	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+	 * expects a positive integer for the RSA modulus and uses its length as
+	 * decryption output length.
+	 */
+	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
+	if (!rsa_key->n)
+		goto err;
+
+	/* caam_rsa_check_key_length() takes the modulus size in bits */
+	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
+		caam_rsa_free_key(rsa_key);
+		return -EINVAL;
+	}
+
+	rsa_key->e_sz = raw_key.e_sz;
+	rsa_key->n_sz = raw_key.n_sz;
+
+	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
+
+	return 0;
+err:
+	caam_rsa_free_key(rsa_key);
+	return -ENOMEM;
+}
+
+static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
+				       struct rsa_key *raw_key)
+{
+	struct caam_rsa_key *rsa_key = &ctx->key;
+	size_t p_sz = raw_key->p_sz;
+	size_t q_sz = raw_key->q_sz;
+
+	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
+	if (!rsa_key->p)
+		return;
+	rsa_key->p_sz = p_sz;
+
+	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
+	if (!rsa_key->q)
+		goto free_p;
+	rsa_key->q_sz = q_sz;
+
+	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
+	if (!rsa_key->tmp1)
+		goto free_q;
+
+	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
+	if (!rsa_key->tmp2)
+		goto free_tmp1;
+
+	rsa_key->priv_form = FORM2;
+
+	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
+	if (!rsa_key->dp)
+		goto free_tmp2;
+
+	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
+	if (!rsa_key->dq)
+		goto free_dp;
+
+	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
+					  q_sz);
+	if (!rsa_key->qinv)
+		goto free_dq;
+
+	rsa_key->priv_form = FORM3;
+
+	return;
+
+free_dq:
+	kzfree(rsa_key->dq);
+free_dp:
+	kzfree(rsa_key->dp);
+free_tmp2:
+	kzfree(rsa_key->tmp2);
+free_tmp1:
+	kzfree(rsa_key->tmp1);
+free_q:
+	kzfree(rsa_key->q);
+free_p:
+	kzfree(rsa_key->p);
+}
+
+static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+				 unsigned int keylen)
+{
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct rsa_key raw_key = {NULL};
+	struct caam_rsa_key *rsa_key = &ctx->key;
+	int ret;
+
+	/* Free the old RSA key if any */
+	caam_rsa_free_key(rsa_key);
+
+	ret = rsa_parse_priv_key(&raw_key, key, keylen);
+	if (ret)
+		return ret;
+
+	/* Copy key in DMA zone */
+	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
+	if (!rsa_key->d)
+		goto err;
+
+	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+	if (!rsa_key->e)
+		goto err;
+
+	/*
+	 * Skip leading zeros and copy the positive integer to a buffer
+	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+	 * expects a positive integer for the RSA modulus and uses its length as
+	 * decryption output length.
+	 */
+	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
+	if (!rsa_key->n)
+		goto err;
+
+	/* caam_rsa_check_key_length() takes the modulus size in bits */
+	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
+		caam_rsa_free_key(rsa_key);
+		return -EINVAL;
+	}
+
+	rsa_key->d_sz = raw_key.d_sz;
+	rsa_key->e_sz = raw_key.e_sz;
+	rsa_key->n_sz = raw_key.n_sz;
+
+	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
+	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
+
+	caam_rsa_set_priv_key_form(ctx, &raw_key);
+
+	return 0;
+
+err:
+	caam_rsa_free_key(rsa_key);
+	return -ENOMEM;
+}
+
+static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
+{
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	return ctx->key.n_sz;
+}
+
+/* Per-session PKC driver context creation function */
+static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	ctx->dev = caam_jr_alloc();
+
+	if (IS_ERR(ctx->dev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(ctx->dev);
+	}
+
+	return 0;
+}
+
+/* Per-session PKC driver context cleanup function */
+static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+
+	caam_rsa_free_key(key);
+	caam_jr_free(ctx->dev);
+}
+
+static struct akcipher_alg caam_rsa = {
+	.encrypt = caam_rsa_enc,
+	.decrypt = caam_rsa_dec,
+	.sign = caam_rsa_dec,
+	.verify = caam_rsa_enc,
+	.set_pub_key = caam_rsa_set_pub_key,
+	.set_priv_key = caam_rsa_set_priv_key,
+	.max_size = caam_rsa_max_size,
+	.init = caam_rsa_init_tfm,
+	.exit = caam_rsa_exit_tfm,
+	.reqsize = sizeof(struct caam_rsa_req_ctx),
+	.base = {
+		.cra_name = "rsa",
+		.cra_driver_name = "rsa-caam",
+		.cra_priority = 3000,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
+	},
+};
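Once registered, the transform is reachable through the generic akcipher API.
A minimal consumer sketch (standard kernel crypto API calls; der_pub is an
assumed BER-encoded public key, error handling elided):

struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);

crypto_akcipher_set_pub_key(tfm, der_pub, der_pub_len);
/* build an akcipher_request, then crypto_akcipher_encrypt(req), ... */
crypto_free_akcipher(tfm);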
+
+/* Public Key Cryptography module initialization handler */
+static int __init caam_pkc_init(void)
+{
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	struct caam_drv_private *priv;
+	u32 cha_inst, pk_inst;
+	int err;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
+	/* Determine public key hardware accelerator presence. */
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+
+	/* Do not register algorithms if PKHA is not present. */
+	if (!pk_inst)
+		return -ENODEV;
+
+	err = crypto_register_akcipher(&caam_rsa);
+	if (err)
+		dev_warn(ctrldev, "%s alg registration failed\n",
+			 caam_rsa.base.cra_driver_name);
+	else
+		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
+
+	return err;
+}
+
+static void __exit caam_pkc_exit(void)
+{
+	crypto_unregister_akcipher(&caam_rsa);
+}
+
+module_init(caam_pkc_init);
+module_exit(caam_pkc_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
+MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
new file mode 100644
index 0000000..82645bc
--- /dev/null
+++ b/drivers/crypto/caam/caampkc.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+ */
+
+#ifndef _PKC_DESC_H_
+#define _PKC_DESC_H_
+#include "compat.h"
+#include "pdb.h"
+
+/**
+ * caam_priv_key_form - CAAM RSA private key representation
+ * A CAAM RSA private key may take one of three forms.
+ *
+ * 1. The first representation consists of the pair (n, d), where the
+ *    components have the following meanings:
+ *        n      the RSA modulus
+ *        d      the RSA private exponent
+ *
+ * 2. The second representation consists of the triplet (p, q, d), where the
+ *    components have the following meanings:
+ *        p      the first prime factor of the RSA modulus n
+ *        q      the second prime factor of the RSA modulus n
+ *        d      the RSA private exponent
+ *
+ * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
+ *    where the components have the following meanings:
+ *        p      the first prime factor of the RSA modulus n
+ *        q      the second prime factor of the RSA modulus n
+ *        dP     the first factor's CRT exponent
+ *        dQ     the second factor's CRT exponent
+ *        qInv   the (first) CRT coefficient
+ *
+ * The benefit of using the third or the second key form is lower computational
+ * cost for the decryption and signature operations.
+ */
+enum caam_priv_key_form {
+	FORM1,
+	FORM2,
+	FORM3
+};
+
+/**
+ * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
+ * @n           : RSA modulus raw byte stream
+ * @e           : RSA public exponent raw byte stream
+ * @d           : RSA private exponent raw byte stream
+ * @p           : RSA prime factor p of RSA modulus n
+ * @q           : RSA prime factor q of RSA modulus n
+ * @dp          : RSA CRT exponent of p
+ * @dq          : RSA CRT exponent of q
+ * @qinv        : RSA CRT coefficient
+ * @tmp1        : CAAM uses this temporary buffer as an internal state buffer.
+ *                It is assumed to be as long as p.
+ * @tmp2        : CAAM uses this temporary buffer as an internal state buffer.
+ *                It is assumed to be as long as q.
+ * @n_sz        : length in bytes of RSA modulus n
+ * @e_sz        : length in bytes of RSA public exponent
+ * @d_sz        : length in bytes of RSA private exponent
+ * @p_sz        : length in bytes of RSA prime factor p of RSA modulus n
+ * @q_sz        : length in bytes of RSA prime factor q of RSA modulus n
+ * @priv_form   : CAAM RSA private key representation
+ */
+struct caam_rsa_key {
+	u8 *n;
+	u8 *e;
+	u8 *d;
+	u8 *p;
+	u8 *q;
+	u8 *dp;
+	u8 *dq;
+	u8 *qinv;
+	u8 *tmp1;
+	u8 *tmp2;
+	size_t n_sz;
+	size_t e_sz;
+	size_t d_sz;
+	size_t p_sz;
+	size_t q_sz;
+	enum caam_priv_key_form priv_form;
+};
+
+/**
+ * caam_rsa_ctx - per session context.
+ * @key         : RSA key in DMA zone
+ * @dev         : device structure
+ */
+struct caam_rsa_ctx {
+	struct caam_rsa_key key;
+	struct device *dev;
+};
+
+/**
+ * caam_rsa_req_ctx - per request context.
+ * @src: input scatterlist (stripped of leading zeros)
+ */
+struct caam_rsa_req_ctx {
+	struct scatterlist src[2];
+};
+
+/**
+ * rsa_edesc - s/w-extended rsa descriptor
+ * @src_nents     : number of segments in input scatterlist
+ * @dst_nents     : number of segments in output scatterlist
+ * @sec4_sg_bytes : length of h/w link table
+ * @sec4_sg_dma   : dma address of h/w link table
+ * @sec4_sg       : pointer to h/w link table
+ * @pdb           : specific RSA Protocol Data Block (PDB)
+ * @hw_desc       : descriptor followed by link tables if any
+ */
+struct rsa_edesc {
+	int src_nents;
+	int dst_nents;
+	int sec4_sg_bytes;
+	dma_addr_t sec4_sg_dma;
+	struct sec4_sg_entry *sec4_sg;
+	union {
+		struct rsa_pub_pdb pub;
+		struct rsa_priv_f1_pdb priv_f1;
+		struct rsa_priv_f2_pdb priv_f2;
+		struct rsa_priv_f3_pdb priv_f3;
+	} pdb;
+	u32 hw_desc[];
+};
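All three regions live in a single allocation; rsa_edesc_alloc() derives the
link-table pointer as (void *)edesc + sizeof(*edesc) + desclen, giving this
assumed layout:

	+------------------+------------------+----------------------+
	| struct rsa_edesc | hw_desc[desclen] | sec4_sg_bytes of S/G |
	+------------------+------------------+----------------------+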
+
+/* Descriptor construction primitives. */
+void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
+void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
+void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
+void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
+
+#endif
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
new file mode 100644
index 0000000..fde07d4
--- /dev/null
+++ b/drivers/crypto/caam/caamrng.c
@@ -0,0 +1,370 @@
+/*
+ * caam - Freescale FSL CAAM support for hw_random
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+ * relationship between job descriptors to shared descriptors:
+ *
+ * ---------------                     --------------
+ * | JobDesc #0  |-------------------->| ShareDesc  |
+ * | *(buffer 0) |      |------------->| (generate) |
+ * ---------------      |              | (move)     |
+ *                      |              | (store)    |
+ * ---------------      |              --------------
+ * | JobDesc #1  |------|
+ * | *(buffer 1) |
+ * ---------------
+ *
+ * A job desc looks like this:
+ *
+ * ---------------------
+ * | Header            |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR       |
+ * | (output buffer)   |
+ * ---------------------
+ *
+ * The SharedDesc never changes, and each job descriptor points to one of two
+ * buffers for each device, from which the data will be copied into the
+ * requested destination.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+
+/*
+ * Maximum buffer size: maximum number of random, cache-aligned bytes that
+ * will be generated and moved to seq out ptr (extlen not allowed)
+ */
+#define RN_BUF_SIZE			(0xffff / L1_CACHE_BYTES * \
+					 L1_CACHE_BYTES)
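Worked example, assuming a 64-byte L1 cache line:
0xffff / 64 * 64 = 65472, i.e. the largest cache-line multiple that still
fits the descriptor's 16-bit sequence length field.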
+
+/* length of descriptors */
+#define DESC_JOB_O_LEN			(CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
+#define DESC_RNG_LEN			(3 * CAAM_CMD_SZ)
+
+/* Buffer, its dma address and lock */
+struct buf_data {
+	u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
+	dma_addr_t addr;
+	struct completion filled;
+	u32 hw_desc[DESC_JOB_O_LEN];
+#define BUF_NOT_EMPTY 0
+#define BUF_EMPTY 1
+#define BUF_PENDING 2  /* Empty, but with a job pending -- don't submit another */
+	atomic_t empty;
+};
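The empty field drives a small per-buffer state machine; the transitions
below are inferred from submit_job(), rng_done() and caam_read():

	BUF_NOT_EMPTY --drained by caam_read()---> BUF_EMPTY
	BUF_EMPTY     --submit_job() enqueues----> BUF_PENDING
	BUF_PENDING   --rng_done() completes-----> BUF_NOT_EMPTY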
+
+/* rng per-device context */
+struct caam_rng_ctx {
+	struct device *jrdev;
+	dma_addr_t sh_desc_dma;
+	u32 sh_desc[DESC_RNG_LEN];
+	unsigned int cur_buf_idx;
+	int current_buf;
+	struct buf_data bufs[2];
+};
+
+static struct caam_rng_ctx *rng_ctx;
+
+static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
+{
+	if (bd->addr)
+		dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
+				 DMA_FROM_DEVICE);
+}
+
+static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
+{
+	struct device *jrdev = ctx->jrdev;
+
+	if (ctx->sh_desc_dma)
+		dma_unmap_single(jrdev, ctx->sh_desc_dma,
+				 desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
+	rng_unmap_buf(jrdev, &ctx->bufs[0]);
+	rng_unmap_buf(jrdev, &ctx->bufs[1]);
+}
+
+static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
+{
+	struct buf_data *bd;
+
+	bd = container_of(desc, struct buf_data, hw_desc[0]);
+
+	if (err)
+		caam_jr_strstatus(jrdev, err);
+
+	atomic_set(&bd->empty, BUF_NOT_EMPTY);
+	complete(&bd->filled);
+
+	/* Buffer refilled, invalidate cache */
+	dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
+#endif
+}
+
+static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
+{
+	struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc = bd->hw_desc;
+	int err;
+
+	dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
+	init_completion(&bd->filled);
+	err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
+	if (err)
+		complete(&bd->filled); /* don't wait on a failed job */
+	else
+		atomic_inc(&bd->empty); /* note if pending */
+
+	return err;
+}
+
+static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct caam_rng_ctx *ctx = rng_ctx;
+	struct buf_data *bd = &ctx->bufs[ctx->current_buf];
+	int next_buf_idx, copied_idx;
+	int err;
+
+	if (atomic_read(&bd->empty)) {
+		/* try to submit job if there wasn't one */
+		if (atomic_read(&bd->empty) == BUF_EMPTY) {
+			err = submit_job(ctx, 1);
+			/* if can't submit job, can't even wait */
+			if (err)
+				return 0;
+		}
+		/* no immediate data, so exit if not waiting */
+		if (!wait)
+			return 0;
+
+		/* waiting for pending job */
+		if (atomic_read(&bd->empty))
+			wait_for_completion(&bd->filled);
+	}
+
+	next_buf_idx = ctx->cur_buf_idx + max;
+	dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
+		 __func__, ctx->current_buf, ctx->cur_buf_idx);
+
+	/* if enough data in current buffer */
+	if (next_buf_idx < RN_BUF_SIZE) {
+		memcpy(data, bd->buf + ctx->cur_buf_idx, max);
+		ctx->cur_buf_idx = next_buf_idx;
+		return max;
+	}
+
+	/* else, copy what's left... */
+	copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
+	memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
+	ctx->cur_buf_idx = 0;
+	atomic_set(&bd->empty, BUF_EMPTY);
+
+	/* ...refill... */
+	submit_job(ctx, 1);
+
+	/* and use next buffer */
+	ctx->current_buf = !ctx->current_buf;
+	dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);
+
+	/* since there already is some data read, don't wait */
+	return copied_idx + caam_read(rng, data + copied_idx,
+				      max - copied_idx, false);
+}
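caam_read() implements the hwrng ->read contract: it may return fewer bytes
than requested, and returns 0 when wait is false and nothing is buffered. A
caller's-eye sketch (out is a hypothetical destination buffer):

u8 out[32];
int got = caam_read(&caam_rng, out, sizeof(out), true);
/* got <= 32; with wait == false, got may be 0 if both buffers are empty */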
+
+static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
+{
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc = ctx->sh_desc;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Generate random bytes */
+	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
+
+	/* Store bytes */
+	append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);
+
+	ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+					  DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+		       desc, desc_bytes(desc), 1);
+#endif
+	return 0;
+}
+
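+/*
+ * Build the per-buffer job descriptor: it references the shared
+ * descriptor and, because of HDR_REVERSE, runs its own commands (which
+ * point SEQ OUT at the buffer's DMA address) before the shared
+ * descriptor generates and stores the random bytes.
+ */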
+static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+{
+	struct device *jrdev = ctx->jrdev;
+	struct buf_data *bd = &ctx->bufs[buf_id];
+	u32 *desc = bd->hw_desc;
+	int sh_len = desc_len(ctx->sh_desc);
+
+	init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
+			     HDR_REVERSE);
+
+	bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, bd->addr)) {
+		dev_err(jrdev, "unable to map dst\n");
+		return -ENOMEM;
+	}
+
+	append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+		       desc, desc_bytes(desc), 1);
+#endif
+	return 0;
+}
+
+static void caam_cleanup(struct hwrng *rng)
+{
+	int i;
+	struct buf_data *bd;
+
+	for (i = 0; i < 2; i++) {
+		bd = &rng_ctx->bufs[i];
+		if (atomic_read(&bd->empty) == BUF_PENDING)
+			wait_for_completion(&bd->filled);
+	}
+
+	rng_unmap_ctx(rng_ctx);
+}
+
+static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+{
+	struct buf_data *bd = &ctx->bufs[buf_id];
+	int err;
+
+	err = rng_create_job_desc(ctx, buf_id);
+	if (err)
+		return err;
+
+	atomic_set(&bd->empty, BUF_EMPTY);
+	submit_job(ctx, buf_id == ctx->current_buf);
+	wait_for_completion(&bd->filled);
+
+	return 0;
+}
+
+static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
+{
+	int err;
+
+	ctx->jrdev = jrdev;
+
+	err = rng_create_sh_desc(ctx);
+	if (err)
+		return err;
+
+	ctx->current_buf = 0;
+	ctx->cur_buf_idx = 0;
+
+	err = caam_init_buf(ctx, 0);
+	if (err)
+		return err;
+
+	return caam_init_buf(ctx, 1);
+}
+
+static struct hwrng caam_rng = {
+	.name		= "rng-caam",
+	.cleanup	= caam_cleanup,
+	.read		= caam_read,
+};
+
+static void __exit caam_rng_exit(void)
+{
+	caam_jr_free(rng_ctx->jrdev);
+	hwrng_unregister(&caam_rng);
+	kfree(rng_ctx);
+}
+
+static int __init caam_rng_init(void)
+{
+	struct device *dev;
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	struct caam_drv_private *priv;
+	int err;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
+	/* Check for an instantiated RNG before registration */
+	if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
+		return -ENODEV;
+
+	dev = caam_jr_alloc();
+	if (IS_ERR(dev)) {
+		pr_err("Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(dev);
+	}
+	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
+	if (!rng_ctx) {
+		err = -ENOMEM;
+		goto free_caam_alloc;
+	}
+	err = caam_init_rng(rng_ctx, dev);
+	if (err)
+		goto free_rng_ctx;
+
+	dev_info(dev, "registering rng-caam\n");
+	return hwrng_register(&caam_rng);
+
+free_rng_ctx:
+	kfree(rng_ctx);
+free_caam_alloc:
+	caam_jr_free(dev);
+	return err;
+}
+
+module_init(caam_rng_init);
+module_exit(caam_rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG");
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
new file mode 100644
index 0000000..1c71e0c
--- /dev/null
+++ b/drivers/crypto/caam/compat.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CAAM_COMPAT_H
+#define CAAM_COMPAT_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <linux/hash.h>
+#include <linux/hw_random.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/circ_buf.h>
+#include <linux/clk.h>
+#include <net/xfrm.h>
+
+#include <crypto/algapi.h>
+#include <crypto/null.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/gcm.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/internal/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/akcipher.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+
+#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
new file mode 100644
index 0000000..538c01f
--- /dev/null
+++ b/drivers/crypto/caam/ctrl.c
@@ -0,0 +1,907 @@
+/*
+ * CAAM control-plane driver backend
+ * Controller-level driver, kernel property detection, initialization
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sys_soc.h>
+
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "jr.h"
+#include "desc_constr.h"
+#include "ctrl.h"
+
+bool caam_little_end;
+EXPORT_SYMBOL(caam_little_end);
+bool caam_dpaa2;
+EXPORT_SYMBOL(caam_dpaa2);
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
+
+#ifdef CONFIG_CAAM_QI
+#include "qi.h"
+#endif
+
+/*
+ * i.MX targets tend to have clock control subsystems that can
+ * enable/disable clocking to our device.
+ */
+static inline struct clk *caam_drv_identify_clk(struct device *dev,
+						char *clk_name)
+{
+	return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
+}
+
+/*
+ * Descriptor to instantiate RNG State Handle 0 in normal mode and
+ * load the JDKEK, TDKEK and TDSK registers
+ */
+static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
+{
+	u32 *jump_cmd, op_flags;
+
+	init_job_desc(desc, 0);
+
+	op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+			(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
+
+	/* INIT RNG in non-test mode */
+	append_operation(desc, op_flags);
+
+	if (!handle && do_sk) {
+		/*
+		 * For SH0, Secure Keys must be generated as well
+		 */
+
+		/* wait for done */
+		jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
+		set_jump_tgt_here(desc, jump_cmd);
+
+		/*
+		 * load 1 to clear written reg:
+		 * resets the done interrupt and returns the RNG to idle.
+		 */
+		append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+
+		/* Initialize State Handle  */
+		append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+				 OP_ALG_AAI_RNG4_SK);
+	}
+
+	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
+}
+
+/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
+static void build_deinstantiation_desc(u32 *desc, int handle)
+{
+	init_job_desc(desc, 0);
+
+	/* Uninstantiate State Handle 0 */
+	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+			 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
+
+	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
+}
+
+/*
+ * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
+ *			  the software (no JR/QI used).
+ * @ctrldev - pointer to device
+ * @desc - pointer to the descriptor to run
+ * @status - descriptor status, after being run
+ *
+ * Return: - 0 if no error occurred
+ *	   - -ENODEV if the DECO couldn't be acquired
+ *	   - -EAGAIN if an error occurred while executing the descriptor
+ */
+static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
+					u32 *status)
+{
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+	struct caam_deco __iomem *deco = ctrlpriv->deco;
+	unsigned int timeout = 100000;
+	u32 deco_dbg_reg, flags;
+	int i;
+
+	if (ctrlpriv->virt_en == 1) {
+		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
+
+		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
+		       --timeout)
+			cpu_relax();
+
+		timeout = 100000;
+	}
+
+	clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);
+
+	while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
+								 --timeout)
+		cpu_relax();
+
+	if (!timeout) {
+		dev_err(ctrldev, "failed to acquire DECO 0\n");
+		clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
+		return -ENODEV;
+	}
+
+	for (i = 0; i < desc_len(desc); i++)
+		wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));
+
+	flags = DECO_JQCR_WHL;
+	/*
+	 * If the descriptor length is 4 words or longer, then the
+	 * FOUR bit in the JRCTRL register must be set.
+	 */
+	if (desc_len(desc) >= 4)
+		flags |= DECO_JQCR_FOUR;
+
+	/* Instruct the DECO to execute it */
+	clrsetbits_32(&deco->jr_ctl_hi, 0, flags);
+
+	timeout = 10000000;
+	do {
+		deco_dbg_reg = rd_reg32(&deco->desc_dbg);
+		/*
+		 * If an error occurred in the descriptor, then
+		 * the DECO status field will be set to 0x0D
+		 */
+		if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
+		    DESC_DBG_DECO_STAT_HOST_ERR)
+			break;
+		cpu_relax();
+	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
+
+	*status = rd_reg32(&deco->op_status_hi) &
+		  DECO_OP_STATUS_HI_ERR_MASK;
+
+	if (ctrlpriv->virt_en == 1)
+		clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);
+
+	/* Mark the DECO as free */
+	clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
+
+	if (!timeout)
+		return -EAGAIN;
+
+	return 0;
+}
+
+/*
+ * instantiate_rng - builds and executes a descriptor on DECO0,
+ *		     which initializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ *			for the RNG4 state handles which exist in
+ *			the RNG4 block: 1 if it's been instantiated
+ *			by an external entity, 0 otherwise.
+ * @gen_sk  - generate data to be loaded into the JDKEK, TDKEK and TDSK;
+ *	      Caution: this can be done only once; if the keys need to be
+ *	      regenerated, a POR is required
+ *
+ * Return: - 0 if no error occurred
+ *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
+ *	   - -ENODEV if DECO0 couldn't be acquired
+ *	   - -EAGAIN if an error occurred when executing the descriptor
+ *	      e.g. there was an RNG hardware error because "good enough"
+ *	      entropy could not be acquired.
+ */
+static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+			   int gen_sk)
+{
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+	struct caam_ctrl __iomem *ctrl;
+	u32 *desc, status = 0, rdsta_val;
+	int ret = 0, sh_idx;
+
+	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+		/*
+		 * If the corresponding bit is set, this state handle
+		 * was initialized by somebody else, so it's left alone.
+		 */
+		if ((1 << sh_idx) & state_handle_mask)
+			continue;
+
+		/* Create the descriptor for instantiating RNG State Handle */
+		build_instantiation_desc(desc, sh_idx, gen_sk);
+
+		/* Try to run it through DECO0 */
+		ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+		/*
+		 * If ret is not 0, or descriptor status is not 0, then
+		 * something went wrong. No need to try the next state
+		 * handle (if available), bail out here.
+		 * Also, if for some reason, the State Handle didn't get
+		 * instantiated although the descriptor has finished
+		 * without any error (HW optimizations for later
+		 * CAAM eras), then try again.
+		 */
+		if (ret)
+			break;
+
+		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
+		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
+		    !(rdsta_val & (1 << sh_idx))) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
+		/* Clear the contents before recreating the descriptor */
+		memset(desc, 0x00, CAAM_CMD_SZ * 7);
+	}
+
+	kfree(desc);
+
+	return ret;
+}
+
+/*
+ * deinstantiate_rng - builds and executes a descriptor on DECO0,
+ *		       which deinitializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ *			for the RNG4 state handles which exist in
+ *			the RNG4 block: 1 if it's been instantiated
+ *			the RNG4 block: 1 if it's been instantiated by this
+ *			driver, 0 otherwise
+ * Return: - 0 if no error occurred
+ *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
+ *	   - -ENODEV if DECO0 couldn't be acquired
+ *	   - -EAGAIN if an error occurred when executing the descriptor
+ */
+static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+{
+	u32 *desc, status;
+	int sh_idx, ret = 0;
+
+	desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+		/*
+		 * If the corresponding bit is set, then it means the state
+		 * handle was initialized by us, and thus it needs to be
+		 * deinitialized as well
+		 */
+		if ((1 << sh_idx) & state_handle_mask) {
+			/*
+			 * Create the descriptor for deinstantiating this state
+			 * handle
+			 */
+			build_deinstantiation_desc(desc, sh_idx);
+
+			/* Try to run it through DECO0 */
+			ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+			if (ret ||
+			    (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
+				dev_err(ctrldev,
+					"Failed to deinstantiate RNG4 SH%d\n",
+					sh_idx);
+				break;
+			}
+			dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
+		}
+	}
+
+	kfree(desc);
+
+	return ret;
+}
+
+static int caam_remove(struct platform_device *pdev)
+{
+	struct device *ctrldev;
+	struct caam_drv_private *ctrlpriv;
+	struct caam_ctrl __iomem *ctrl;
+
+	ctrldev = &pdev->dev;
+	ctrlpriv = dev_get_drvdata(ctrldev);
+	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+
+	/* Remove platform devices under the crypto node */
+	of_platform_depopulate(ctrldev);
+
+#ifdef CONFIG_CAAM_QI
+	if (ctrlpriv->qidev)
+		caam_qi_shutdown(ctrlpriv->qidev);
+#endif
+
+	/*
+	 * De-initialize RNG state handles initialized by this driver.
+	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+	 */
+	if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
+		deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+
+	/* Shut down debug views */
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove_recursive(ctrlpriv->dfs_root);
+#endif
+
+	/* Unmap controller region */
+	iounmap(ctrl);
+
+	/* shut clocks off before finalizing shutdown */
+	clk_disable_unprepare(ctrlpriv->caam_ipg);
+	if (ctrlpriv->caam_mem)
+		clk_disable_unprepare(ctrlpriv->caam_mem);
+	clk_disable_unprepare(ctrlpriv->caam_aclk);
+	if (ctrlpriv->caam_emi_slow)
+		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+	return 0;
+}
+
+/*
+ * kick_trng - sets the various parameters for enabling the initialization
+ *	       of the RNG4 block in CAAM
+ * @pdev - pointer to the platform device
+ * @ent_delay - Defines the length (in system clocks) of each entropy sample.
+ */
+static void kick_trng(struct platform_device *pdev, int ent_delay)
+{
+	struct device *ctrldev = &pdev->dev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+	struct caam_ctrl __iomem *ctrl;
+	struct rng4tst __iomem *r4tst;
+	u32 val;
+
+	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+	r4tst = &ctrl->r4tst[0];
+
+	/* put RNG4 into program mode */
+	clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);
+
+	/*
+	 * Performance-wise, it does not make sense to
+	 * set the delay to a value that is lower
+	 * than the last one that worked (i.e. the state handles
+	 * were instantiated properly). Thus, instead of wasting
+	 * time trying to set the values controlling the sample
+	 * frequency, the function simply returns.
+	 */
+	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
+	      >> RTSDCTL_ENT_DLY_SHIFT;
+	if (ent_delay <= val)
+		goto start_rng;
+
+	val = rd_reg32(&r4tst->rtsdctl);
+	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
+	      (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
+	wr_reg32(&r4tst->rtsdctl, val);
+	/* min. freq. count, equal to 1/4 of the entropy sample length */
+	wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
+	/* disable maximum frequency count */
+	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
+	/* read the control register */
+	val = rd_reg32(&r4tst->rtmctl);
+start_rng:
+	/*
+	 * select raw sampling in both entropy shifter
+	 * and statistical checker; put RNG4 into run mode
+	 */
+	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
+}
+
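+/*
+ * Map (IP ID, major revision) from the SECVID_MS register to a CAAM era
+ * for parts older than era 6, whose CCBVID era field reads back as 0.
+ */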
+static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
+{
+	static const struct {
+		u16 ip_id;
+		u8 maj_rev;
+		u8 era;
+	} id[] = {
+		{0x0A10, 1, 1},
+		{0x0A10, 2, 2},
+		{0x0A12, 1, 3},
+		{0x0A14, 1, 3},
+		{0x0A14, 2, 4},
+		{0x0A16, 1, 4},
+		{0x0A10, 3, 4},
+		{0x0A11, 1, 4},
+		{0x0A18, 1, 4},
+		{0x0A11, 2, 5},
+		{0x0A12, 2, 5},
+		{0x0A13, 1, 5},
+		{0x0A1C, 1, 5}
+	};
+	u32 ccbvid, id_ms;
+	u8 maj_rev, era;
+	u16 ip_id;
+	int i;
+
+	ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
+	era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
+	if (era)	/* This is '0' prior to CAAM ERA-6 */
+		return era;
+
+	id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
+	ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
+	maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;
+
+	for (i = 0; i < ARRAY_SIZE(id); i++)
+		if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
+			return id[i].era;
+
+	return -ENOTSUPP;
+}
+
+/**
+ * caam_get_era() - Return the ERA of the SEC on the SoC, based on the
+ * optional "fsl,sec-era" property in the DTS. This property is updated
+ * by u-boot.
+ * In case this property is not passed, an attempt is made to retrieve
+ * the CAAM era via register reads.
+ */
+static int caam_get_era(struct caam_ctrl __iomem *ctrl)
+{
+	struct device_node *caam_node;
+	int ret;
+	u32 prop;
+
+	caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
+	of_node_put(caam_node);
+
+	if (!ret)
+		return prop;
+	else
+		return caam_get_era_from_hw(ctrl);
+}
+
+static const struct of_device_id caam_match[] = {
+	{
+		.compatible = "fsl,sec-v4.0",
+	},
+	{
+		.compatible = "fsl,sec4.0",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, caam_match);
+
+/* Probe routine for CAAM top (controller) level */
+static int caam_probe(struct platform_device *pdev)
+{
+	int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+	u64 caam_id;
+	static const struct soc_device_attribute imx_soc[] = {
+		{.family = "Freescale i.MX"},
+		{},
+	};
+	struct device *dev;
+	struct device_node *nprop, *np;
+	struct caam_ctrl __iomem *ctrl;
+	struct caam_drv_private *ctrlpriv;
+	struct clk *clk;
+#ifdef CONFIG_DEBUG_FS
+	struct caam_perfmon *perfmon;
+#endif
+	u32 scfgr, comp_params;
+	u32 cha_vid_ls;
+	int pg_size;
+	int BLOCK_OFFSET = 0;
+
+	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
+	if (!ctrlpriv)
+		return -ENOMEM;
+
+	dev = &pdev->dev;
+	dev_set_drvdata(dev, ctrlpriv);
+	nprop = pdev->dev.of_node;
+
+	caam_imx = (bool)soc_device_match(imx_soc);
+
+	/* Enable clocking */
+	clk = caam_drv_identify_clk(&pdev->dev, "ipg");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev,
+			"can't identify CAAM ipg clk: %d\n", ret);
+		return ret;
+	}
+	ctrlpriv->caam_ipg = clk;
+
+	if (!of_machine_is_compatible("fsl,imx7d") &&
+	    !of_machine_is_compatible("fsl,imx7s")) {
+		clk = caam_drv_identify_clk(&pdev->dev, "mem");
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			dev_err(&pdev->dev,
+				"can't identify CAAM mem clk: %d\n", ret);
+			return ret;
+		}
+		ctrlpriv->caam_mem = clk;
+	}
+
+	clk = caam_drv_identify_clk(&pdev->dev, "aclk");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev,
+			"can't identify CAAM aclk clk: %d\n", ret);
+		return ret;
+	}
+	ctrlpriv->caam_aclk = clk;
+
+	if (!of_machine_is_compatible("fsl,imx6ul") &&
+	    !of_machine_is_compatible("fsl,imx7d") &&
+	    !of_machine_is_compatible("fsl,imx7s")) {
+		clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			dev_err(&pdev->dev,
+				"can't identify CAAM emi_slow clk: %d\n", ret);
+			return ret;
+		}
+		ctrlpriv->caam_emi_slow = clk;
+	}
+
+	ret = clk_prepare_enable(ctrlpriv->caam_ipg);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
+		return ret;
+	}
+
+	if (ctrlpriv->caam_mem) {
+		ret = clk_prepare_enable(ctrlpriv->caam_mem);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
+				ret);
+			goto disable_caam_ipg;
+		}
+	}
+
+	ret = clk_prepare_enable(ctrlpriv->caam_aclk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
+		goto disable_caam_mem;
+	}
+
+	if (ctrlpriv->caam_emi_slow) {
+		ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+				ret);
+			goto disable_caam_aclk;
+		}
+	}
+
+	/* Get configuration properties from device tree */
+	/* First, get register page */
+	ctrl = of_iomap(nprop, 0);
+	if (ctrl == NULL) {
+		dev_err(dev, "caam: of_iomap() failed\n");
+		ret = -ENOMEM;
+		goto disable_caam_emi_slow;
+	}
+
+	caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
+				  (CSTA_PLEND | CSTA_ALT_PLEND));
+
+	/* Finding the page size for using the CTPR_MS register */
+	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
+	pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
+
+	/* Allocating the BLOCK_OFFSET based on the supported page size on
+	 * the platform
+	 */
+	if (pg_size == 0)
+		BLOCK_OFFSET = PG_SIZE_4K;
+	else
+		BLOCK_OFFSET = PG_SIZE_64K;
+
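+	/*
+	 * Carve the individual register blocks (assurance, DECO, and later
+	 * the job rings and QI) out of the controller mapping, each at
+	 * block-number * BLOCK_OFFSET from the base.
+	 */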
+	ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
+	ctrlpriv->assure = (struct caam_assurance __iomem __force *)
+			   ((__force uint8_t *)ctrl +
+			    BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
+			   );
+	ctrlpriv->deco = (struct caam_deco __iomem __force *)
+			 ((__force uint8_t *)ctrl +
+			 BLOCK_OFFSET * DECO_BLOCK_NUMBER
+			 );
+
+	/* Get the IRQ of the controller (for security violations only) */
+	ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
+
+	/*
+	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
+	 * long pointers in master configuration register.
+	 * In case of SoCs with Management Complex, MC f/w performs
+	 * the configuration.
+	 */
+	caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
+	np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
+	ctrlpriv->mc_en = !!np;
+	of_node_put(np);
+
+	if (!ctrlpriv->mc_en)
+		clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+			      MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
+			      MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+			      (sizeof(dma_addr_t) == sizeof(u64) ?
+			       MCFGR_LONG_PTR : 0));
+
+	/*
+	 * Read the Compile Time parameters and SCFGR to determine
+	 * if virtualization is enabled for this platform
+	 */
+	scfgr = rd_reg32(&ctrl->scfgr);
+
+	ctrlpriv->virt_en = 0;
+	if (comp_params & CTPR_MS_VIRT_EN_INCL) {
+		/* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
+		 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
+		 */
+		if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
+		    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
+		       (scfgr & SCFGR_VIRT_EN)))
+				ctrlpriv->virt_en = 1;
+	} else {
+		/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
+		if (comp_params & CTPR_MS_VIRT_EN_POR)
+				ctrlpriv->virt_en = 1;
+	}
+
+	if (ctrlpriv->virt_en == 1)
+		clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
+			      JRSTART_JR1_START | JRSTART_JR2_START |
+			      JRSTART_JR3_START);
+
+	if (sizeof(dma_addr_t) == sizeof(u64)) {
+		if (caam_dpaa2)
+			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
+		else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
+			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+		else
+			ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
+	} else {
+		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	}
+	if (ret) {
+		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+		goto iounmap_ctrl;
+	}
+
+	ctrlpriv->era = caam_get_era(ctrl);
+
+	ret = of_platform_populate(nprop, caam_match, NULL, dev);
+	if (ret) {
+		dev_err(dev, "JR platform devices creation error\n");
+		goto iounmap_ctrl;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	/*
+	 * FIXME: needs better naming distinction, as some amalgamation of
+	 * "caam" and nprop->full_name. The OF name isn't distinctive,
+	 * but does separate instances
+	 */
+	perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
+
+	ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+	ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+#endif
+
+	ring = 0;
+	for_each_available_child_of_node(nprop, np)
+		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+					     ((__force uint8_t *)ctrl +
+					     (ring + JR_BLOCK_NUMBER) *
+					      BLOCK_OFFSET
+					     );
+			ctrlpriv->total_jobrs++;
+			ring++;
+		}
+
+	/* Check to see if (DPAA 1.x) QI present. If so, enable */
+	ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
+	if (ctrlpriv->qi_present && !caam_dpaa2) {
+		ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
+			       ((__force uint8_t *)ctrl +
+				 BLOCK_OFFSET * QI_BLOCK_NUMBER
+			       );
+		/* This is all that's required to physically enable QI */
+		wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
+
+		/* If QMAN driver is present, init CAAM-QI backend */
+#ifdef CONFIG_CAAM_QI
+		ret = caam_qi_init(pdev);
+		if (ret)
+			dev_err(dev, "caam qi i/f init failed: %d\n", ret);
+#endif
+	}
+
+	/* If no QI and no rings specified, quit and go home */
+	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
+		dev_err(dev, "no queues configured, terminating\n");
+		ret = -ENOMEM;
+		goto caam_remove;
+	}
+
+	cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
+
+	/*
+	 * If SEC has RNG version >= 4 and RNG state handle has not been
+	 * already instantiated, do RNG instantiation
+	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+	 */
+	if (!ctrlpriv->mc_en &&
+	    (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+		ctrlpriv->rng4_sh_init =
+			rd_reg32(&ctrl->r4tst[0].rdsta);
+		/*
+		 * If the secure keys (TDKEK, JDKEK, TDSK), were already
+		 * generated, signal this to the function that is instantiating
+		 * the state handles. An error would occur if RNG4 attempts
+		 * to regenerate these keys before the next POR.
+		 */
+		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
+		ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
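+		/*
+		 * Retry sketch: while no handle is instantiated, the TRNG is
+		 * reprogrammed with an entropy delay that grows by 400 system
+		 * clocks per attempt, from RTSDCTL_ENT_DLY_MIN up to
+		 * RTSDCTL_ENT_DLY_MAX, until instantiation succeeds.
+		 */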
+		do {
+			int inst_handles =
+				rd_reg32(&ctrl->r4tst[0].rdsta) &
+								RDSTA_IFMASK;
+			/*
+			 * If either SH were instantiated by somebody else
+			 * (e.g. u-boot) then it is assumed that the entropy
+			 * parameters are properly set and thus the function
+			 * setting these (kick_trng(...)) is skipped.
+			 * Also, if a handle was instantiated, do not change
+			 * the TRNG parameters.
+			 */
+			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+				dev_info(dev,
+					 "Entropy delay = %u\n",
+					 ent_delay);
+				kick_trng(pdev, ent_delay);
+				ent_delay += 400;
+			}
+			/*
+			 * if instantiate_rng(...) fails, the loop will rerun
+			 * and the kick_trng(...) function will modify the
+			 * upper and lower limits of the entropy sampling
+			 * interval, leading to a successful initialization of
+			 * the RNG.
+			 */
+			ret = instantiate_rng(dev, inst_handles,
+					      gen_sk);
+			if (ret == -EAGAIN)
+				/*
+				 * if here, the loop will rerun,
+				 * so don't hog the CPU
+				 */
+				cpu_relax();
+		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
+		if (ret) {
+			dev_err(dev, "failed to instantiate RNG\n");
+			goto caam_remove;
+		}
+		/*
+		 * Set handles init'ed by this module as the complement of the
+		 * already initialized ones
+		 */
+		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
+
+		/* Enable RDB bit so that RNG works faster */
+		clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
+	}
+
+	/* NOTE: RTIC detection ought to go here, around Si time */
+
+	caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
+		  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
+
+	/* Report "alive" for developer to see */
+	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
+		 ctrlpriv->era);
+	dev_info(dev, "job rings = %d, qi = %d\n",
+		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);
+
+#ifdef CONFIG_DEBUG_FS
+	debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->req_dequeued,
+			    &caam_fops_u64_ro);
+	debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->ob_enc_req,
+			    &caam_fops_u64_ro);
+	debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->ib_dec_req,
+			    &caam_fops_u64_ro);
+	debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->ob_enc_bytes,
+			    &caam_fops_u64_ro);
+	debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->ob_prot_bytes,
+			    &caam_fops_u64_ro);
+	debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->ib_dec_bytes,
+			    &caam_fops_u64_ro);
+	debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->ib_valid_bytes,
+			    &caam_fops_u64_ro);
+
+	/* Controller level - global status values */
+	debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->faultaddr,
+			    &caam_fops_u32_ro);
+	debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->faultdetail,
+			    &caam_fops_u32_ro);
+	debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
+			    ctrlpriv->ctl, &perfmon->status,
+			    &caam_fops_u32_ro);
+
+	/* Internal covering keys (useful in non-secure mode only) */
+	ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
+	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
+						S_IRUSR |
+						S_IRGRP | S_IROTH,
+						ctrlpriv->ctl,
+						&ctrlpriv->ctl_kek_wrap);
+
+	ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
+	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
+						 S_IRUSR |
+						 S_IRGRP | S_IROTH,
+						 ctrlpriv->ctl,
+						 &ctrlpriv->ctl_tkek_wrap);
+
+	ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
+	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
+						 S_IRUSR |
+						 S_IRGRP | S_IROTH,
+						 ctrlpriv->ctl,
+						 &ctrlpriv->ctl_tdsk_wrap);
+#endif
+	return 0;
+
+caam_remove:
+	caam_remove(pdev);
+	return ret;
+
+iounmap_ctrl:
+	iounmap(ctrl);
+disable_caam_emi_slow:
+	if (ctrlpriv->caam_emi_slow)
+		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+disable_caam_aclk:
+	clk_disable_unprepare(ctrlpriv->caam_aclk);
+disable_caam_mem:
+	if (ctrlpriv->caam_mem)
+		clk_disable_unprepare(ctrlpriv->caam_mem);
+disable_caam_ipg:
+	clk_disable_unprepare(ctrlpriv->caam_ipg);
+	return ret;
+}
+
+static struct platform_driver caam_driver = {
+	.driver = {
+		.name = "caam",
+		.of_match_table = caam_match,
+	},
+	.probe       = caam_probe,
+	.remove      = caam_remove,
+};
+
+module_platform_driver(caam_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM request backend");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
new file mode 100644
index 0000000..f3ecd67
--- /dev/null
+++ b/drivers/crypto/caam/ctrl.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM control-plane driver backend public-level include definitions
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CTRL_H
+#define CTRL_H
+
+/* Prototypes for backend-level services exposed to APIs */
+extern bool caam_dpaa2;
+
+#endif /* CTRL_H */
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
new file mode 100644
index 0000000..f76ff16
--- /dev/null
+++ b/drivers/crypto/caam/desc.h
@@ -0,0 +1,1656 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM descriptor composition header
+ * Definitions to support CAAM descriptor instruction generation
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef DESC_H
+#define DESC_H
+
+/*
+ * 16-byte hardware scatter/gather table
+ * An 8-byte table exists in the hardware spec, but has never been
+ * implemented to date. The 8/16 option is selected at RTL-compile-time,
+ * and this selection is visible in the Compile Time Parameters Register
+ */
+
+#define SEC4_SG_LEN_EXT		0x80000000	/* Entry points to table */
+#define SEC4_SG_LEN_FIN		0x40000000	/* Last ent in table */
+#define SEC4_SG_BPID_MASK	0x000000ff
+#define SEC4_SG_BPID_SHIFT	16
+#define SEC4_SG_LEN_MASK	0x3fffffff	/* Excludes EXT and FINAL */
+#define SEC4_SG_OFFSET_MASK	0x00001fff
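+
+/*
+ * Illustrative example: the length word of a final (non-extension)
+ * 32-byte entry would carry SEC4_SG_LEN_FIN | 32.
+ */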
+
+/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
+#define MAX_CAAM_DESCSIZE	64
+
+/* Block size of any entity covered/uncovered with a KEK/TKEK */
+#define KEK_BLOCKSIZE		16
+
+/*
+ * Supported descriptor command types as they show up
+ * inside a descriptor command word.
+ */
+#define CMD_SHIFT		27
+#define CMD_MASK		0xf8000000
+
+#define CMD_KEY			(0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY		(0x01 << CMD_SHIFT)
+#define CMD_LOAD		(0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD		(0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD		(0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD	(0x05 << CMD_SHIFT)
+#define CMD_STORE		(0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE		(0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE		(0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE	(0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN		(0x0e << CMD_SHIFT)
+#define CMD_MOVE		(0x0f << CMD_SHIFT)
+#define CMD_OPERATION		(0x10 << CMD_SHIFT)
+#define CMD_SIGNATURE		(0x12 << CMD_SHIFT)
+#define CMD_JUMP		(0x14 << CMD_SHIFT)
+#define CMD_MATH		(0x15 << CMD_SHIFT)
+#define CMD_DESC_HDR		(0x16 << CMD_SHIFT)
+#define CMD_SHARED_DESC_HDR	(0x17 << CMD_SHIFT)
+#define CMD_SEQ_IN_PTR		(0x1e << CMD_SHIFT)
+#define CMD_SEQ_OUT_PTR		(0x1f << CMD_SHIFT)
+
+/* General-purpose class selector for all commands */
+#define CLASS_SHIFT		25
+#define CLASS_MASK		(0x03 << CLASS_SHIFT)
+
+#define CLASS_NONE		(0x00 << CLASS_SHIFT)
+#define CLASS_1			(0x01 << CLASS_SHIFT)
+#define CLASS_2			(0x02 << CLASS_SHIFT)
+#define CLASS_BOTH		(0x03 << CLASS_SHIFT)
+
+/*
+ * Descriptor header command constructs
+ * Covers shared, job, and trusted descriptor headers
+ */
+
+/*
+ * Do Not Run - marks a descriptor inexecutable if there was
+ * a preceding error somewhere
+ */
+#define HDR_DNR			0x01000000
+
+/*
+ * ONE - should always be set. Combination of ONE (always
+ * set) and ZRO (always clear) forms an endianness sanity check
+ */
+#define HDR_ONE			0x00800000
+#define HDR_ZRO			0x00008000
+
+/* Start Index or SharedDesc Length */
+#define HDR_START_IDX_SHIFT	16
+#define HDR_START_IDX_MASK	(0x3f << HDR_START_IDX_SHIFT)
+
+/* If shared descriptor header, 6-bit length */
+#define HDR_DESCLEN_SHR_MASK	0x3f
+
+/* If non-shared header, 7-bit length */
+#define HDR_DESCLEN_MASK	0x7f
+
+/* This is a TrustedDesc (if not SharedDesc) */
+#define HDR_TRUSTED		0x00004000
+
+/* Make into TrustedDesc (if not SharedDesc) */
+#define HDR_MAKE_TRUSTED	0x00002000
+
+/* Save context if self-shared (if SharedDesc) */
+#define HDR_SAVECTX		0x00001000
+
+/* Next item points to SharedDesc */
+#define HDR_SHARED		0x00001000
+
+/*
+ * Reverse Execution Order - execute JobDesc first, then
+ * execute SharedDesc (normally SharedDesc goes first).
+ */
+#define HDR_REVERSE		0x00000800
+
+/* Propagate DNR property to SharedDesc */
+#define HDR_PROP_DNR		0x00000800
+
+/* JobDesc/SharedDesc share property */
+#define HDR_SD_SHARE_SHIFT	8
+#define HDR_SD_SHARE_MASK	(0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_JD_SHARE_SHIFT	8
+#define HDR_JD_SHARE_MASK	(0x07 << HDR_JD_SHARE_SHIFT)
+
+#define HDR_SHARE_NEVER		(0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT		(0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL	(0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS	(0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER		(0x04 << HDR_SD_SHARE_SHIFT)
+
+/* JobDesc/SharedDesc descriptor length */
+#define HDR_JD_LENGTH_MASK	0x7f
+#define HDR_SD_LENGTH_MASK	0x3f
+
+/*
+ * KEY/SEQ_KEY Command Constructs
+ */
+
+/* Key Destination Class: 01 = Class 1, 02 = Class 2 */
+#define KEY_DEST_CLASS_SHIFT	25	/* use CLASS_1 or CLASS_2 */
+#define KEY_DEST_CLASS_MASK	(0x03 << KEY_DEST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define KEY_SGF			0x01000000
+#define KEY_VLF			0x01000000
+
+/* Immediate - Key follows command in the descriptor */
+#define KEY_IMM			0x00800000
+
+/*
+ * Encrypted - Key is encrypted either with the KEK, or
+ * with the TDKEK if TK is set
+ */
+#define KEY_ENC			0x00400000
+
+/*
+ * No Write Back - Do not allow key to be FIFO STOREd
+ */
+#define KEY_NWB			0x00200000
+
+/*
+ * Enhanced Encryption of Key
+ */
+#define KEY_EKT			0x00100000
+
+/*
+ * Encrypted with Trusted Key
+ */
+#define KEY_TK			0x00008000
+
+/*
+ * KDEST - Key Destination: 0 - class key register,
+ * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key
+ */
+#define KEY_DEST_SHIFT		16
+#define KEY_DEST_MASK		(0x03 << KEY_DEST_SHIFT)
+
+#define KEY_DEST_CLASS_REG	(0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E		(0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX	(0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT	(0x03 << KEY_DEST_SHIFT)
+
+/* Length in bytes */
+#define KEY_LENGTH_MASK		0x000003ff
+
+/*
+ * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
+ */
+
+/*
+ * Load/Store Destination: 0 = class independent CCB,
+ * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
+ */
+#define LDST_CLASS_SHIFT	25
+#define LDST_CLASS_MASK		(0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB	(0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB	(0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB	(0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO		(0x03 << LDST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define LDST_SGF		0x01000000
+#define LDST_VLF		LDST_SGF
+
+/* Immediate - Key follows this command in descriptor */
+#define LDST_IMM_MASK		1
+#define LDST_IMM_SHIFT		23
+#define LDST_IMM		(LDST_IMM_MASK << LDST_IMM_SHIFT)
+
+/* SRC/DST - Destination for LOAD, Source for STORE */
+#define LDST_SRCDST_SHIFT	16
+#define LDST_SRCDST_MASK	(0x7f << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_BYTE_CONTEXT	(0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_KEY		(0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_INFIFO		(0x7c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_OUTFIFO	(0x7e << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_WORD_MODE_REG	(0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_KEYSZ_REG	(0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DATASZ_REG	(0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ICVSZ_REG	(0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CHACTRL	(0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL	(0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IRQCTRL	(0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD	(0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLRW		(0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0	(0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STAT		(0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1	(0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2	(0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ	(0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3	(0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ	(0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1	(0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ	(0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ	(0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ	(0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ	(0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS_CTX	(0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF	(0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB	(0x41 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED	(0x42 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB_WE	(0x45 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO	(0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination */
+#define LDST_OFFSET_SHIFT	8
+#define LDST_OFFSET_MASK	(0xff << LDST_OFFSET_SHIFT)
+
+/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
+/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
+#define LDOFF_CHG_SHARE_SHIFT		0
+#define LDOFF_CHG_SHARE_MASK		(0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER		(0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP		(0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP	(0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO		(1 << 2)
+#define LDOFF_DISABLE_AUTO_NFIFO	(1 << 3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT	4
+#define LDOFF_CHG_NONSEQLIODN_MASK	(0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ	(0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ	(0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED	(0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT	6
+#define LDOFF_CHG_SEQLIODN_MASK		(0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ		(0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ	(0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED	(0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes	*/
+#define LDST_LEN_SHIFT		0
+#define LDST_LEN_MASK		(0xff << LDST_LEN_SHIFT)
+
+/* Special Length definitions when dst=deco-ctrl */
+#define LDLEN_ENABLE_OSL_COUNT		(1 << 7)
+#define LDLEN_RST_CHA_OFIFO_PTR		(1 << 6)
+#define LDLEN_RST_OFIFO			(1 << 5)
+#define LDLEN_SET_OFIFO_OFF_VALID	(1 << 4)
+#define LDLEN_SET_OFIFO_OFF_RSVD	(1 << 3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT	0
+#define LDLEN_SET_OFIFO_OFFSET_MASK	(3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+
+/*
+ * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
+ * Command Constructs
+ */
+
+/*
+ * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
+ * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
+ * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
+ */
+#define FIFOLD_CLASS_SHIFT	25
+#define FIFOLD_CLASS_MASK	(0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP	(0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1	(0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2	(0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH	(0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT	25
+#define FIFOST_CLASS_MASK	(0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL	(0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY	(0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY	(0x02 << FIFOST_CLASS_SHIFT)
+
+/*
+ * Scatter-Gather Table/Variable Length Field
+ * If set for FIFO_LOAD, refers to a SG table. Within
+ * SEQ_FIFO_LOAD, is variable input sequence
+ */
+#define FIFOLDST_SGF_SHIFT	24
+#define FIFOLDST_SGF_MASK	(1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK	(1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF		(1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF		(1 << FIFOLDST_SGF_SHIFT)
+
+/* Immediate - Data follows command in descriptor */
+#define FIFOLD_IMM_SHIFT	23
+#define FIFOLD_IMM_MASK		(1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM		(1 << FIFOLD_IMM_SHIFT)
+
+/* Continue - Not the last FIFO store to come */
+#define FIFOST_CONT_SHIFT	23
+#define FIFOST_CONT_MASK	(1 << FIFOST_CONT_SHIFT)
+
+/*
+ * Extended Length - use 32-bit extended length that
+ * follows the pointer field. Illegal with IMM set
+ */
+#define FIFOLDST_EXT_SHIFT	22
+#define FIFOLDST_EXT_MASK	(1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT		(1 << FIFOLDST_EXT_SHIFT)
+
+/* Input data type */
+#define FIFOLD_TYPE_SHIFT	16
+#define FIFOLD_CONT_TYPE_SHIFT	19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK	(0x3f << FIFOLD_TYPE_SHIFT)
+
+/* PK types */
+#define FIFOLD_TYPE_PK		(0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK	(0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0	(0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1	(0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2	(0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3	(0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0	(0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1	(0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2	(0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3	(0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N	(0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A	(0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B	(0x0d << FIFOLD_TYPE_SHIFT)
+
+/* Other types. Need to OR in last/flush bits as desired */
+#define FIFOLD_TYPE_MSG_MASK	(0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG		(0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2	(0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV		(0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA	(0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD		(0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV		(0x38 << FIFOLD_TYPE_SHIFT)
+
+/* Last/Flush bits for use with "other" types above */
+#define FIFOLD_TYPE_ACT_MASK	(0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION	(0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1	(0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1	(0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH	(0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2	(0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH	(0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL	(0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOINFOFIFO	(0x0F << FIFOLD_TYPE_SHIFT)
+
+#define FIFOLDST_LEN_MASK	0xffff
+#define FIFOLDST_EXT_LEN_MASK	0xffffffff
+
+/* Output data types */
+#define FIFOST_TYPE_SHIFT	16
+#define FIFOST_TYPE_MASK	(0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0	 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1	 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2	 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3	 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0	 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1	 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2	 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3	 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N	 (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A	 (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B	 (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK	 (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK	 (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK	 (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK	 (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK	 (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK	 (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK	 (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE	 (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO	 (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP	 (0x3f << FIFOST_TYPE_SHIFT)
+
+/*
+ * OPERATION Command Constructs
+ */
+
+/* Operation type selectors - OP TYPE */
+#define OP_TYPE_SHIFT		24
+#define OP_TYPE_MASK		(0x07 << OP_TYPE_SHIFT)
+
+#define OP_TYPE_UNI_PROTOCOL	(0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK		(0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG	(0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG	(0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL	(0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL	(0x07 << OP_TYPE_SHIFT)
+
+/* ProtocolID selectors - PROTID */
+#define OP_PCLID_SHIFT		16
+#define OP_PCLID_MASK		(0xff << 16)
+
+/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
+#define OP_PCLID_IKEV1_PRF	(0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF	(0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF	(0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF	(0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF	(0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10_PRF	(0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PRF		(0x06 << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB		(0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_SECRETKEY	(0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR	(0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN	(0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY	(0x16 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSAENC_PUBKEY  (0x18 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSADEC_PRVKEY  (0x19 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_MD5	(0x20 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA1	(0x21 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA224	(0x22 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA256	(0x23 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA384	(0x24 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA512	(0x25 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_MD5	(0x60 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA1	(0x61 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA224	(0x62 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA256	(0x63 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA384	(0x64 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA512	(0x65 << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
+#define OP_PCLID_IPSEC		(0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP		(0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC		(0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI		(0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX		(0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30		(0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10		(0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11		(0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12		(0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS		(0x0c << OP_PCLID_SHIFT)
+
+/*
+ * ProtocolInfo selectors
+ */
+#define OP_PCLINFO_MASK				 0xffff
+
+/* for OP_PCLID_IPSEC */
+#define OP_PCL_IPSEC_CIPHER_MASK		 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK			 0x00ff
+
+#define OP_PCL_IPSEC_DES_IV64			 0x0100
+#define OP_PCL_IPSEC_DES			 0x0200
+#define OP_PCL_IPSEC_3DES			 0x0300
+#define OP_PCL_IPSEC_AES_CBC			 0x0c00
+#define OP_PCL_IPSEC_AES_CTR			 0x0d00
+#define OP_PCL_IPSEC_AES_XTS			 0x1600
+#define OP_PCL_IPSEC_AES_CCM8			 0x0e00
+#define OP_PCL_IPSEC_AES_CCM12			 0x0f00
+#define OP_PCL_IPSEC_AES_CCM16			 0x1000
+#define OP_PCL_IPSEC_AES_GCM8			 0x1200
+#define OP_PCL_IPSEC_AES_GCM12			 0x1300
+#define OP_PCL_IPSEC_AES_GCM16			 0x1400
+
+#define OP_PCL_IPSEC_HMAC_NULL			 0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96		 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96		 0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96		 0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128		 0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160		 0x0007
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128		 0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192		 0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256		 0x000e
+
+/* For SRTP - OP_PCLID_SRTP */
+#define OP_PCL_SRTP_CIPHER_MASK			 0xff00
+#define OP_PCL_SRTP_AUTH_MASK			 0x00ff
+
+#define OP_PCL_SRTP_AES_CTR			 0x0d00
+
+#define OP_PCL_SRTP_HMAC_SHA1_160		 0x0007
+
+/* For SSL 3.0 - OP_PCLID_SSL30 */
+#define OP_PCL_SSL30_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_SSL30_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_SSL30_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_SSL30_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_SSL30_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_SSL30_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_SSL30_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_SSL30_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_SSL30_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_SSL30_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_SSL30_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_SSL30_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_SSL30_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_SSL30_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_SSL30_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_SSL30_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_SSL30_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_SSL30_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_SSL30_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_SSL30_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_SSL30_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_SSL30_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_SSL30_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_SSL30_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_SSL30_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_SSL30_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_SSL30_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_SSL30_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_SSL30_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_SSL30_AES_256_CBC_SHA_17		 0xc022
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_MD5		 0x0023
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18	 0xc01c
+
+#define OP_PCL_SSL30_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_SSL30_DES_CBC_MD5		 0x0022
+
+#define OP_PCL_SSL30_DES40_CBC_SHA		 0x0008
+#define OP_PCL_SSL30_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_SSL30_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_SSL30_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_SSL30_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_SSL30_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_SSL30_DES40_CBC_SHA_7		 0x0026
+
+#define OP_PCL_SSL30_DES_CBC_SHA		 0x001e
+#define OP_PCL_SSL30_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_SSL30_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_SSL30_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_SSL30_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_SSL30_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_SSL30_DES_CBC_SHA_7		 0x001a
+
+#define OP_PCL_SSL30_RC4_128_MD5		 0x0024
+#define OP_PCL_SSL30_RC4_128_MD5_2		 0x0004
+#define OP_PCL_SSL30_RC4_128_MD5_3		 0x0018
+
+#define OP_PCL_SSL30_RC4_40_MD5			 0x002b
+#define OP_PCL_SSL30_RC4_40_MD5_2		 0x0003
+#define OP_PCL_SSL30_RC4_40_MD5_3		 0x0017
+
+#define OP_PCL_SSL30_RC4_128_SHA		 0x0020
+#define OP_PCL_SSL30_RC4_128_SHA_2		 0x008a
+#define OP_PCL_SSL30_RC4_128_SHA_3		 0x008e
+#define OP_PCL_SSL30_RC4_128_SHA_4		 0x0092
+#define OP_PCL_SSL30_RC4_128_SHA_5		 0x0005
+#define OP_PCL_SSL30_RC4_128_SHA_6		 0xc002
+#define OP_PCL_SSL30_RC4_128_SHA_7		 0xc007
+#define OP_PCL_SSL30_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_SSL30_RC4_128_SHA_9		 0xc011
+#define OP_PCL_SSL30_RC4_128_SHA_10		 0xc016
+
+#define OP_PCL_SSL30_RC4_40_SHA			 0x0028
+
+
+/* For TLS 1.0 - OP_PCLID_TLS10 */
+#define OP_PCL_TLS10_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_TLS10_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_TLS10_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_TLS10_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_TLS10_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_TLS10_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_TLS10_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_TLS10_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_TLS10_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_TLS10_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_TLS10_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_TLS10_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_TLS10_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_TLS10_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_TLS10_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_TLS10_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_TLS10_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_TLS10_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_TLS10_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_TLS10_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_TLS10_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_TLS10_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_TLS10_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_TLS10_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_TLS10_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_TLS10_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_TLS10_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_TLS10_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_TLS10_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_TLS10_AES_256_CBC_SHA_17		 0xc022
+
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5	0x0023 */
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18	 0xc01c
+
+#define OP_PCL_TLS10_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_TLS10_DES_CBC_MD5		 0x0022
+
+#define OP_PCL_TLS10_DES40_CBC_SHA		 0x0008
+#define OP_PCL_TLS10_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_TLS10_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_TLS10_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_TLS10_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_TLS10_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_TLS10_DES40_CBC_SHA_7		 0x0026
+
+
+#define OP_PCL_TLS10_DES_CBC_SHA		 0x001e
+#define OP_PCL_TLS10_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_TLS10_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_TLS10_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_TLS10_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_TLS10_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_TLS10_DES_CBC_SHA_7		 0x001a
+
+#define OP_PCL_TLS10_RC4_128_MD5		 0x0024
+#define OP_PCL_TLS10_RC4_128_MD5_2		 0x0004
+#define OP_PCL_TLS10_RC4_128_MD5_3		 0x0018
+
+#define OP_PCL_TLS10_RC4_40_MD5			 0x002b
+#define OP_PCL_TLS10_RC4_40_MD5_2		 0x0003
+#define OP_PCL_TLS10_RC4_40_MD5_3		 0x0017
+
+#define OP_PCL_TLS10_RC4_128_SHA		 0x0020
+#define OP_PCL_TLS10_RC4_128_SHA_2		 0x008a
+#define OP_PCL_TLS10_RC4_128_SHA_3		 0x008e
+#define OP_PCL_TLS10_RC4_128_SHA_4		 0x0092
+#define OP_PCL_TLS10_RC4_128_SHA_5		 0x0005
+#define OP_PCL_TLS10_RC4_128_SHA_6		 0xc002
+#define OP_PCL_TLS10_RC4_128_SHA_7		 0xc007
+#define OP_PCL_TLS10_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_TLS10_RC4_128_SHA_9		 0xc011
+#define OP_PCL_TLS10_RC4_128_SHA_10		 0xc016
+
+#define OP_PCL_TLS10_RC4_40_SHA			 0x0028
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160	 0xff30
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224	 0xff34
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256	 0xff36
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384	 0xff33
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512	 0xff35
+#define OP_PCL_TLS10_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_TLS10_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_TLS10_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_TLS10_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_TLS10_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_TLS10_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_TLS10_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_TLS10_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_TLS10_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_TLS10_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_TLS10_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_TLS10_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_TLS10_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_TLS10_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_TLS10_AES_256_CBC_SHA512		 0xff65
+
+/* For TLS 1.1 - OP_PCLID_TLS11 */
+#define OP_PCL_TLS11_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_TLS11_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_TLS11_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_TLS11_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_TLS11_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_TLS11_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_TLS11_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_TLS11_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_TLS11_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_TLS11_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_TLS11_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_TLS11_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_TLS11_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_TLS11_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_TLS11_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_TLS11_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_TLS11_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_TLS11_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_TLS11_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_TLS11_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_TLS11_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_TLS11_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_TLS11_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_TLS11_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_TLS11_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_TLS11_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_TLS11_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_TLS11_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_TLS11_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_TLS11_AES_256_CBC_SHA_17		 0xc022
+
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5	0x0023 */
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18	 0xc01c
+
+#define OP_PCL_TLS11_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_TLS11_DES_CBC_MD5		 0x0022
+
+#define OP_PCL_TLS11_DES40_CBC_SHA		 0x0008
+#define OP_PCL_TLS11_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_TLS11_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_TLS11_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_TLS11_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_TLS11_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_TLS11_DES40_CBC_SHA_7		 0x0026
+
+#define OP_PCL_TLS11_DES_CBC_SHA		 0x001e
+#define OP_PCL_TLS11_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_TLS11_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_TLS11_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_TLS11_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_TLS11_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_TLS11_DES_CBC_SHA_7		 0x001a
+
+#define OP_PCL_TLS11_RC4_128_MD5		 0x0024
+#define OP_PCL_TLS11_RC4_128_MD5_2		 0x0004
+#define OP_PCL_TLS11_RC4_128_MD5_3		 0x0018
+
+#define OP_PCL_TLS11_RC4_40_MD5			 0x002b
+#define OP_PCL_TLS11_RC4_40_MD5_2		 0x0003
+#define OP_PCL_TLS11_RC4_40_MD5_3		 0x0017
+
+#define OP_PCL_TLS11_RC4_128_SHA		 0x0020
+#define OP_PCL_TLS11_RC4_128_SHA_2		 0x008a
+#define OP_PCL_TLS11_RC4_128_SHA_3		 0x008e
+#define OP_PCL_TLS11_RC4_128_SHA_4		 0x0092
+#define OP_PCL_TLS11_RC4_128_SHA_5		 0x0005
+#define OP_PCL_TLS11_RC4_128_SHA_6		 0xc002
+#define OP_PCL_TLS11_RC4_128_SHA_7		 0xc007
+#define OP_PCL_TLS11_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_TLS11_RC4_128_SHA_9		 0xc011
+#define OP_PCL_TLS11_RC4_128_SHA_10		 0xc016
+
+#define OP_PCL_TLS11_RC4_40_SHA			 0x0028
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160	 0xff30
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224	 0xff34
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256	 0xff36
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384	 0xff33
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512	 0xff35
+#define OP_PCL_TLS11_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_TLS11_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_TLS11_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_TLS11_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_TLS11_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_TLS11_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_TLS11_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_TLS11_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_TLS11_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_TLS11_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_TLS11_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_TLS11_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_TLS11_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_TLS11_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_TLS11_AES_256_CBC_SHA512		 0xff65
+
+
+/* For TLS 1.2 - OP_PCLID_TLS12 */
+#define OP_PCL_TLS12_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_TLS12_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_TLS12_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_TLS12_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_TLS12_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_TLS12_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_TLS12_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_TLS12_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_TLS12_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_TLS12_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_TLS12_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_TLS12_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_TLS12_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_TLS12_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_TLS12_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_TLS12_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_TLS12_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_TLS12_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_TLS12_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_TLS12_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_TLS12_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_TLS12_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_TLS12_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_TLS12_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_TLS12_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_TLS12_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_TLS12_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_TLS12_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_TLS12_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_TLS12_AES_256_CBC_SHA_17		 0xc022
+
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5	0x0023 */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18	 0xc01c
+
+#define OP_PCL_TLS12_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_TLS12_DES_CBC_MD5		 0x0022
+
+#define OP_PCL_TLS12_DES40_CBC_SHA		 0x0008
+#define OP_PCL_TLS12_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_TLS12_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_TLS12_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_TLS12_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_TLS12_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_TLS12_DES40_CBC_SHA_7		 0x0026
+
+#define OP_PCL_TLS12_DES_CBC_SHA		 0x001e
+#define OP_PCL_TLS12_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_TLS12_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_TLS12_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_TLS12_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_TLS12_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_TLS12_DES_CBC_SHA_7		 0x001a
+
+#define OP_PCL_TLS12_RC4_128_MD5		 0x0024
+#define OP_PCL_TLS12_RC4_128_MD5_2		 0x0004
+#define OP_PCL_TLS12_RC4_128_MD5_3		 0x0018
+
+#define OP_PCL_TLS12_RC4_40_MD5			 0x002b
+#define OP_PCL_TLS12_RC4_40_MD5_2		 0x0003
+#define OP_PCL_TLS12_RC4_40_MD5_3		 0x0017
+
+#define OP_PCL_TLS12_RC4_128_SHA		 0x0020
+#define OP_PCL_TLS12_RC4_128_SHA_2		 0x008a
+#define OP_PCL_TLS12_RC4_128_SHA_3		 0x008e
+#define OP_PCL_TLS12_RC4_128_SHA_4		 0x0092
+#define OP_PCL_TLS12_RC4_128_SHA_5		 0x0005
+#define OP_PCL_TLS12_RC4_128_SHA_6		 0xc002
+#define OP_PCL_TLS12_RC4_128_SHA_7		 0xc007
+#define OP_PCL_TLS12_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_TLS12_RC4_128_SHA_9		 0xc011
+#define OP_PCL_TLS12_RC4_128_SHA_10		 0xc016
+
+#define OP_PCL_TLS12_RC4_40_SHA			 0x0028
+
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256	0x003c */
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_2	 0x003e
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_3	 0x003f
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4	 0x0040
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_5	 0x0067
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_6	 0x006c
+
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256	0x003d */
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_2	 0x0068
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_3	 0x0069
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_4	 0x006a
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_5	 0x006b
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_6	 0x006d
+
+/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160	 0xff30
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224	 0xff34
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256	 0xff36
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384	 0xff33
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512	 0xff35
+#define OP_PCL_TLS12_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_TLS12_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_TLS12_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_TLS12_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_TLS12_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_TLS12_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_TLS12_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_TLS12_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_TLS12_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_TLS12_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_TLS12_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_TLS12_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_TLS12_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_TLS12_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_TLS12_AES_256_CBC_SHA512		 0xff65
+
+/* For DTLS - OP_PCLID_DTLS */
+
+#define OP_PCL_DTLS_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_DTLS_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_DTLS_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_DTLS_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_DTLS_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_DTLS_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_DTLS_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_DTLS_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_DTLS_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_DTLS_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_DTLS_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_DTLS_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_DTLS_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_DTLS_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_DTLS_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_17		 0xc01f
+
+#define OP_PCL_DTLS_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_DTLS_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_DTLS_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_DTLS_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_DTLS_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_DTLS_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_DTLS_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_DTLS_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_DTLS_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_DTLS_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_DTLS_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_DTLS_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_DTLS_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_DTLS_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_DTLS_AES_256_CBC_SHA_17		 0xc022
+
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5		0x0023 */
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10		 0x001b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11		 0xc003
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12		 0xc008
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13		 0xc00d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14		 0xc012
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15		 0xc017
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16		 0xc01a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17		 0xc01b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18		 0xc01c
+
+#define OP_PCL_DTLS_DES40_CBC_MD5		 0x0029
+
+#define OP_PCL_DTLS_DES_CBC_MD5			 0x0022
+
+#define OP_PCL_DTLS_DES40_CBC_SHA		 0x0008
+#define OP_PCL_DTLS_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_DTLS_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_DTLS_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_DTLS_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_DTLS_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_DTLS_DES40_CBC_SHA_7		 0x0026
+
+
+#define OP_PCL_DTLS_DES_CBC_SHA			 0x001e
+#define OP_PCL_DTLS_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_DTLS_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_DTLS_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_DTLS_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_DTLS_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_DTLS_DES_CBC_SHA_7		 0x001a
+
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160		 0xff30
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224		 0xff34
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256		 0xff36
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384		 0xff33
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512		 0xff35
+#define OP_PCL_DTLS_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_DTLS_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_DTLS_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_DTLS_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_DTLS_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_DTLS_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_DTLS_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_DTLS_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_DTLS_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_DTLS_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_DTLS_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_DTLS_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_DTLS_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_DTLS_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_DTLS_AES_256_CBC_SHA512		 0xff65
+
+/* 802.16 WiMAX protinfos */
+#define OP_PCL_WIMAX_OFDM			 0x0201
+#define OP_PCL_WIMAX_OFDMA			 0x0231
+
+/* 802.11 WiFi protinfos */
+#define OP_PCL_WIFI				 0xac04
+
+/* MACsec protinfos */
+#define OP_PCL_MACSEC				 0x0001
+
+/* Derived Key Protocol (DKP) Protinfo */
+#define OP_PCL_DKP_SRC_SHIFT	14
+#define OP_PCL_DKP_SRC_MASK	(3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_IMM	(0 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SEQ	(1 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_PTR	(2 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SGF	(3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_DST_SHIFT	12
+#define OP_PCL_DKP_DST_MASK	(3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_IMM	(0 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SEQ	(1 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_PTR	(2 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SGF	(3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_KEY_SHIFT	0
+#define OP_PCL_DKP_KEY_MASK	(0xfff << OP_PCL_DKP_KEY_SHIFT)
+
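+/*
+ * Illustrative composition (a sketch, not part of the original source;
+ * 'keylen' is a hypothetical variable): a DKP protinfo word taking the key
+ * as immediate data and writing the split key back as immediate data could
+ * be built as
+ *
+ *	protinfo = OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
+ *		   ((keylen << OP_PCL_DKP_KEY_SHIFT) & OP_PCL_DKP_KEY_MASK);
+ */
+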
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_TEST			 0x0008
+#define OP_PCL_PKPROT_DECRYPT			 0x0004
+#define OP_PCL_PKPROT_ECC			 0x0002
+#define OP_PCL_PKPROT_F2M			 0x0001
+
+/* For non-protocol/alg-only op commands */
+#define OP_ALG_TYPE_SHIFT	24
+#define OP_ALG_TYPE_MASK	(0x7 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS1	(2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2	(4 << OP_ALG_TYPE_SHIFT)
+
+#define OP_ALG_ALGSEL_SHIFT	16
+#define OP_ALG_ALGSEL_MASK	(0xff << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SUBMASK	(0x0f << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_AES	(0x10 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_DES	(0x20 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_3DES	(0x21 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ARC4	(0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_MD5	(0x40 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA1	(0x41 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA224	(0x42 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA256	(0x43 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA384	(0x44 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA512	(0x45 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_RNG	(0x50 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW	(0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F8	(0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_KASUMI	(0x70 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CRC	(0x90 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F9	(0xA0 << OP_ALG_ALGSEL_SHIFT)
+
+#define OP_ALG_AAI_SHIFT	4
+#define OP_ALG_AAI_MASK		(0x1ff << OP_ALG_AAI_SHIFT)
+
+/* blockcipher AAI set */
+#define OP_ALG_AAI_CTR_MOD128	(0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD8	(0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD16	(0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD24	(0x03 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD32	(0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD40	(0x05 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD48	(0x06 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD56	(0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD64	(0x08 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD72	(0x09 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD80	(0x0a << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD88	(0x0b << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD96	(0x0c << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD104	(0x0d << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD112	(0x0e << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD120	(0x0f << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC		(0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_ECB		(0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CFB		(0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_OFB		(0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XTS		(0x50 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CMAC		(0x60 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XCBC_MAC	(0x70 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CCM		(0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GCM		(0x90 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_XCBCMAC	(0xa0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_XCBCMAC	(0xb0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CHECKODD	(0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DK		(0x100 << OP_ALG_AAI_SHIFT)
+
+/* randomizer AAI set */
+#define OP_ALG_AAI_RNG		(0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB	(0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP	(0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_0	(0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1	(0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_PS	(0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI	(0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK	(0x100 << OP_ALG_AAI_SHIFT)
+
+/* hmac/smac AAI set */
+#define OP_ALG_AAI_HASH		(0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC		(0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_SMAC		(0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC_PRECOMP	(0x04 << OP_ALG_AAI_SHIFT)
+
+/* CRC AAI set */
+#define OP_ALG_AAI_802		(0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_3385		(0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CUST_POLY	(0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DIS		(0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOS		(0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOC		(0x40 << OP_ALG_AAI_SHIFT)
+
+/* Kasumi/SNOW AAI set */
+#define OP_ALG_AAI_F8		(0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_F9		(0xc8 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GSM		(0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_EDGE		(0x20 << OP_ALG_AAI_SHIFT)
+
+#define OP_ALG_AS_SHIFT		2
+#define OP_ALG_AS_MASK		(0x3 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_UPDATE	(0 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INIT		(1 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_FINALIZE	(2 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INITFINAL	(3 << OP_ALG_AS_SHIFT)
+
+#define OP_ALG_ICV_SHIFT	1
+#define OP_ALG_ICV_MASK		(1 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_OFF		(0 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_ON		(1 << OP_ALG_ICV_SHIFT)
+
+#define OP_ALG_DIR_SHIFT	0
+#define OP_ALG_DIR_MASK		1
+#define OP_ALG_DECRYPT		0
+#define OP_ALG_ENCRYPT		1
+
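+/*
+ * Illustrative composition (a sketch, not part of the original source):
+ * a class 1 AES-CBC encrypt operation, run as a single init+final pass
+ * with no ICV check, could combine the fields above as
+ *
+ *	op = OP_ALG_TYPE_CLASS1 | OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC |
+ *	     OP_ALG_AS_INITFINAL | OP_ALG_ICV_OFF | OP_ALG_ENCRYPT;
+ */
+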
+/* PKHA algorithm type set */
+#define OP_ALG_PK		0x00800000
+#define OP_ALG_PK_FUN_MASK	0x3f /* clrmem, modmath, or cpymem */
+
+/* PKHA mode clear memory functions */
+#define OP_ALG_PKMODE_A_RAM	0x80000
+#define OP_ALG_PKMODE_B_RAM	0x40000
+#define OP_ALG_PKMODE_E_RAM	0x20000
+#define OP_ALG_PKMODE_N_RAM	0x10000
+#define OP_ALG_PKMODE_CLEARMEM	0x00001
+
+/* PKHA mode modular-arithmetic functions */
+#define OP_ALG_PKMODE_MOD_IN_MONTY	0x80000
+#define OP_ALG_PKMODE_MOD_OUT_MONTY	0x40000
+#define OP_ALG_PKMODE_MOD_F2M		0x20000
+#define OP_ALG_PKMODE_MOD_R2_IN		0x10000
+#define OP_ALG_PKMODE_PRJECTV		0x00800
+#define OP_ALG_PKMODE_TIME_EQ		0x400
+#define OP_ALG_PKMODE_OUT_B		0x000
+#define OP_ALG_PKMODE_OUT_A		0x100
+#define OP_ALG_PKMODE_MOD_ADD		0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB	0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA	0x004
+#define OP_ALG_PKMODE_MOD_MULT		0x005
+#define OP_ALG_PKMODE_MOD_EXPO		0x006
+#define OP_ALG_PKMODE_MOD_REDUCT	0x007
+#define OP_ALG_PKMODE_MOD_INV		0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD	0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL	0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT	0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST	0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST	0x00d
+#define OP_ALG_PKMODE_MOD_GCD		0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY	0x00f
+
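+/*
+ * Illustrative composition (a sketch, not part of the original source):
+ * a PKHA modular exponentiation leaving its result in register B could
+ * select
+ *
+ *	pkmode = OP_ALG_PK | OP_ALG_PKMODE_MOD_EXPO | OP_ALG_PKMODE_OUT_B;
+ */
+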
+/* PKHA mode copy-memory functions */
+#define OP_ALG_PKMODE_SRC_REG_SHIFT	17
+#define OP_ALG_PKMODE_SRC_REG_MASK	(7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT	10
+#define OP_ALG_PKMODE_DST_REG_MASK	(7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT	8
+#define OP_ALG_PKMODE_SRC_SEG_MASK	(3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT	6
+#define OP_ALG_PKMODE_DST_SEG_MASK	(3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+#define OP_ALG_PKMODE_SRC_REG_A		(0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B		(1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N		(3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A		(0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B		(1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E		(2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N		(3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0		(0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1		(1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2		(2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3		(3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0		(0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1		(1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2		(2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3		(3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_CPYMEM_N_SZ	0x80
+#define OP_ALG_PKMODE_CPYMEM_SRC_SZ	0x81
+
+/*
+ * SEQ_IN_PTR Command Constructs
+ */
+
+/* Release Buffers */
+#define SQIN_RBS	0x04000000
+
+/* Sequence pointer is really a descriptor */
+#define SQIN_INL	0x02000000
+
+/* Sequence pointer is a scatter-gather table */
+#define SQIN_SGF	0x01000000
+
+/* Appends to a previous pointer */
+#define SQIN_PRE	0x00800000
+
+/* Use extended length following pointer */
+#define SQIN_EXT	0x00400000
+
+/* Restore sequence with pointer/length */
+#define SQIN_RTO	0x00200000
+
+/* Replace job descriptor */
+#define SQIN_RJD	0x00100000
+
+#define SQIN_LEN_SHIFT		 0
+#define SQIN_LEN_MASK		(0xffff << SQIN_LEN_SHIFT)
+
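+/*
+ * Illustrative composition (a sketch, not part of the original source;
+ * 'len' is a hypothetical variable): a SEQ_IN_PTR command referencing a
+ * scatter-gather table would set
+ *
+ *	sqin = CMD_SEQ_IN_PTR | SQIN_SGF | (len & SQIN_LEN_MASK);
+ *
+ * with the table's bus address following in the next word(s).
+ */
+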
+/*
+ * SEQ_OUT_PTR Command Constructs
+ */
+
+/* Sequence pointer is a scatter-gather table */
+#define SQOUT_SGF	0x01000000
+
+/* Appends to a previous pointer */
+#define SQOUT_PRE	SQIN_PRE
+
+/* Restore sequence with pointer/length */
+#define SQOUT_RTO	SQIN_RTO
+
+/* Use extended length following pointer */
+#define SQOUT_EXT	0x00400000
+
+#define SQOUT_LEN_SHIFT		0
+#define SQOUT_LEN_MASK		(0xffff << SQOUT_LEN_SHIFT)
+
+
+/*
+ * SIGNATURE Command Constructs
+ */
+
+/* TYPE field is all that's relevant */
+#define SIGN_TYPE_SHIFT		16
+#define SIGN_TYPE_MASK		(0x0f << SIGN_TYPE_SHIFT)
+
+#define SIGN_TYPE_FINAL		(0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2		(0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3		(0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4		(0x0c << SIGN_TYPE_SHIFT)
+
+/*
+ * MOVE Command Constructs
+ */
+
+#define MOVE_AUX_SHIFT		25
+#define MOVE_AUX_MASK		(3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS		(2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS		(1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT	24
+#define MOVE_WAITCOMP_MASK	(1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP		(1 << MOVE_WAITCOMP_SHIFT)
+
+#define MOVE_SRC_SHIFT		20
+#define MOVE_SRC_MASK		(0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX	(0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX	(0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO	(0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF	(0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0		(0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1		(0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2		(0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3		(0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO		(0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL	(0x09 << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT		16
+#define MOVE_DEST_MASK		(0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX	(0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX	(0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO	(0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF	(0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0		(0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1		(0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2		(0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3		(0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO	(0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO	(0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_INFIFO_NOINFO (0x0a << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A		(0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY	(0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY	(0x0e << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT	8
+#define MOVE_OFFSET_MASK	(0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT		0
+#define MOVE_LEN_MASK		(0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT	0
+#define MOVELEN_MRSEL_MASK	(0x3 << MOVELEN_MRSEL_SHIFT)
+
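+/*
+ * Illustrative composition (a sketch, not part of the original source):
+ * moving 16 bytes from MATH register 0 into the class 1 context, waiting
+ * for completion, could be encoded as
+ *
+ *	move = CMD_MOVE | MOVE_WAITCOMP | MOVE_SRC_MATH0 |
+ *	       MOVE_DEST_CLASS1CTX | (16 << MOVE_LEN_SHIFT);
+ */
+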
+/*
+ * MATH Command Constructs
+ */
+
+#define MATH_IFB_SHIFT		26
+#define MATH_IFB_MASK		(1 << MATH_IFB_SHIFT)
+#define MATH_IFB		(1 << MATH_IFB_SHIFT)
+
+#define MATH_NFU_SHIFT		25
+#define MATH_NFU_MASK		(1 << MATH_NFU_SHIFT)
+#define MATH_NFU		(1 << MATH_NFU_SHIFT)
+
+#define MATH_STL_SHIFT		24
+#define MATH_STL_MASK		(1 << MATH_STL_SHIFT)
+#define MATH_STL		(1 << MATH_STL_SHIFT)
+
+/* Function selectors */
+#define MATH_FUN_SHIFT		20
+#define MATH_FUN_MASK		(0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD		(0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC		(0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB		(0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB		(0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR		(0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND		(0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR		(0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT		(0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT		(0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD		(0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT		(0x0a << MATH_FUN_SHIFT)
+
+/* Source 0 selectors */
+#define MATH_SRC0_SHIFT		16
+#define MATH_SRC0_MASK		(0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0		(0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1		(0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2		(0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3		(0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM		(0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_DPOVRD	(0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN	(0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN	(0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN	(0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN	(0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO		(0x0c << MATH_SRC0_SHIFT)
+
+/* Source 1 selectors */
+#define MATH_SRC1_SHIFT		12
+#define MATH_SRC1_MASK		(0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0		(0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1		(0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2		(0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3		(0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM		(0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_DPOVRD	(0x07 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO	(0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO	(0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE		(0x0c << MATH_SRC1_SHIFT)
+
+/* Destination selectors */
+#define MATH_DEST_SHIFT		8
+#define MATH_DEST_MASK		(0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0		(0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1		(0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2		(0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3		(0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_DPOVRD	(0x07 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN	(0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN	(0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN	(0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN	(0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE		(0x0f << MATH_DEST_SHIFT)
+
+/* Length selectors */
+#define MATH_LEN_SHIFT		0
+#define MATH_LEN_MASK		(0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE		0x01
+#define MATH_LEN_2BYTE		0x02
+#define MATH_LEN_4BYTE		0x04
+#define MATH_LEN_8BYTE		0x08
+
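+/*
+ * Illustrative composition (a sketch, not part of the original source):
+ * computing REG0 = VARSEQINLEN - REG1 over 4 bytes would read
+ *
+ *	math = CMD_MATH | MATH_FUN_SUB | MATH_SRC0_VARSEQINLEN |
+ *	       MATH_SRC1_REG1 | MATH_DEST_REG0 | MATH_LEN_4BYTE;
+ */
+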
+/*
+ * JUMP Command Constructs
+ */
+
+#define JUMP_CLASS_SHIFT	25
+#define JUMP_CLASS_MASK		(3 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_NONE		0
+#define JUMP_CLASS_CLASS1	(1 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_CLASS2	(2 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_BOTH		(3 << JUMP_CLASS_SHIFT)
+
+#define JUMP_JSL_SHIFT		24
+#define JUMP_JSL_MASK		(1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL		(1 << JUMP_JSL_SHIFT)
+
+#define JUMP_TYPE_SHIFT		22
+#define JUMP_TYPE_LOCAL		(0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL	(0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT		(0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER	(0x03 << JUMP_TYPE_SHIFT)
+
+#define JUMP_TEST_SHIFT		16
+#define JUMP_TEST_MASK		(0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL		(0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL	(0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY		(0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY	(0x03 << JUMP_TEST_SHIFT)
+
+/* Condition codes. The JSL bit is factored into the codes that need it */
+#define JUMP_COND_SHIFT		8
+#define JUMP_COND_MASK		(0x100ff << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_0		(0x80 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_GCD_1	(0x40 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_PRIME	(0x20 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_N	(0x08 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_Z	(0x04 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_C	(0x02 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_NV	(0x01 << JUMP_COND_SHIFT)
+
+#define JUMP_COND_JRP		((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SHRD		((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SELF		((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_CALM		((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIP		((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIFP		((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NOP		((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NCP		((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
+
+#define JUMP_OFFSET_SHIFT	0
+#define JUMP_OFFSET_MASK	(0xff << JUMP_OFFSET_SHIFT)
+
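+/*
+ * Illustrative composition (a sketch, not part of the original source):
+ * a local jump over one command, conditioned on the shared-descriptor
+ * (SHRD) flag (whose encoding above already carries the JSL bit), would be
+ *
+ *	jump = CMD_JUMP | JUMP_TYPE_LOCAL | JUMP_TEST_ALL | JUMP_COND_SHRD |
+ *	       (1 << JUMP_OFFSET_SHIFT);
+ */
+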
+/*
+ * NFIFO ENTRY Data Constructs
+ */
+#define NFIFOENTRY_DEST_SHIFT	30
+#define NFIFOENTRY_DEST_MASK	(3 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_DECO	(0 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS1	(1 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS2	(2 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_BOTH	(3 << NFIFOENTRY_DEST_SHIFT)
+
+#define NFIFOENTRY_LC2_SHIFT	29
+#define NFIFOENTRY_LC2_MASK	(1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2		(1 << NFIFOENTRY_LC2_SHIFT)
+
+#define NFIFOENTRY_LC1_SHIFT	28
+#define NFIFOENTRY_LC1_MASK	(1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1		(1 << NFIFOENTRY_LC1_SHIFT)
+
+#define NFIFOENTRY_FC2_SHIFT	27
+#define NFIFOENTRY_FC2_MASK	(1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2		(1 << NFIFOENTRY_FC2_SHIFT)
+
+#define NFIFOENTRY_FC1_SHIFT	26
+#define NFIFOENTRY_FC1_MASK	(1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1		(1 << NFIFOENTRY_FC1_SHIFT)
+
+#define NFIFOENTRY_STYPE_SHIFT	24
+#define NFIFOENTRY_STYPE_MASK	(3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_DFIFO	(0 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_OFIFO	(1 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_PAD	(2 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_SNOOP	(3 << NFIFOENTRY_STYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SHIFT	20
+#define NFIFOENTRY_DTYPE_MASK	(0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SBOX	(0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD	(0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV	(0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD	(0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV	(0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP	(0xE << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG	(0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_PK_A0	(0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1	(0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2	(0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3	(0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0	(0x4 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1	(0x5 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2	(0x6 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3	(0x7 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N	(0x8 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E	(0x9 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A	(0xC << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B	(0xD << NFIFOENTRY_DTYPE_SHIFT)
+
+
+#define NFIFOENTRY_BND_SHIFT	19
+#define NFIFOENTRY_BND_MASK	(1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND		(1 << NFIFOENTRY_BND_SHIFT)
+
+#define NFIFOENTRY_PTYPE_SHIFT	16
+#define NFIFOENTRY_PTYPE_MASK	(0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_PTYPE_ZEROS		(0x0 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS	(0x1 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT	(0x2 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND		(0x3 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ	(0x4 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ	(0x5 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N		(0x6 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N	(0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_OC_SHIFT	15
+#define NFIFOENTRY_OC_MASK	(1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC		(1 << NFIFOENTRY_OC_SHIFT)
+
+#define NFIFOENTRY_AST_SHIFT	14
+#define NFIFOENTRY_AST_MASK	(1 << NFIFOENTRY_AST_SHIFT)
+#define NFIFOENTRY_AST		(1 << NFIFOENTRY_AST_SHIFT)
+
+#define NFIFOENTRY_BM_SHIFT	11
+#define NFIFOENTRY_BM_MASK	(1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM		(1 << NFIFOENTRY_BM_SHIFT)
+
+#define NFIFOENTRY_PS_SHIFT	10
+#define NFIFOENTRY_PS_MASK	(1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS		(1 << NFIFOENTRY_PS_SHIFT)
+
+#define NFIFOENTRY_DLEN_SHIFT	0
+#define NFIFOENTRY_DLEN_MASK	(0xFFF << NFIFOENTRY_DLEN_SHIFT)
+
+#define NFIFOENTRY_PLEN_SHIFT	0
+#define NFIFOENTRY_PLEN_MASK	(0xFF << NFIFOENTRY_PLEN_SHIFT)
+
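+/*
+ * Illustrative composition (a sketch, not part of the original source;
+ * 'dlen' is a hypothetical variable): an info FIFO entry steering message
+ * data from the data FIFO into class 1, flagging the last class 1 data,
+ * could be written as
+ *
+ *	nfifo = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 |
+ *		NFIFOENTRY_STYPE_DFIFO | NFIFOENTRY_DTYPE_MSG |
+ *		(dlen & NFIFOENTRY_DLEN_MASK);
+ */
+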
+/* Append Load Immediate Command */
+#define FD_CMD_APPEND_LOAD_IMMEDIATE			0x80000000
+
+/* Set SEQ LIODN equal to the Non-SEQ LIODN for the job */
+#define FD_CMD_SET_SEQ_LIODN_EQUAL_NONSEQ_LIODN		0x40000000
+
+/* Frame Descriptor Command for Replacement Job Descriptor */
+#define FD_CMD_REPLACE_JOB_DESC				0x20000000
+
+#endif /* DESC_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
new file mode 100644
index 0000000..d4256fa
--- /dev/null
+++ b/drivers/crypto/caam/desc_constr.h
@@ -0,0 +1,540 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * caam descriptor construction helper functions
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ */
+
+#ifndef DESC_CONSTR_H
+#define DESC_CONSTR_H
+
+#include "desc.h"
+#include "regs.h"
+
+#define IMMEDIATE (1 << 23)
+#define CAAM_CMD_SZ sizeof(u32)
+#define CAAM_PTR_SZ sizeof(dma_addr_t)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+
+#ifdef DEBUG
+#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
+			      &__func__[sizeof("append")]); } while (0)
+#else
+#define PRINT_POS
+#endif
+
+#define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
+			       LDST_SRCDST_WORD_DECOCTRL | \
+			       (LDOFF_CHG_SHARE_OK_NO_PROP << \
+				LDST_OFFSET_SHIFT))
+#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
+				LDST_SRCDST_WORD_DECOCTRL | \
+				(LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+#define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
+			       LDST_SRCDST_WORD_DECOCTRL | \
+			       (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+
+extern bool caam_little_end;
+
+static inline int desc_len(u32 * const desc)
+{
+	return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
+}
+
+static inline int desc_bytes(void * const desc)
+{
+	return desc_len(desc) * CAAM_CMD_SZ;
+}
+
+static inline u32 *desc_end(u32 * const desc)
+{
+	return desc + desc_len(desc);
+}
+
+static inline void *sh_desc_pdb(u32 * const desc)
+{
+	return desc + 1;
+}
+
+static inline void init_desc(u32 * const desc, u32 options)
+{
+	*desc = cpu_to_caam32((options | HDR_ONE) + 1);
+}
+
+static inline void init_sh_desc(u32 * const desc, u32 options)
+{
+	PRINT_POS;
+	init_desc(desc, CMD_SHARED_DESC_HDR | options);
+}
+
+static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
+				    size_t pdb_bytes)
+{
+	u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+
+	init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) |
+		     options);
+}
+
+static inline void init_job_desc(u32 * const desc, u32 options)
+{
+	init_desc(desc, CMD_DESC_HDR | options);
+}
+
+static inline void init_job_desc_pdb(u32 * const desc, u32 options,
+				     size_t pdb_bytes)
+{
+	u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+
+	init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
+}
+
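+/*
+ * Typical construction flow (an illustrative sketch, not part of the
+ * original source): a caller starts with one of the init_*() helpers and
+ * then appends commands, each of which grows the length recorded in the
+ * descriptor header:
+ *
+ *	init_sh_desc(desc, HDR_SHARE_SERIAL);
+ *	append_operation(desc, OP_ALG_TYPE_CLASS1 | OP_ALG_ALGSEL_AES |
+ *			 OP_ALG_AAI_CBC | OP_ALG_AS_INITFINAL |
+ *			 OP_ALG_ENCRYPT);
+ */
+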
+static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
+{
+	dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
+
+	*offset = cpu_to_caam_dma(ptr);
+
+	(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
+				CAAM_PTR_SZ / CAAM_CMD_SZ);
+}
+
+static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
+					int len, u32 options)
+{
+	PRINT_POS;
+	init_job_desc(desc, HDR_SHARED | options |
+		      (len << HDR_START_IDX_SHIFT));
+	append_ptr(desc, ptr);
+}
+
+static inline void append_data(u32 * const desc, const void *data, int len)
+{
+	u32 *offset = desc_end(desc);
+
+	if (len) /* avoid sparse warning: memcpy with byte count of 0 */
+		memcpy(offset, data, len);
+
+	(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
+				(len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
+}
+
+static inline void append_cmd(u32 * const desc, u32 command)
+{
+	u32 *cmd = desc_end(desc);
+
+	*cmd = cpu_to_caam32(command);
+
+	(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 1);
+}
+
+#define append_u32 append_cmd
+
+static inline void append_u64(u32 * const desc, u64 data)
+{
+	u32 *offset = desc_end(desc);
+
+	/* Only 32-bit alignment is guaranteed in descriptor buffer */
+	if (caam_little_end) {
+		*offset = cpu_to_caam32(lower_32_bits(data));
+		*(++offset) = cpu_to_caam32(upper_32_bits(data));
+	} else {
+		*offset = cpu_to_caam32(upper_32_bits(data));
+		*(++offset) = cpu_to_caam32(lower_32_bits(data));
+	}
+
+	(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 2);
+}
+
+/* Write command without affecting header, and return pointer to next word */
+static inline u32 *write_cmd(u32 * const desc, u32 command)
+{
+	*desc = cpu_to_caam32(command);
+
+	return desc + 1;
+}
+
+static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
+				  u32 command)
+{
+	append_cmd(desc, command | len);
+	append_ptr(desc, ptr);
+}
+
+/* Write length after pointer, rather than inside command */
+static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
+					 unsigned int len, u32 command)
+{
+	append_cmd(desc, command);
+	if (!(command & (SQIN_RTO | SQIN_PRE)))
+		append_ptr(desc, ptr);
+	append_cmd(desc, len);
+}
+
+static inline void append_cmd_data(u32 * const desc, const void *data, int len,
+				   u32 command)
+{
+	append_cmd(desc, command | IMMEDIATE | len);
+	append_data(desc, data, len);
+}
+
+#define APPEND_CMD_RET(cmd, op) \
+static inline u32 *append_##cmd(u32 * const desc, u32 options) \
+{ \
+	u32 *cmd = desc_end(desc); \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | options); \
+	return cmd; \
+}
+APPEND_CMD_RET(jump, JUMP)
+APPEND_CMD_RET(move, MOVE)
+
+static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
+{
+	*jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
+				  (desc_len(desc) - (jump_cmd - desc)));
+}
+
+static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
+{
+	u32 val = caam32_to_cpu(*move_cmd);
+
+	val &= ~MOVE_OFFSET_MASK;
+	val |= (desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK;
+	*move_cmd = cpu_to_caam32(val);
+}
+
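+/*
+ * Illustrative patch-up flow (a sketch, not part of the original source):
+ * append_jump() returns the location of the jump command so that its
+ * offset can be filled in once the target is known:
+ *
+ *	u32 *jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ *	(append here the commands to be jumped over)
+ *	set_jump_tgt_here(desc, jump_cmd);
+ */
+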
+#define APPEND_CMD(cmd, op) \
+static inline void append_##cmd(u32 * const desc, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | options); \
+}
+APPEND_CMD(operation, OPERATION)
+
+#define APPEND_CMD_LEN(cmd, op) \
+static inline void append_##cmd(u32 * const desc, unsigned int len, \
+				u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | len | options); \
+}
+
+APPEND_CMD_LEN(seq_load, SEQ_LOAD)
+APPEND_CMD_LEN(seq_store, SEQ_STORE)
+APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
+APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
+
+#define APPEND_CMD_PTR(cmd, op) \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
+				unsigned int len, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
+}
+APPEND_CMD_PTR(key, KEY)
+APPEND_CMD_PTR(load, LOAD)
+APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
+APPEND_CMD_PTR(fifo_store, FIFO_STORE)
+
+static inline void append_store(u32 * const desc, dma_addr_t ptr,
+				unsigned int len, u32 options)
+{
+	u32 cmd_src;
+
+	cmd_src = options & LDST_SRCDST_MASK;
+
+	append_cmd(desc, CMD_STORE | options | len);
+
+	/* The following options do not require a pointer */
+	if (!(cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED ||
+	      cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB    ||
+	      cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB_WE ||
+	      cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED_WE))
+		append_ptr(desc, ptr);
+}
+
+#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
+static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
+						 dma_addr_t ptr, \
+						 unsigned int len, \
+						 u32 options) \
+{ \
+	PRINT_POS; \
+	if (options & (SQIN_RTO | SQIN_PRE)) \
+		append_cmd(desc, CMD_SEQ_##op##_PTR | len | options); \
+	else \
+		append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
+}
+APPEND_SEQ_PTR_INTLEN(in, IN)
+APPEND_SEQ_PTR_INTLEN(out, OUT)
+
+#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
+					 unsigned int len, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd_data(desc, data, len, CMD_##op | options); \
+}
+APPEND_CMD_PTR_TO_IMM(load, LOAD);
+APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
+
+#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
+static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
+					 unsigned int len, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \
+}
+APPEND_CMD_PTR_EXTLEN(seq_in_ptr, SEQ_IN_PTR)
+APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
+
+/*
+ * Determine whether to store the length internally or externally, depending
+ * on the size of its type
+ */
+#define APPEND_CMD_PTR_LEN(cmd, op, type) \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
+				type len, u32 options) \
+{ \
+	PRINT_POS; \
+	if (sizeof(type) > sizeof(u16)) \
+		append_##cmd##_extlen(desc, ptr, len, options); \
+	else \
+		append_##cmd##_intlen(desc, ptr, len, options); \
+}
+APPEND_CMD_PTR_LEN(seq_in_ptr, SEQ_IN_PTR, u32)
+APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
+
+/*
+ * 2nd variant for commands whose specified immediate length differs from
+ * the length of the immediate data provided, e.g., split keys
+ */
+#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
+					 unsigned int data_len, \
+					 unsigned int len, u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \
+	append_data(desc, data, data_len); \
+}
+APPEND_CMD_PTR_TO_IMM2(key, KEY);
+
+#define APPEND_CMD_RAW_IMM(cmd, op, type) \
+static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
+					     u32 options) \
+{ \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
+	append_cmd(desc, immediate); \
+}
+APPEND_CMD_RAW_IMM(load, LOAD, u32);
+
+/*
+ * ee - endianness
+ * size - size of immediate type in bytes
+ */
+#define APPEND_CMD_RAW_IMM2(cmd, op, ee, size) \
+static inline void append_##cmd##_imm_##ee##size(u32 *desc, \
+						   u##size immediate, \
+						   u32 options) \
+{ \
+	__##ee##size data = cpu_to_##ee##size(immediate); \
+	PRINT_POS; \
+	append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(data)); \
+	append_data(desc, &data, sizeof(data)); \
+}
+
+APPEND_CMD_RAW_IMM2(load, LOAD, be, 32);
+
+/*
+ * Append a math command. Only the final portion of the destination and
+ * source names (the MATH_DEST_/MATH_SRC0_/MATH_SRC1_ suffixes) needs to be
+ * specified
+ */
+#define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
+append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
+	MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32)len);
+
+#define append_math_add(desc, dest, src0, src1, len) \
+	APPEND_MATH(ADD, desc, dest, src0, src1, len)
+#define append_math_sub(desc, dest, src0, src1, len) \
+	APPEND_MATH(SUB, desc, dest, src0, src1, len)
+#define append_math_add_c(desc, dest, src0, src1, len) \
+	APPEND_MATH(ADDC, desc, dest, src0, src1, len)
+#define append_math_sub_b(desc, dest, src0, src1, len) \
+	APPEND_MATH(SUBB, desc, dest, src0, src1, len)
+#define append_math_and(desc, dest, src0, src1, len) \
+	APPEND_MATH(AND, desc, dest, src0, src1, len)
+#define append_math_or(desc, dest, src0, src1, len) \
+	APPEND_MATH(OR, desc, dest, src0, src1, len)
+#define append_math_xor(desc, dest, src0, src1, len) \
+	APPEND_MATH(XOR, desc, dest, src0, src1, len)
+#define append_math_lshift(desc, dest, src0, src1, len) \
+	APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
+#define append_math_rshift(desc, dest, src0, src1, len) \
+	APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
+#define append_math_ldshift(desc, dest, src0, src1, len) \
+	APPEND_MATH(SHLD, desc, dest, src0, src1, len)
+
+/* Exactly one source is IMM. Data is passed in as a u32 value */
+#define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
+do { \
+	APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
+	append_cmd(desc, data); \
+} while (0)
+
+#define append_math_add_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
+#define append_math_sub_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data)
+#define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data)
+#define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data)
+#define append_math_and_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data)
+#define append_math_or_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data)
+#define append_math_xor_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data)
+#define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
+#define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
+
+/* Exactly one source is IMM. Data is passed in as a u64 value */
+#define APPEND_MATH_IMM_u64(op, desc, dest, src_0, src_1, data) \
+do { \
+	u32 upper = (data >> 16) >> 16; \
+	APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ * 2 | \
+		    (upper ? 0 : MATH_IFB)); \
+	if (upper) \
+		append_u64(desc, data); \
+	else \
+		append_u32(desc, lower_32_bits(data)); \
+} while (0)
+
+#define append_math_add_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(ADD, desc, dest, src0, src1, data)
+#define append_math_sub_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(SUB, desc, dest, src0, src1, data)
+#define append_math_add_c_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(ADDC, desc, dest, src0, src1, data)
+#define append_math_sub_b_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(SUBB, desc, dest, src0, src1, data)
+#define append_math_and_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(AND, desc, dest, src0, src1, data)
+#define append_math_or_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(OR, desc, dest, src0, src1, data)
+#define append_math_xor_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(XOR, desc, dest, src0, src1, data)
+#define append_math_lshift_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
+#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
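+
+/*
+ * Usage sketch (illustrative only; the register and length selectors are
+ * the tokens defined in desc.h, and ivsize is a hypothetical caller
+ * variable). Exactly one of src0/src1 must be IMM, and the immediate
+ * word(s) follow the MATH command in the descriptor buffer:
+ *
+ *	// VARSEQOUTLEN = SEQINLEN - ivsize, with a 32-bit immediate
+ *	append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM, ivsize);
+ *
+ *	// REG0 = REG0 + 16; a u64 immediate whose upper half is zero is
+ *	// emitted as a single word with MATH_IFB set
+ *	append_math_add_imm_u64(desc, REG0, REG0, IMM, 16);
+ */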
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ *           functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @keylen_pad: padded length of the provided algorithm key, in bytes
+ * @key: address where algorithm key resides; virtual address if key_inline
+ *       is true, dma (bus) address if key_inline is false.
+ * @key_inline: true - key can be inlined in the descriptor; false - key is
+ *              referenced by the descriptor
+ */
+struct alginfo {
+	u32 algtype;
+	unsigned int keylen;
+	unsigned int keylen_pad;
+	union {
+		dma_addr_t key_dma;
+		const void *key_virt;
+	};
+	bool key_inline;
+};
+
+/**
+ * desc_inline_query() - Provide indications on which data items can be inlined
+ *                       and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ *               excluding the data items to be inlined (or corresponding
+ *               pointer if an item is not inlined). Each cnstr_* function that
+ *               generates descriptors should have a define mentioning
+ *               corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ *          together with the shared descriptor.
+ * @data_len: Array of lengths of the data items trying to be inlined
+ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
+ *            otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
+ *         check @inl_mask for details.
+ */
+static inline int desc_inline_query(unsigned int sd_base_len,
+				    unsigned int jd_len, unsigned int *data_len,
+				    u32 *inl_mask, unsigned int count)
+{
+	int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+	unsigned int i;
+
+	*inl_mask = 0;
+	for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+		if (rem_bytes - (int)(data_len[i] +
+			(count - i - 1) * CAAM_PTR_SZ) >= 0) {
+			rem_bytes -= data_len[i];
+			*inl_mask |= (1 << i);
+		} else {
+			rem_bytes -= CAAM_PTR_SZ;
+		}
+	}
+
+	return (rem_bytes >= 0) ? 0 : -1;
+}
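+
+/*
+ * Example (a minimal sketch): decide whether two algorithm keys fit inline
+ * in a shared descriptor. DESC_EXAMPLE_BASE and jd_len stand in for the
+ * values a cnstr_* caller would supply:
+ *
+ *	unsigned int data_len[2] = { adata->keylen_pad, cdata->keylen };
+ *	u32 inl_mask;
+ *
+ *	if (desc_inline_query(DESC_EXAMPLE_BASE, jd_len, data_len,
+ *			      &inl_mask, ARRAY_SIZE(data_len)) < 0)
+ *		return -EINVAL;
+ *
+ *	adata->key_inline = !!(inl_mask & BIT(0));
+ *	cdata->key_inline = !!(inl_mask & BIT(1));
+ */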
+
+/**
+ * append_proto_dkp() - Derived Key Protocol (DKP): key -> split key
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ *         keylen should be the length of initial key, while keylen_pad
+ *         the length of the derived (split) key.
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
+ *         SHA256, SHA384, SHA512}.
+ */
+static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
+{
+	u32 protid;
+
+	/*
+	 * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
+	 * to OP_PCLID_DKP_{MD5, SHA*}
+	 */
+	protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
+		 (0x20 << OP_ALG_ALGSEL_SHIFT);
+
+	if (adata->key_inline) {
+		int words;
+
+		append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+				 OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
+				 adata->keylen);
+		append_data(desc, adata->key_virt, adata->keylen);
+
+		/* Reserve space in descriptor buffer for the derived key */
+		words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
+			 ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
+		if (words)
+			(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
+	} else {
+		append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+				 OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
+				 adata->keylen);
+		append_ptr(desc, adata->key_dma);
+	}
+}
+
+#endif /* DESC_CONSTR_H */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
new file mode 100644
index 0000000..8da88be
--- /dev/null
+++ b/drivers/crypto/caam/error.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CAAM Error Reporting
+ *
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "desc.h"
+#include "error.h"
+
+#ifdef DEBUG
+#include <linux/highmem.h>
+
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+		  int rowsize, int groupsize, struct scatterlist *sg,
+		  size_t tlen, bool ascii)
+{
+	struct scatterlist *it;
+	void *it_page;
+	size_t len;
+	void *buf;
+
+	for (it = sg; it && tlen > 0; it = sg_next(it)) {
+		/*
+		 * make sure the scatterlist's page
+		 * has a valid virtual memory mapping
+		 */
+		it_page = kmap_atomic(sg_page(it));
+		if (unlikely(!it_page)) {
+			pr_err("caam_dump_sg: kmap failed\n");
+			return;
+		}
+
+		buf = it_page + it->offset;
+		len = min_t(size_t, tlen, it->length);
+		print_hex_dump(level, prefix_str, prefix_type, rowsize,
+			       groupsize, buf, len, ascii);
+		tlen -= len;
+
+		kunmap_atomic(it_page);
+	}
+}
+#else
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+		  int rowsize, int groupsize, struct scatterlist *sg,
+		  size_t tlen, bool ascii)
+{}
+#endif /* DEBUG */
+EXPORT_SYMBOL(caam_dump_sg);
+
+static const struct {
+	u8 value;
+	const char *error_text;
+} desc_error_list[] = {
+	{ 0x00, "No error." },
+	{ 0x01, "SGT Length Error. The descriptor is trying to read more data than is contained in the SGT table." },
+	{ 0x02, "SGT Null Entry Error." },
+	{ 0x03, "Job Ring Control Error. There is a bad value in the Job Ring Control register." },
+	{ 0x04, "Invalid Descriptor Command. The Descriptor Command field is invalid." },
+	{ 0x05, "Reserved." },
+	{ 0x06, "Invalid KEY Command" },
+	{ 0x07, "Invalid LOAD Command" },
+	{ 0x08, "Invalid STORE Command" },
+	{ 0x09, "Invalid OPERATION Command" },
+	{ 0x0A, "Invalid FIFO LOAD Command" },
+	{ 0x0B, "Invalid FIFO STORE Command" },
+	{ 0x0C, "Invalid MOVE/MOVE_LEN Command" },
+	{ 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is invalid because the target is not a Job Header Command, or the jump is from a Trusted Descriptor to a Job Descriptor, or because the target Descriptor contains a Shared Descriptor." },
+	{ 0x0E, "Invalid MATH Command" },
+	{ 0x0F, "Invalid SIGNATURE Command" },
+	{ 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO LOAD, or SEQ FIFO STORE decremented the input or output sequence length below 0. This error may result if a built-in PROTOCOL Command has encountered a malformed PDU." },
+	{ 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
+	{ 0x12, "Shared Descriptor Header Error" },
+	{ 0x13, "Header Error. Invalid length or parity, or certain other problems." },
+	{ 0x14, "Burster Error. Burster has gotten to an illegal state" },
+	{ 0x15, "Context Register Length Error. The descriptor is trying to read or write past the end of the Context Register. A SEQ LOAD or SEQ STORE with the VLF bit set was executed with too large a length in the variable length register (VSOL for SEQ STORE or VSIL for SEQ LOAD)." },
+	{ 0x16, "DMA Error" },
+	{ 0x17, "Reserved." },
+	{ 0x1A, "Job failed due to JR reset" },
+	{ 0x1B, "Job failed due to Fail Mode" },
+	{ 0x1C, "DECO Watchdog timer timeout error" },
+	{ 0x1D, "DECO tried to copy a key from another DECO but the other DECO's Key Registers were locked" },
+	{ 0x1E, "DECO attempted to copy data from a DECO that had an unmasked Descriptor error" },
+	{ 0x1F, "LIODN error. DECO was trying to share from itself or from another DECO but the two Non-SEQ LIODN values didn't match or the 'shared from' DECO's Descriptor required that the SEQ LIODNs be the same and they aren't." },
+	{ 0x20, "DECO has completed a reset initiated via the DRR register" },
+	{ 0x21, "Nonce error. When using EKT (CCM) key encryption option in the FIFO STORE Command, the Nonce counter reached its maximum value and this encryption mode can no longer be used." },
+	{ 0x22, "Meta data is too large (> 511 bytes) for TLS decap (input frame; block ciphers) and IPsec decap (output frame, when doing the next header byte update) and DCRC (output frame)." },
+	{ 0x23, "Read Input Frame error" },
+	{ 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
+	{ 0x80, "DNR (do not run) error" },
+	{ 0x81, "undefined protocol command" },
+	{ 0x82, "invalid setting in PDB" },
+	{ 0x83, "Anti-replay LATE error" },
+	{ 0x84, "Anti-replay REPLAY error" },
+	{ 0x85, "Sequence number overflow" },
+	{ 0x86, "Sigver invalid signature" },
+	{ 0x87, "DSA Sign Illegal test descriptor" },
+	{ 0x88, "Protocol Format Error - A protocol has seen an error in the format of data received. When running RSA, this means that formatting with random padding was used, and did not follow the form: 0x00, 0x02, 8-to-N bytes of non-zero pad, 0x00, F data." },
+	{ 0x89, "Protocol Size Error - A protocol has seen an error in size. When running RSA, pdb size N < (size of F) when no formatting is used; or pdb size N < (F + 11) when formatting is used." },
+	{ 0xC1, "Blob Command error: Undefined mode" },
+	{ 0xC2, "Blob Command error: Secure Memory Blob mode error" },
+	{ 0xC4, "Blob Command error: Black Blob key or input size error" },
+	{ 0xC5, "Blob Command error: Invalid key destination" },
+	{ 0xC8, "Blob Command error: Trusted/Secure mode error" },
+	{ 0xF0, "IPsec TTL or hop limit field either came in as 0, or was decremented to 0" },
+	{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
+};
+
+static const char * const cha_id_list[] = {
+	"",
+	"AES",
+	"DES",
+	"ARC4",
+	"MDHA",
+	"RNG",
+	"SNOW f8",
+	"Kasumi f8/9",
+	"PKHA",
+	"CRCA",
+	"SNOW f9",
+	"ZUCE",
+	"ZUCA",
+};
+
+static const char * const err_id_list[] = {
+	"No error.",
+	"Mode error.",
+	"Data size error.",
+	"Key size error.",
+	"PKHA A memory size error.",
+	"PKHA B memory size error.",
+	"Data arrived out of sequence error.",
+	"PKHA divide-by-zero error.",
+	"PKHA modulus even error.",
+	"DES key parity error.",
+	"ICV check failed.",
+	"Hardware error.",
+	"Unsupported CCM AAD size.",
+	"Class 1 CHA is not reset",
+	"Invalid CHA combination was selected",
+	"Invalid CHA selected.",
+};
+
+static const char * const rng_err_id_list[] = {
+	"",
+	"",
+	"",
+	"Instantiate",
+	"Not instantiated",
+	"Test instantiate",
+	"Prediction resistance",
+	"Prediction resistance and test request",
+	"Uninstantiate",
+	"Secure key generation",
+};
+
+static void report_ccb_status(struct device *jrdev, const u32 status,
+			      const char *error)
+{
+	u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
+		    JRSTA_CCBERR_CHAID_SHIFT;
+	u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+	u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
+		  JRSTA_DECOERR_INDEX_SHIFT;
+	char *idx_str;
+	const char *cha_str = "unidentified cha_id value 0x";
+	char cha_err_code[3] = { 0 };
+	const char *err_str = "unidentified err_id value 0x";
+	char err_err_code[3] = { 0 };
+
+	if (status & JRSTA_DECOERR_JUMP)
+		idx_str = "jump tgt desc idx";
+	else
+		idx_str = "desc idx";
+
+	if (cha_id < ARRAY_SIZE(cha_id_list))
+		cha_str = cha_id_list[cha_id];
+	else
+		snprintf(cha_err_code, sizeof(cha_err_code), "%02x", cha_id);
+
+	if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG &&
+	    err_id < ARRAY_SIZE(rng_err_id_list) &&
+	    strlen(rng_err_id_list[err_id])) {
+		/* RNG-only error */
+		err_str = rng_err_id_list[err_id];
+	} else if (err_id < ARRAY_SIZE(err_id_list)) {
+		err_str = err_id_list[err_id];
+	} else {
+		/* fall back to the hex code; keeps err_err_code meaningful */
+		snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+	}
+
+	/*
+	 * CCB ICV check failures are part of normal operation life;
+	 * we leave the upper layers to do what they want with them.
+	 */
+	if (err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+		dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
+			status, error, idx_str, idx,
+			cha_str, cha_err_code,
+			err_str, err_err_code);
+}
+
+static void report_jump_status(struct device *jrdev, const u32 status,
+			       const char *error)
+{
+	dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+		status, error, __func__);
+}
+
+static void report_deco_status(struct device *jrdev, const u32 status,
+			       const char *error)
+{
+	u8 err_id = status & JRSTA_DECOERR_ERROR_MASK;
+	u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
+		  JRSTA_DECOERR_INDEX_SHIFT;
+	char *idx_str;
+	const char *err_str = "unidentified error value 0x";
+	char err_err_code[3] = { 0 };
+	int i;
+
+	if (status & JRSTA_DECOERR_JUMP)
+		idx_str = "jump tgt desc idx";
+	else
+		idx_str = "desc idx";
+
+	for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
+		if (desc_error_list[i].value == err_id)
+			break;
+
+	if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text)
+		err_str = desc_error_list[i].error_text;
+	else
+		snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+
+	dev_err(jrdev, "%08x: %s: %s %d: %s%s\n",
+		status, error, idx_str, idx, err_str, err_err_code);
+}
+
+static void report_jr_status(struct device *jrdev, const u32 status,
+			     const char *error)
+{
+	dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+		status, error, __func__);
+}
+
+static void report_cond_code_status(struct device *jrdev, const u32 status,
+				    const char *error)
+{
+	dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+		status, error, __func__);
+}
+
+void caam_jr_strstatus(struct device *jrdev, u32 status)
+{
+	static const struct stat_src {
+		void (*report_ssed)(struct device *jrdev, const u32 status,
+				    const char *error);
+		const char *error;
+	} status_src[16] = {
+		{ NULL, "No error" },
+		{ NULL, NULL },
+		{ report_ccb_status, "CCB" },
+		{ report_jump_status, "Jump" },
+		{ report_deco_status, "DECO" },
+		{ NULL, "Queue Manager Interface" },
+		{ report_jr_status, "Job Ring" },
+		{ report_cond_code_status, "Condition Code" },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+	};
+	u32 ssrc = status >> JRSTA_SSRC_SHIFT;
+	const char *error = status_src[ssrc].error;
+
+	/*
+	 * If there is an error handling function, call it to report the error.
+	 * Otherwise print the error source name.
+	 */
+	if (status_src[ssrc].report_ssed)
+		status_src[ssrc].report_ssed(jrdev, status, error);
+	else if (error)
+		dev_err(jrdev, "%d: %s\n", ssrc, error);
+	else
+		dev_err(jrdev, "%d: unknown error source\n", ssrc);
+}
+EXPORT_SYMBOL(caam_jr_strstatus);
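+
+/*
+ * Callers typically invoke this from a job completion callback; a sketch,
+ * where my_done is a hypothetical caller-provided callback:
+ *
+ *	static void my_done(struct device *jrdev, u32 *desc, u32 status,
+ *			    void *context)
+ *	{
+ *		if (status)
+ *			caam_jr_strstatus(jrdev, status);
+ *		...
+ *	}
+ */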
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
new file mode 100644
index 0000000..5aa332b
--- /dev/null
+++ b/drivers/crypto/caam/error.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM Error Reporting code header
+ *
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CAAM_ERROR_H
+#define CAAM_ERROR_H
+#define CAAM_ERROR_STR_MAX 302
+void caam_jr_strstatus(struct device *jrdev, u32 status);
+
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+		  int rowsize, int groupsize, struct scatterlist *sg,
+		  size_t tlen, bool ascii);
+#endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
new file mode 100644
index 0000000..babc78a
--- /dev/null
+++ b/drivers/crypto/caam/intern.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM/SEC 4.x driver backend
+ * Private/internal definitions between modules
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef INTERN_H
+#define INTERN_H
+
+/* Currently comes from a Kconfig parameter as a power of 2 (driver-required) */
+#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
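+/*
+ * e.g. with the default CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE of 9, each
+ * ring is 1 << 9 = 512 entries deep
+ */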
+
+/* Kconfig params for interrupt coalescing if selected (else zero) */
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC
+#define JOBR_INTC JRCFG_ICEN
+#define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
+#define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
+#else
+#define JOBR_INTC 0
+#define JOBR_INTC_TIME_THLD 0
+#define JOBR_INTC_COUNT_THLD 0
+#endif
+
+/*
+ * Storage for tracking each in-process entry moving across a ring
+ * Each entry on an output ring needs one of these
+ */
+struct caam_jrentry_info {
+	void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg);
+	void *cbkarg;	/* Argument per ring entry */
+	u32 *desc_addr_virt;	/* Stored virt addr for postprocessing */
+	dma_addr_t desc_addr_dma;	/* Stored bus addr for done matching */
+	u32 desc_size;	/* Stored size for postprocessing, header derived */
+};
+
+/* Private sub-storage for a single JobR */
+struct caam_drv_private_jr {
+	struct list_head	list_node;	/* Job Ring device list */
+	struct device		*dev;
+	int ridx;
+	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
+	struct tasklet_struct irqtask;
+	int irq;			/* One per queue */
+
+	/* Number of scatterlist crypt transforms active on the JobR */
+	atomic_t tfm_count ____cacheline_aligned;
+
+	/* Job ring info */
+	int ringsize;	/* Size of rings (assume input = output) */
+	struct caam_jrentry_info *entinfo;	/* Alloc'ed 1 per ring entry */
+	spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
+	int inp_ring_write_index;	/* Input index "tail" */
+	int head;			/* entinfo (s/w ring) head index */
+	dma_addr_t *inpring;	/* Base of input ring, alloc DMA-safe */
+	spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
+	int out_ring_read_index;	/* Output index "tail" */
+	int tail;			/* entinfo (s/w ring) tail index */
+	struct jr_outentry *outring;	/* Base of output ring, DMA-safe */
+};
+
+/*
+ * Driver-private storage for a single CAAM block instance
+ */
+struct caam_drv_private {
+#ifdef CONFIG_CAAM_QI
+	struct device *qidev;
+#endif
+
+	/* Physical-presence section */
+	struct caam_ctrl __iomem *ctrl; /* controller region */
+	struct caam_deco __iomem *deco; /* DECO/CCB views */
+	struct caam_assurance __iomem *assure;
+	struct caam_queue_if __iomem *qi; /* QI control region */
+	struct caam_job_ring __iomem *jr[4];	/* JobR's register space */
+
+	/*
+	 * Detected geometry block. Filled in from device tree if powerpc,
+	 * or from register-based version detection code
+	 */
+	u8 total_jobrs;		/* Total Job Rings in device */
+	u8 qi_present;		/* Nonzero if QI present in device */
+	u8 mc_en;		/* Nonzero if MC f/w is active */
+	int secvio_irq;		/* Security violation interrupt number */
+	int virt_en;		/* Virtualization enabled in CAAM */
+	int era;		/* CAAM Era (internal HW revision) */
+
+#define	RNG4_MAX_HANDLES 2
+	/* RNG4 block */
+	u32 rng4_sh_init;	/* This bitmap shows which of the State
+				   Handles of the RNG4 block are initialized
+				   by this driver */
+
+	struct clk *caam_ipg;
+	struct clk *caam_mem;
+	struct clk *caam_aclk;
+	struct clk *caam_emi_slow;
+
+	/*
+	 * debugfs entries for developer view into driver/device
+	 * variables at runtime.
+	 */
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dfs_root;
+	struct dentry *ctl; /* controller dir */
+	struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
+	struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
+#endif
+};
+
+void caam_jr_algapi_init(struct device *dev);
+void caam_jr_algapi_remove(struct device *dev);
+
+#ifdef CONFIG_DEBUG_FS
+static int caam_debugfs_u64_get(void *data, u64 *val)
+{
+	*val = caam64_to_cpu(*(u64 *)data);
+	return 0;
+}
+
+static int caam_debugfs_u32_get(void *data, u64 *val)
+{
+	*val = caam32_to_cpu(*(u32 *)data);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
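+
+/*
+ * Consumed by the controller code roughly as follows (a sketch; perfmon
+ * points at the hardware performance monitor registers):
+ *
+ *	debugfs_create_file("req_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
+ *			    ctrlpriv->ctl, &perfmon->req_dequeued,
+ *			    &caam_fops_u64_ro);
+ */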
+#endif
+
+#endif /* INTERN_H */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
new file mode 100644
index 0000000..acdd720
--- /dev/null
+++ b/drivers/crypto/caam/jr.c
@@ -0,0 +1,582 @@
+/*
+ * CAAM/SEC 4.x transport/backend driver
+ * JobR backend functionality
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include "compat.h"
+#include "ctrl.h"
+#include "regs.h"
+#include "jr.h"
+#include "desc.h"
+#include "intern.h"
+
+struct jr_driver_data {
+	/* List of Physical JobR's with the Driver */
+	struct list_head	jr_list;
+	spinlock_t		jr_alloc_lock;	/* jr_list lock */
+} ____cacheline_aligned;
+
+static struct jr_driver_data driver_data;
+
+static int caam_reset_hw_jr(struct device *dev)
+{
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	unsigned int timeout = 100000;
+
+	/*
+	 * mask interrupts since we are going to poll
+	 * for reset completion status
+	 */
+	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
+
+	/* initiate flush (required prior to reset) */
+	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
+		JRINT_ERR_HALT_INPROGRESS) && --timeout)
+		cpu_relax();
+
+	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
+	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
+		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
+		return -EIO;
+	}
+
+	/* initiate reset */
+	timeout = 100000;
+	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
+		cpu_relax();
+
+	if (timeout == 0) {
+		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
+		return -EIO;
+	}
+
+	/* unmask interrupts */
+	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+
+	return 0;
+}
+
+/*
+ * Shutdown JobR independent of platform property code
+ */
+static int caam_jr_shutdown(struct device *dev)
+{
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	dma_addr_t inpbusaddr, outbusaddr;
+	int ret;
+
+	ret = caam_reset_hw_jr(dev);
+
+	tasklet_kill(&jrp->irqtask);
+
+	/* Release interrupt */
+	free_irq(jrp->irq, dev);
+
+	/* Free rings */
+	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+			  jrp->inpring, inpbusaddr);
+	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+			  jrp->outring, outbusaddr);
+	kfree(jrp->entinfo);
+
+	return ret;
+}
+
+static int caam_jr_remove(struct platform_device *pdev)
+{
+	int ret;
+	struct device *jrdev;
+	struct caam_drv_private_jr *jrpriv;
+
+	jrdev = &pdev->dev;
+	jrpriv = dev_get_drvdata(jrdev);
+
+	/*
+	 * Return -EBUSY if the job ring is still allocated to a caller.
+	 */
+	if (atomic_read(&jrpriv->tfm_count)) {
+		dev_err(jrdev, "Device is busy\n");
+		return -EBUSY;
+	}
+
+	/* Remove the node from Physical JobR list maintained by driver */
+	spin_lock(&driver_data.jr_alloc_lock);
+	list_del(&jrpriv->list_node);
+	spin_unlock(&driver_data.jr_alloc_lock);
+
+	/* Release ring */
+	ret = caam_jr_shutdown(jrdev);
+	if (ret)
+		dev_err(jrdev, "Failed to shut down job ring\n");
+	irq_dispose_mapping(jrpriv->irq);
+
+	return ret;
+}
+
+/* Main per-ring interrupt handler */
+static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
+{
+	struct device *dev = st_dev;
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	u32 irqstate;
+
+	/*
+	 * Check the output ring for ready responses, kick
+	 * tasklet if jobs done.
+	 */
+	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
+	if (!irqstate)
+		return IRQ_NONE;
+
+	/*
+	 * If we have a JobR error, there is more development work to do.
+	 * Flag a bug now, but we really need to shut down and restart the
+	 * queue (and fix code).
+	 */
+	if (irqstate & JRINT_JR_ERROR) {
+		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
+		BUG();
+	}
+
+	/* mask valid interrupts */
+	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
+
+	/* Have valid interrupt at this point, just ACK and trigger */
+	wr_reg32(&jrp->rregs->jrintstatus, irqstate);
+
+	preempt_disable();
+	tasklet_schedule(&jrp->irqtask);
+	preempt_enable();
+
+	return IRQ_HANDLED;
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
+{
+	int hw_idx, sw_idx, i, head, tail;
+	struct device *dev = (struct device *)devarg;
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
+	u32 *userdesc, userstatus;
+	void *userarg;
+
+	while (rd_reg32(&jrp->rregs->outring_used)) {
+
+		head = READ_ONCE(jrp->head);
+
+		spin_lock(&jrp->outlock);
+
+		sw_idx = tail = jrp->tail;
+		hw_idx = jrp->out_ring_read_index;
+
+		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
+			sw_idx = (tail + i) & (JOBR_DEPTH - 1);
+
+			if (jrp->outring[hw_idx].desc ==
+			    caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
+				break; /* found */
+		}
+		/* we should never fail to find a matching descriptor */
+		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
+
+		/* Unmap just-run descriptor so we can post-process */
+		dma_unmap_single(dev,
+				 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
+				 jrp->entinfo[sw_idx].desc_size,
+				 DMA_TO_DEVICE);
+
+		/* mark completed, avoid matching on a recycled desc addr */
+		jrp->entinfo[sw_idx].desc_addr_dma = 0;
+
+		/* Stash callback params for use outside of lock */
+		usercall = jrp->entinfo[sw_idx].callbk;
+		userarg = jrp->entinfo[sw_idx].cbkarg;
+		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
+		userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus);
+
+		/*
+		 * Make sure all information from the job has been obtained
+		 * before telling CAAM that the job has been removed from the
+		 * output ring.
+		 */
+		mb();
+
+		/* set done */
+		wr_reg32(&jrp->rregs->outring_rmvd, 1);
+
+		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
+					   (JOBR_DEPTH - 1);
+
+		/*
+		 * if this job completed out-of-order, do not increment
+		 * the tail.  Otherwise, increment tail by 1 plus the
+		 * number of subsequent jobs already completed out-of-order
+		 */
+		if (sw_idx == tail) {
+			do {
+				tail = (tail + 1) & (JOBR_DEPTH - 1);
+			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
+				 jrp->entinfo[tail].desc_addr_dma == 0);
+
+			jrp->tail = tail;
+		}
+
+		spin_unlock(&jrp->outlock);
+
+		/* Finally, execute user's callback */
+		usercall(dev, userdesc, userstatus, userarg);
+	}
+
+	/* reenable / unmask IRQs */
+	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+}
+
+/**
+ * caam_jr_alloc() - Allocate a job ring for a caller to use as needed.
+ *
+ * Return: pointer to the job ring device with the lowest transform count
+ *	   on success, or ERR_PTR(-ENODEV) if no job rings are available.
+ */
+struct device *caam_jr_alloc(void)
+{
+	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
+	struct device *dev = ERR_PTR(-ENODEV);
+	int min_tfm_cnt	= INT_MAX;
+	int tfm_cnt;
+
+	spin_lock(&driver_data.jr_alloc_lock);
+
+	if (list_empty(&driver_data.jr_list)) {
+		spin_unlock(&driver_data.jr_alloc_lock);
+		return ERR_PTR(-ENODEV);
+	}
+
+	list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
+		tfm_cnt = atomic_read(&jrpriv->tfm_count);
+		if (tfm_cnt < min_tfm_cnt) {
+			min_tfm_cnt = tfm_cnt;
+			min_jrpriv = jrpriv;
+		}
+		if (!min_tfm_cnt)
+			break;
+	}
+
+	if (min_jrpriv) {
+		atomic_inc(&min_jrpriv->tfm_count);
+		dev = min_jrpriv->dev;
+	}
+	spin_unlock(&driver_data.jr_alloc_lock);
+
+	return dev;
+}
+EXPORT_SYMBOL(caam_jr_alloc);
+
+/**
+ * caam_jr_free() - Release a Job Ring obtained via caam_jr_alloc()
+ * @rdev: points to the device that identifies the Job Ring to be released
+ */
+void caam_jr_free(struct device *rdev)
+{
+	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
+
+	atomic_dec(&jrpriv->tfm_count);
+}
+EXPORT_SYMBOL(caam_jr_free);
+
+/**
+ * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
+ * -EBUSY if the queue is full, -EIO if it cannot map the caller's
+ * descriptor.
+ * @dev:  device of the job ring to be used. This device should have
+ *        been assigned previously by caam_jr_alloc().
+ * @desc: points to a job descriptor that executes our request. All
+ *        descriptors (and all referenced data) must be in a DMAable
+ *        region, and all data references must be physical addresses
+ *        accessible to CAAM (i.e. within a PAMU window granted
+ *        to it).
+ * @cbk:  pointer to a callback function to be invoked upon completion
+ *        of this request. This has the form:
+ *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
+ *        where:
+ *        @dev:    contains the job ring device that processed this
+ *                 response.
+ *        @desc:   descriptor that initiated the request, same as
+ *                 "desc" passed to caam_jr_enqueue().
+ *        @status: untranslated status received from CAAM. See the
+ *                 reference manual for a detailed description of
+ *                 error meaning, or see the JRSTA definitions in the
+ *                 register header file
+ *        @areq:   optional pointer to an argument passed with the
+ *                 original request
+ * @areq: optional pointer to a user argument for use at callback
+ *        time.
+ */
+int caam_jr_enqueue(struct device *dev, u32 *desc,
+		    void (*cbk)(struct device *dev, u32 *desc,
+				u32 status, void *areq),
+		    void *areq)
+{
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	struct caam_jrentry_info *head_entry;
+	int head, tail, desc_size;
+	dma_addr_t desc_dma;
+
+	desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32);
+	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, desc_dma)) {
+		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
+		return -EIO;
+	}
+
+	spin_lock_bh(&jrp->inplock);
+
+	head = jrp->head;
+	tail = READ_ONCE(jrp->tail);
+
+	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
+	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
+		spin_unlock_bh(&jrp->inplock);
+		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
+		return -EBUSY;
+	}
+
+	head_entry = &jrp->entinfo[head];
+	head_entry->desc_addr_virt = desc;
+	head_entry->desc_size = desc_size;
+	head_entry->callbk = (void *)cbk;
+	head_entry->cbkarg = areq;
+	head_entry->desc_addr_dma = desc_dma;
+
+	jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);
+
+	/*
+	 * Guarantee that the descriptor's DMA address has been written to
+	 * the next slot in the ring before the write index is updated, since
+	 * other cores may update this index independently.
+	 */
+	smp_wmb();
+
+	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
+				    (JOBR_DEPTH - 1);
+	jrp->head = (head + 1) & (JOBR_DEPTH - 1);
+
+	/*
+	 * Ensure that all job information has been written before
+	 * notifying CAAM that a new job was added to the input ring.
+	 */
+	wmb();
+
+	wr_reg32(&jrp->rregs->inpring_jobadd, 1);
+
+	spin_unlock_bh(&jrp->inplock);
+
+	return 0;
+}
+EXPORT_SYMBOL(caam_jr_enqueue);
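+
+/*
+ * Typical caller flow (a sketch; my_done and my_req are hypothetical
+ * caller-side names, not part of this API):
+ *
+ *	struct device *jrdev = caam_jr_alloc();
+ *
+ *	if (IS_ERR(jrdev))
+ *		return PTR_ERR(jrdev);
+ *
+ *	ret = caam_jr_enqueue(jrdev, desc, my_done, my_req);
+ *	// 0: my_done(jrdev, desc, status, my_req) runs from the dequeue
+ *	// tasklet once CAAM places the job on the output ring
+ *	// -EBUSY: the ring is full and the job may be resubmitted later
+ *
+ *	caam_jr_free(jrdev);	// once the caller is done with the ring
+ */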
+
+/*
+ * Init JobR independent of platform property detection
+ */
+static int caam_jr_init(struct device *dev)
+{
+	struct caam_drv_private_jr *jrp;
+	dma_addr_t inpbusaddr, outbusaddr;
+	int i, error;
+
+	jrp = dev_get_drvdata(dev);
+
+	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
+	/* Connect job ring interrupt handler. */
+	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+			    dev_name(dev), dev);
+	if (error) {
+		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
+			jrp->ridx, jrp->irq);
+		goto out_kill_deq;
+	}
+
+	error = caam_reset_hw_jr(dev);
+	if (error)
+		goto out_free_irq;
+
+	error = -ENOMEM;
+	jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
+					  JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
+	if (!jrp->inpring)
+		goto out_free_irq;
+
+	jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
+					  JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
+	if (!jrp->outring)
+		goto out_free_inpring;
+
+	jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
+	if (!jrp->entinfo)
+		goto out_free_outring;
+
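+	/*
+	 * Mark all entries unused: caam_jr_dequeue() reserves a zero
+	 * desc_addr_dma to flag a completed (recyclable) entry.
+	 */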
+	for (i = 0; i < JOBR_DEPTH; i++)
+		jrp->entinfo[i].desc_addr_dma = !0;
+
+	/* Setup rings */
+	jrp->inp_ring_write_index = 0;
+	jrp->out_ring_read_index = 0;
+	jrp->head = 0;
+	jrp->tail = 0;
+
+	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
+	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
+	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
+	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
+
+	jrp->ringsize = JOBR_DEPTH;
+
+	spin_lock_init(&jrp->inplock);
+	spin_lock_init(&jrp->outlock);
+
+	/* Select interrupt coalescing parameters */
+	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
+		      (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
+		      (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
+
+	return 0;
+
+out_free_outring:
+	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+			  jrp->outring, outbusaddr);
+out_free_inpring:
+	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+			  jrp->inpring, inpbusaddr);
+	dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
+out_free_irq:
+	free_irq(jrp->irq, dev);
+out_kill_deq:
+	tasklet_kill(&jrp->irqtask);
+	return error;
+}
+
+
+/*
+ * Probe routine for each detected JobR subsystem.
+ */
+static int caam_jr_probe(struct platform_device *pdev)
+{
+	struct device *jrdev;
+	struct device_node *nprop;
+	struct caam_job_ring __iomem *ctrl;
+	struct caam_drv_private_jr *jrpriv;
+	static int total_jobrs;
+	int error;
+
+	jrdev = &pdev->dev;
+	jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
+	if (!jrpriv)
+		return -ENOMEM;
+
+	dev_set_drvdata(jrdev, jrpriv);
+
+	/* save ring identity relative to detection */
+	jrpriv->ridx = total_jobrs++;
+
+	nprop = pdev->dev.of_node;
+	/* Get configuration properties from device tree */
+	/* First, get register page */
+	ctrl = of_iomap(nprop, 0);
+	if (!ctrl) {
+		dev_err(jrdev, "of_iomap() failed\n");
+		return -ENOMEM;
+	}
+
+	jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
+
+	if (sizeof(dma_addr_t) == sizeof(u64)) {
+		if (caam_dpaa2)
+			error = dma_set_mask_and_coherent(jrdev,
+							  DMA_BIT_MASK(49));
+		else if (of_device_is_compatible(nprop,
+						 "fsl,sec-v5.0-job-ring"))
+			error = dma_set_mask_and_coherent(jrdev,
+							  DMA_BIT_MASK(40));
+		else
+			error = dma_set_mask_and_coherent(jrdev,
+							  DMA_BIT_MASK(36));
+	} else {
+		error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
+	}
+	if (error) {
+		dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
+			error);
+		iounmap(ctrl);
+		return error;
+	}
+
+	/* Identify the interrupt */
+	jrpriv->irq = irq_of_parse_and_map(nprop, 0);
+
+	/* Now do the platform independent part */
+	error = caam_jr_init(jrdev); /* now turn on hardware */
+	if (error) {
+		irq_dispose_mapping(jrpriv->irq);
+		iounmap(ctrl);
+		return error;
+	}
+
+	jrpriv->dev = jrdev;
+	spin_lock(&driver_data.jr_alloc_lock);
+	list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
+	spin_unlock(&driver_data.jr_alloc_lock);
+
+	atomic_set(&jrpriv->tfm_count, 0);
+
+	return 0;
+}
+
+static const struct of_device_id caam_jr_match[] = {
+	{
+		.compatible = "fsl,sec-v4.0-job-ring",
+	},
+	{
+		.compatible = "fsl,sec4.0-job-ring",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, caam_jr_match);
+
+static struct platform_driver caam_jr_driver = {
+	.driver = {
+		.name = "caam_jr",
+		.of_match_table = caam_jr_match,
+	},
+	.probe       = caam_jr_probe,
+	.remove      = caam_jr_remove,
+};
+
+static int __init jr_driver_init(void)
+{
+	spin_lock_init(&driver_data.jr_alloc_lock);
+	INIT_LIST_HEAD(&driver_data.jr_list);
+	return platform_driver_register(&caam_jr_driver);
+}
+
+static void __exit jr_driver_exit(void)
+{
+	platform_driver_unregister(&caam_jr_driver);
+}
+
+module_init(jr_driver_init);
+module_exit(jr_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM JR request backend");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
new file mode 100644
index 0000000..eab6115
--- /dev/null
+++ b/drivers/crypto/caam/jr.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM public-level include definitions for the JobR backend
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef JR_H
+#define JR_H
+
+/* Prototypes for backend-level services exposed to APIs */
+struct device *caam_jr_alloc(void);
+void caam_jr_free(struct device *rdev);
+int caam_jr_enqueue(struct device *dev, u32 *desc,
+		    void (*cbk)(struct device *dev, u32 *desc, u32 status,
+				void *areq),
+		    void *areq);
+
+#endif /* JR_H */
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
new file mode 100644
index 0000000..312b5f0
--- /dev/null
+++ b/drivers/crypto/caam/key_gen.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CAAM/SEC 4.x functions for handling key-generation jobs
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+#include "compat.h"
+#include "jr.h"
+#include "error.h"
+#include "desc_constr.h"
+#include "key_gen.h"
+
+void split_key_done(struct device *dev, u32 *desc, u32 err,
+		    void *context)
+{
+	struct split_key_result *res = context;
+
+#ifdef DEBUG
+	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	if (err)
+		caam_jr_strstatus(dev, err);
+
+	res->err = err;
+
+	complete(&res->completion);
+}
+EXPORT_SYMBOL(split_key_done);
+
+/*
+ * Get a split ipad/opad key.
+ *
+ * Split key generation ----------------------------------------------
+ *
+ * [00] 0xb0810008    jobdesc: stidx=1 share=never len=8
+ * [01] 0x04000014        key: class2->keyreg len=20
+ *			@0xffe01000
+ * [03] 0x84410014  operation: cls2-op sha1 hmac init dec
+ * [04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
+ * [05] 0xa4000001       jump: class2 local all ->1 [06]
+ * [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
+ *			@0xffe04000
+ */
+int gen_split_key(struct device *jrdev, u8 *key_out,
+		  struct alginfo * const adata, const u8 *key_in, u32 keylen,
+		  int max_keylen)
+{
+	u32 *desc;
+	struct split_key_result result;
+	dma_addr_t dma_addr_in, dma_addr_out;
+	int ret = -ENOMEM;
+
+	adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
+	adata->keylen_pad = split_key_pad_len(adata->algtype &
+					      OP_ALG_ALGSEL_MASK);
+
+#ifdef DEBUG
+	dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+		adata->keylen, adata->keylen_pad);
+	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+#endif
+
+	if (adata->keylen_pad > max_keylen)
+		return -EINVAL;
+
+	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+	if (!desc) {
+		dev_err(jrdev, "unable to allocate key input memory\n");
+		return ret;
+	}
+
+	dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
+				     DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, dma_addr_in)) {
+		dev_err(jrdev, "unable to map key input memory\n");
+		goto out_free;
+	}
+
+	dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
+				      DMA_FROM_DEVICE);
+	if (dma_mapping_error(jrdev, dma_addr_out)) {
+		dev_err(jrdev, "unable to map key output memory\n");
+		goto out_unmap_in;
+	}
+
+	init_job_desc(desc, 0);
+	append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
+
+	/* Sets MDHA up into an HMAC-INIT */
+	append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
+			 OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
+			 OP_ALG_AS_INIT);
+
+	/*
+	 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
+	 * into both pads inside MDHA
+	 */
+	append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
+				FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
+
+	/*
+	 * FIFO_STORE with the explicit split-key content store
+	 * (0x26 output type)
+	 */
+	append_fifo_store(desc, dma_addr_out, adata->keylen,
+			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+	result.err = 0;
+	init_completion(&result.completion);
+
+	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+	if (!ret) {
+		/* in progress */
+		wait_for_completion(&result.completion);
+		ret = result.err;
+#ifdef DEBUG
+		print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+			       adata->keylen_pad, 1);
+#endif
+	}
+
+	dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
+			 DMA_FROM_DEVICE);
+out_unmap_in:
+	dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+out_free:
+	kfree(desc);
+	return ret;
+}
+EXPORT_SYMBOL(gen_split_key);
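+
+/*
+ * Example call (a sketch mirroring the Crypto API glue; ctx and
+ * CAAM_MAX_KEY_SIZE are hypothetical caller-side names):
+ *
+ *	ctx->adata.algtype = OP_ALG_ALGSEL_SHA256;
+ *	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
+ *			    CAAM_MAX_KEY_SIZE);
+ *	// on success, ctx->key holds adata.keylen_pad bytes of split key
+ */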
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
new file mode 100644
index 0000000..818f78f
--- /dev/null
+++ b/drivers/crypto/caam/key_gen.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM/SEC 4.x definitions for handling key-generation jobs
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ *        SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+	u32 idx;
+
+	idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+	return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ *        SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+	return ALIGN(split_key_len(hash), 16);
+}
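+
+/*
+ * For example, SHA-256 has a 32-byte MDHA pad, so split_key_len() returns
+ * 64 (ipad + opad) and split_key_pad_len() also returns 64, the value
+ * already being a multiple of 16; MD5's 16-byte pad yields 32 for both.
+ */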
+
+struct split_key_result {
+	struct completion completion;
+	int err;
+};
+
+void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
+
+int gen_split_key(struct device *jrdev, u8 *key_out,
+		  struct alginfo * const adata, const u8 *key_in, u32 keylen,
+		  int max_keylen);
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
new file mode 100644
index 0000000..810f0be
--- /dev/null
+++ b/drivers/crypto/caam/pdb.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM Protocol Data Block (PDB) definition header file
+ *
+ * Copyright 2008-2016 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef CAAM_PDB_H
+#define CAAM_PDB_H
+#include "compat.h"
+
+/*
+ * PDB - IPSec ESP Header Modification Options
+ */
+#define PDBHMO_ESP_DECAP_SHIFT	28
+#define PDBHMO_ESP_ENCAP_SHIFT	28
+/*
+ * Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the
+ * Options Byte IP version (IPvsn) field:
+ * if IPv4, decrement the inner IP header TTL field (byte 8);
+ * if IPv6, decrement the inner IP header Hop Limit field (byte 7).
+ */
+#define PDBHMO_ESP_DECAP_DEC_TTL	(0x02 << PDBHMO_ESP_DECAP_SHIFT)
+#define PDBHMO_ESP_ENCAP_DEC_TTL	(0x02 << PDBHMO_ESP_ENCAP_SHIFT)
+/*
+ * Decap - DiffServ Copy - Copy the IPv4 TOS or IPv6 Traffic Class byte
+ * from the outer IP header to the inner IP header.
+ */
+#define PDBHMO_ESP_DIFFSERV		(0x01 << PDBHMO_ESP_DECAP_SHIFT)
+/*
+ * Encap - Copy DF bit - if an IPv4 tunnel mode outer IP header is coming from
+ * the PDB, copy the DF bit from the inner IP header to the outer IP header.
+ */
+#define PDBHMO_ESP_DFBIT		(0x04 << PDBHMO_ESP_ENCAP_SHIFT)
+
+#define PDBNH_ESP_ENCAP_SHIFT		16
+#define PDBNH_ESP_ENCAP_MASK		(0xff << PDBNH_ESP_ENCAP_SHIFT)
+
+#define PDBHDRLEN_ESP_DECAP_SHIFT	16
+#define PDBHDRLEN_MASK			(0x0fff << PDBHDRLEN_ESP_DECAP_SHIFT)
+
+#define PDB_NH_OFFSET_SHIFT		8
+#define PDB_NH_OFFSET_MASK		(0xff << PDB_NH_OFFSET_SHIFT)
+
+/*
+ * PDB - IPSec ESP Encap/Decap Options
+ */
+#define PDBOPTS_ESP_ARSNONE	0x00 /* no antireplay window */
+#define PDBOPTS_ESP_ARS32	0x40 /* 32-entry antireplay window */
+#define PDBOPTS_ESP_ARS128	0x80 /* 128-entry antireplay window */
+#define PDBOPTS_ESP_ARS64	0xc0 /* 64-entry antireplay window */
+#define PDBOPTS_ESP_ARS_MASK	0xc0 /* antireplay window mask */
+#define PDBOPTS_ESP_IVSRC	0x20 /* IV comes from internal random gen */
+#define PDBOPTS_ESP_ESN		0x10 /* extended sequence included */
+#define PDBOPTS_ESP_OUTFMT	0x08 /* output only decapsulation (decap) */
+#define PDBOPTS_ESP_IPHDRSRC	0x08 /* IP header comes from PDB (encap) */
+#define PDBOPTS_ESP_INCIPHDR	0x04 /* Prepend IP header to output frame */
+#define PDBOPTS_ESP_IPVSN	0x02 /* process IPv6 header */
+#define PDBOPTS_ESP_AOFL	0x04 /* adjust out frame len (decap, SEC>=5.3)*/
+#define PDBOPTS_ESP_TUNNEL	0x01 /* tunnel mode next-header byte */
+#define PDBOPTS_ESP_IPV6	0x02 /* ip header version is V6 */
+#define PDBOPTS_ESP_DIFFSERV	0x40 /* copy TOS/TC from inner iphdr */
+#define PDBOPTS_ESP_UPDATE_CSUM 0x80 /* encap-update ip header checksum */
+#define PDBOPTS_ESP_VERIFY_CSUM 0x20 /* decap-validate ip header checksum */
+
+/*
+ * General IPSec encap/decap PDB definitions
+ */
+
+/**
+ * ipsec_encap_cbc - PDB part for IPsec CBC encapsulation
+ * @iv: 16-byte array initialization vector
+ */
+struct ipsec_encap_cbc {
+	u8 iv[16];
+};
+
+/**
+ * ipsec_encap_ctr - PDB part for IPsec CTR encapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ctr {
+	u8 ctr_nonce[4];
+	u32 ctr_initial;
+	u64 iv;
+};
+
+/**
+ * ipsec_encap_ccm - PDB part for IPsec CCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ *  b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ *    0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ *  ctr_flags (8b) - counter flags; constant equal to 0x3
+ *  ctr_initial (16b) - initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ccm {
+	u8 salt[4];
+	u32 ccm_opt;
+	u64 iv;
+};
+
+/**
+ * ipsec_encap_gcm - PDB part for IPsec GCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @rsvd: reserved, do not use
+ * @iv: initialization vector
+ */
+struct ipsec_encap_gcm {
+	u8 salt[4];
+	u32 rsvd1;
+	u64 iv;
+};
+
+/**
+ * ipsec_encap_pdb - PDB for IPsec encapsulation
+ * @options: MSB-LSB description
+ *  hmo (header manipulation options) - 4b
+ *  reserved - 4b
+ *  next header - 8b
+ *  next header offset - 8b
+ *  option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @spi: IPsec SPI (Security Parameters Index)
+ * @ip_hdr_len: optional IP Header length (in bytes)
+ *  reserved - 16b
+ *  Opt. IP Hdr Len - 16b
+ * @ip_hdr: optional IP Header content
+ */
+struct ipsec_encap_pdb {
+	u32 options;
+	u32 seq_num_ext_hi;
+	u32 seq_num;
+	union {
+		struct ipsec_encap_cbc cbc;
+		struct ipsec_encap_ctr ctr;
+		struct ipsec_encap_ccm ccm;
+		struct ipsec_encap_gcm gcm;
+	};
+	u32 spi;
+	u32 ip_hdr_len;
+	u32 ip_hdr[0];
+};
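+
+/*
+ * An encap PDB's @options word is composed from the flags above; an
+ * illustrative (not normative) tunnel-mode setting:
+ *
+ *	pdb->options = (next_hdr << PDBNH_ESP_ENCAP_SHIFT) |
+ *		       PDBOPTS_ESP_TUNNEL | PDBOPTS_ESP_IPHDRSRC |
+ *		       PDBOPTS_ESP_INCIPHDR;
+ */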
+
+/**
+ * ipsec_decap_cbc - PDB part for IPsec CBC decapsulation
+ * @rsvd: reserved, do not use
+ */
+struct ipsec_decap_cbc {
+	u32 rsvd[2];
+};
+
+/**
+ * ipsec_decap_ctr - PDB part for IPsec CTR decapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ */
+struct ipsec_decap_ctr {
+	u8 ctr_nonce[4];
+	u32 ctr_initial;
+};
+
+/**
+ * ipsec_decap_ccm - PDB part for IPsec CCM decapsulation
+ * @salt: 3-byte salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ *  b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ *    0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ *  ctr_flags (8b) - counter flags; constant equal to 0x3
+ *  ctr_initial (16b) - initial count constant
+ */
+struct ipsec_decap_ccm {
+	u8 salt[4];
+	u32 ccm_opt;
+};
+
+/**
+ * ipsec_decap_gcm - PDB part for IPsec GCM decapsulation
+ * @salt: 4-byte salt
+ * @resvd: reserved, do not use
+ */
+struct ipsec_decap_gcm {
+	u8 salt[4];
+	u32 resvd;
+};
+
+/**
+ * ipsec_decap_pdb - PDB for IPsec decapsulation
+ * @options: MSB-LSB description
+ *  hmo (header manipulation options) - 4b
+ *  IP header length - 12b
+ *  next header offset - 8b
+ *  option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @anti_replay: Anti-replay window; size depends on ARS (option flags)
+ */
+struct ipsec_decap_pdb {
+	u32 options;
+	union {
+		struct ipsec_decap_cbc cbc;
+		struct ipsec_decap_ctr ctr;
+		struct ipsec_decap_ccm ccm;
+		struct ipsec_decap_gcm gcm;
+	};
+	u32 seq_num_ext_hi;
+	u32 seq_num;
+	__be32 anti_replay[4];
+};
+
+/*
+ * IPSec ESP Datapath Protocol Override Register (DPOVRD)
+ */
+struct ipsec_deco_dpovrd {
+#define IPSEC_ENCAP_DECO_DPOVRD_USE 0x80
+	u8 ovrd_ecn;
+	u8 ip_hdr_len;
+	u8 nh_offset;
+	u8 next_header; /* reserved if decap */
+};
+
+/*
+ * IEEE 802.11i WiFi Protocol Data Block
+ */
+#define WIFI_PDBOPTS_FCS	0x01
+#define WIFI_PDBOPTS_AR		0x40
+
+struct wifi_encap_pdb {
+	u16 mac_hdr_len;
+	u8 rsvd;
+	u8 options;
+	u8 iv_flags;
+	u8 pri;
+	u16 pn1;
+	u32 pn2;
+	u16 frm_ctrl_mask;
+	u16 seq_ctrl_mask;
+	u8 rsvd1[2];
+	u8 cnst;
+	u8 key_id;
+	u8 ctr_flags;
+	u8 rsvd2;
+	u16 ctr_init;
+};
+
+struct wifi_decap_pdb {
+	u16 mac_hdr_len;
+	u8 rsvd;
+	u8 options;
+	u8 iv_flags;
+	u8 pri;
+	u16 pn1;
+	u32 pn2;
+	u16 frm_ctrl_mask;
+	u16 seq_ctrl_mask;
+	u8 rsvd1[4];
+	u8 ctr_flags;
+	u8 rsvd2;
+	u16 ctr_init;
+};
+
+/*
+ * IEEE 802.16 WiMAX Protocol Data Block
+ */
+#define WIMAX_PDBOPTS_FCS	0x01
+#define WIMAX_PDBOPTS_AR	0x40 /* decap only */
+
+struct wimax_encap_pdb {
+	u8 rsvd[3];
+	u8 options;
+	u32 nonce;
+	u8 b0_flags;
+	u8 ctr_flags;
+	u16 ctr_init;
+	/* begin DECO writeback region */
+	u32 pn;
+	/* end DECO writeback region */
+};
+
+struct wimax_decap_pdb {
+	u8 rsvd[3];
+	u8 options;
+	u32 nonce;
+	u8 iv_flags;
+	u8 ctr_flags;
+	u16 ctr_init;
+	/* begin DECO writeback region */
+	u32 pn;
+	u8 rsvd1[2];
+	u16 antireplay_len;
+	u64 antireplay_scorecard;
+	/* end DECO writeback region */
+};
+
+/*
+ * IEEE 801.AE MacSEC Protocol Data Block
+ */
+#define MACSEC_PDBOPTS_FCS	0x01
+#define MACSEC_PDBOPTS_AR	0x40 /* used in decap only */
+
+struct macsec_encap_pdb {
+	u16 aad_len;
+	u8 rsvd;
+	u8 options;
+	u64 sci;
+	u16 ethertype;
+	u8 tci_an;
+	u8 rsvd1;
+	/* begin DECO writeback region */
+	u32 pn;
+	/* end DECO writeback region */
+};
+
+struct macsec_decap_pdb {
+	u16 aad_len;
+	u8 rsvd;
+	u8 options;
+	u64 sci;
+	u8 rsvd1[3];
+	/* begin DECO writeback region */
+	u8 antireplay_len;
+	u32 pn;
+	u64 antireplay_scorecard;
+	/* end DECO writeback region */
+};
+
+/*
+ * SSL/TLS/DTLS Protocol Data Blocks
+ */
+
+#define TLS_PDBOPTS_ARS32	0x40
+#define TLS_PDBOPTS_ARS64	0xc0
+#define TLS_PDBOPTS_OUTFMT	0x08
+#define TLS_PDBOPTS_IV_WRTBK	0x02 /* 1.1/1.2/DTLS only */
+#define TLS_PDBOPTS_EXP_RND_IV	0x01 /* 1.1/1.2/DTLS only */
+
+struct tls_block_encap_pdb {
+	u8 type;
+	u8 version[2];
+	u8 options;
+	u64 seq_num;
+	u32 iv[4];
+};
+
+struct tls_stream_encap_pdb {
+	u8 type;
+	u8 version[2];
+	u8 options;
+	u64 seq_num;
+	u8 i;
+	u8 j;
+	u8 rsvd1[2];
+};
+
+struct dtls_block_encap_pdb {
+	u8 type;
+	u8 version[2];
+	u8 options;
+	u16 epoch;
+	u16 seq_num[3];
+	u32 iv[4];
+};
+
+struct tls_block_decap_pdb {
+	u8 rsvd[3];
+	u8 options;
+	u64 seq_num;
+	u32 iv[4];
+};
+
+struct tls_stream_decap_pdb {
+	u8 rsvd[3];
+	u8 options;
+	u64 seq_num;
+	u8 i;
+	u8 j;
+	u8 rsvd1[2];
+};
+
+struct dtls_block_decap_pdb {
+	u8 rsvd[3];
+	u8 options;
+	u16 epoch;
+	u16 seq_num[3];
+	u32 iv[4];
+	u64 antireplay_scorecard;
+};
+
+/*
+ * SRTP Protocol Data Blocks
+ */
+#define SRTP_PDBOPTS_MKI	0x08
+#define SRTP_PDBOPTS_AR		0x40
+
+struct srtp_encap_pdb {
+	u8 x_len;
+	u8 mki_len;
+	u8 n_tag;
+	u8 options;
+	u32 cnst0;
+	u8 rsvd[2];
+	u16 cnst1;
+	u16 salt[7];
+	u16 cnst2;
+	u32 rsvd1;
+	u32 roc;
+	u32 opt_mki;
+};
+
+struct srtp_decap_pdb {
+	u8 x_len;
+	u8 mki_len;
+	u8 n_tag;
+	u8 options;
+	u32 cnst0;
+	u8 rsvd[2];
+	u16 cnst1;
+	u16 salt[7];
+	u16 cnst2;
+	u16 rsvd1;
+	u16 seq_num;
+	u32 roc;
+	u64 antireplay_scorecard;
+};
+
+/*
+ * DSA/ECDSA Protocol Data Blocks
+ * Two of these exist: DSA-SIGN, and DSA-VERIFY. They are similar
+ * except for the treatment of "w" for verify, "s" for sign,
+ * and the placement of "a,b".
+ */
+#define DSA_PDB_SGF_SHIFT	24
+#define DSA_PDB_SGF_MASK	(0xff << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_Q		(0x80 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_R		(0x40 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_G		(0x20 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_W		(0x10 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_S		(0x10 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_F		(0x08 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_C		(0x04 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_D		(0x02 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_AB_SIGN	(0x02 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_AB_VERIFY	(0x01 << DSA_PDB_SGF_SHIFT)
+
+#define DSA_PDB_L_SHIFT		7
+#define DSA_PDB_L_MASK		(0x3ff << DSA_PDB_L_SHIFT)
+
+#define DSA_PDB_N_MASK		0x7f
+
+struct dsa_sign_pdb {
+	u32 sgf_ln; /* Use DSA_PDB_ definitions per above */
+	u8 *q;
+	u8 *r;
+	u8 *g;	/* or Gx,y */
+	u8 *s;
+	u8 *f;
+	u8 *c;
+	u8 *d;
+	u8 *ab; /* ECC only */
+	u8 *u;
+};
+
+struct dsa_verify_pdb {
+	u32 sgf_ln;
+	u8 *q;
+	u8 *r;
+	u8 *g;	/* or Gx,y */
+	u8 *w; /* or Wx,y */
+	u8 *f;
+	u8 *c;
+	u8 *d;
+	u8 *tmp; /* temporary data block */
+	u8 *ab; /* only used if ECC processing */
+};
+
+/* RSA Protocol Data Block */
+#define RSA_PDB_SGF_SHIFT       28
+#define RSA_PDB_E_SHIFT         12
+#define RSA_PDB_E_MASK          (0xFFF << RSA_PDB_E_SHIFT)
+#define RSA_PDB_D_SHIFT         12
+#define RSA_PDB_D_MASK          (0xFFF << RSA_PDB_D_SHIFT)
+#define RSA_PDB_Q_SHIFT         12
+#define RSA_PDB_Q_MASK          (0xFFF << RSA_PDB_Q_SHIFT)
+
+#define RSA_PDB_SGF_F           (0x8 << RSA_PDB_SGF_SHIFT)
+#define RSA_PDB_SGF_G           (0x4 << RSA_PDB_SGF_SHIFT)
+#define RSA_PRIV_PDB_SGF_F      (0x4 << RSA_PDB_SGF_SHIFT)
+#define RSA_PRIV_PDB_SGF_G      (0x8 << RSA_PDB_SGF_SHIFT)
+
+#define RSA_PRIV_KEY_FRM_1      0
+#define RSA_PRIV_KEY_FRM_2      1
+#define RSA_PRIV_KEY_FRM_3      2
+
+/**
+ * rsa_pub_pdb - RSA Encrypt Protocol Data Block
+ * @sgf: scatter-gather field
+ * @f_dma: dma address of input data
+ * @g_dma: dma address of encrypted output data
+ * @n_dma: dma address of RSA modulus
+ * @e_dma: dma address of RSA public exponent
+ * @f_len: length in octets of the input data
+ */
+struct rsa_pub_pdb {
+	u32		sgf;
+	dma_addr_t	f_dma;
+	dma_addr_t	g_dma;
+	dma_addr_t	n_dma;
+	dma_addr_t	e_dma;
+	u32		f_len;
+} __packed;
+
+/**
+ * rsa_priv_f1_pdb - RSA Decrypt PDB - Private Key Form #1
+ * @sgf: scatter-gather field
+ * @g_dma: dma address of encrypted input data
+ * @f_dma: dma address of output data
+ * @n_dma: dma address of RSA modulus
+ * @d_dma: dma address of RSA private exponent
+ */
+struct rsa_priv_f1_pdb {
+	u32		sgf;
+	dma_addr_t	g_dma;
+	dma_addr_t	f_dma;
+	dma_addr_t	n_dma;
+	dma_addr_t	d_dma;
+} __packed;
+
+/**
+ * rsa_priv_f2_pdb - RSA Decrypt PDB - Private Key Form #2
+ * @sgf     : scatter-gather field
+ * @g_dma   : dma address of encrypted input data
+ * @f_dma   : dma address of output data
+ * @d_dma   : dma address of RSA private exponent
+ * @p_dma   : dma address of RSA prime factor p of RSA modulus n
+ * @q_dma   : dma address of RSA prime factor q of RSA modulus n
+ * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ *            as internal state buffer. It is assumed to be as long as p.
+ * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ *            as internal state buffer. It is assumed to be as long as q.
+ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
+ */
+struct rsa_priv_f2_pdb {
+	u32		sgf;
+	dma_addr_t	g_dma;
+	dma_addr_t	f_dma;
+	dma_addr_t	d_dma;
+	dma_addr_t	p_dma;
+	dma_addr_t	q_dma;
+	dma_addr_t	tmp1_dma;
+	dma_addr_t	tmp2_dma;
+	u32		p_q_len;
+} __packed;
+
+/**
+ * rsa_priv_f3_pdb - RSA Decrypt PDB - Private Key Form #3
+ * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors
+ * of the RSA modulus.
+ * @sgf     : scatter-gather field
+ * @g_dma   : dma address of encrypted input data
+ * @f_dma   : dma address of output data
+ * @c_dma   : dma address of RSA CRT coefficient
+ * @p_dma   : dma address of RSA prime factor p of RSA modulus n
+ * @q_dma   : dma address of RSA prime factor q of RSA modulus n
+ * @dp_dma  : dma address of RSA CRT exponent of RSA prime factor p
+ * @dq_dma  : dma address of RSA CRT exponent of RSA prime factor q
+ * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ *            as internal state buffer. It is assumed to be as long as p.
+ * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ *            as internal state buffer. It is assumed to be as long as q.
+ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
+ */
+struct rsa_priv_f3_pdb {
+	u32		sgf;
+	dma_addr_t	g_dma;
+	dma_addr_t	f_dma;
+	dma_addr_t	c_dma;
+	dma_addr_t	p_dma;
+	dma_addr_t	q_dma;
+	dma_addr_t	dp_dma;
+	dma_addr_t	dq_dma;
+	dma_addr_t	tmp1_dma;
+	dma_addr_t	tmp2_dma;
+	u32		p_q_len;
+} __packed;
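+
+/*
+ * Editorial note: in PKCS #1 terms the form #3 quantities are
+ * dp = d mod (p - 1), dq = d mod (q - 1) and c = q^(-1) mod p (the CRT
+ * coefficient); consult the SEC reference manual for the authoritative
+ * definitions.
+ */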
+
+#endif
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
new file mode 100644
index 0000000..2a8d87e
--- /dev/null
+++ b/drivers/crypto/caam/pkc_desc.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+ */
+#include "caampkc.h"
+#include "desc_constr.h"
+
+/* Descriptor for RSA Public operation */
+void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb)
+{
+	init_job_desc_pdb(desc, 0, sizeof(*pdb));
+	append_cmd(desc, pdb->sgf);
+	append_ptr(desc, pdb->f_dma);
+	append_ptr(desc, pdb->g_dma);
+	append_ptr(desc, pdb->n_dma);
+	append_ptr(desc, pdb->e_dma);
+	append_cmd(desc, pdb->f_len);
+	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSAENC_PUBKEY);
+}
+
+/* Descriptor for RSA Private operation - Private Key Form #1 */
+void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
+{
+	init_job_desc_pdb(desc, 0, sizeof(*pdb));
+	append_cmd(desc, pdb->sgf);
+	append_ptr(desc, pdb->g_dma);
+	append_ptr(desc, pdb->f_dma);
+	append_ptr(desc, pdb->n_dma);
+	append_ptr(desc, pdb->d_dma);
+	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+			 RSA_PRIV_KEY_FRM_1);
+}
+
+/* Descriptor for RSA Private operation - Private Key Form #2 */
+void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
+{
+	init_job_desc_pdb(desc, 0, sizeof(*pdb));
+	append_cmd(desc, pdb->sgf);
+	append_ptr(desc, pdb->g_dma);
+	append_ptr(desc, pdb->f_dma);
+	append_ptr(desc, pdb->d_dma);
+	append_ptr(desc, pdb->p_dma);
+	append_ptr(desc, pdb->q_dma);
+	append_ptr(desc, pdb->tmp1_dma);
+	append_ptr(desc, pdb->tmp2_dma);
+	append_cmd(desc, pdb->p_q_len);
+	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+			 RSA_PRIV_KEY_FRM_2);
+}
+
+/* Descriptor for RSA Private operation - Private Key Form #3 */
+void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
+{
+	init_job_desc_pdb(desc, 0, sizeof(*pdb));
+	append_cmd(desc, pdb->sgf);
+	append_ptr(desc, pdb->g_dma);
+	append_ptr(desc, pdb->f_dma);
+	append_ptr(desc, pdb->c_dma);
+	append_ptr(desc, pdb->p_dma);
+	append_ptr(desc, pdb->q_dma);
+	append_ptr(desc, pdb->dp_dma);
+	append_ptr(desc, pdb->dq_dma);
+	append_ptr(desc, pdb->tmp1_dma);
+	append_ptr(desc, pdb->tmp2_dma);
+	append_cmd(desc, pdb->p_q_len);
+	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+			 RSA_PRIV_KEY_FRM_3);
+}
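+
+/*
+ * Illustrative usage sketch (editorial, not part of this patch): a caller
+ * sizes a descriptor buffer, initializes it with one of the helpers above
+ * and hands it to a job ring. The buffer size, the caam_jr_enqueue() call
+ * and the completion callback name are assumptions for the example:
+ *
+ *	u32 desc[64];		// job descriptors fit in 64 words
+ *	struct rsa_pub_pdb pdb;
+ *
+ *	// ... fill pdb with dma-mapped key material and data pointers ...
+ *	init_rsa_pub_desc(desc, &pdb);
+ *	ret = caam_jr_enqueue(jrdev, desc, rsa_pub_done, req);
+ */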
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
new file mode 100644
index 0000000..67f7f8c
--- /dev/null
+++ b/drivers/crypto/caam/qi.c
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CAAM/SEC 4.x QI transport/backend driver
+ * Queue Interface backend functionality
+ *
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ */
+
+#include <linux/cpumask.h>
+#include <linux/kthread.h>
+#include <soc/fsl/qman.h>
+
+#include "regs.h"
+#include "qi.h"
+#include "desc.h"
+#include "intern.h"
+#include "desc_constr.h"
+
+#define PREHDR_RSLS_SHIFT	31
+
+/*
+ * Use a reasonable backlog of frames (per CPU) as congestion threshold,
+ * so that resources used by the in-flight buffers do not become a memory hog.
+ */
+#define MAX_RSP_FQ_BACKLOG_PER_CPU	256
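+
+/*
+ * For example, with four affine-portal CPUs, init_cgr() below sets the
+ * congestion threshold to 4 * 256 = 1024 in-flight frames.
+ */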
+
+#define CAAM_QI_ENQUEUE_RETRIES	10000
+
+#define CAAM_NAPI_WEIGHT	63
+
+/*
+ * caam_napi - struct holding CAAM NAPI-related params
+ * @irqtask: IRQ task for QI backend
+ * @p: QMan portal
+ */
+struct caam_napi {
+	struct napi_struct irqtask;
+	struct qman_portal *p;
+};
+
+/*
+ * caam_qi_pcpu_priv - percpu private data structure maintaining the list of
+ *                     pending responses expected on each cpu.
+ * @caam_napi: CAAM NAPI params
+ * @net_dev: netdev used by NAPI
+ * @rsp_fq: response FQ from CAAM
+ */
+struct caam_qi_pcpu_priv {
+	struct caam_napi caam_napi;
+	struct net_device net_dev;
+	struct qman_fq *rsp_fq;
+} ____cacheline_aligned;
+
+static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
+static DEFINE_PER_CPU(int, last_cpu);
+
+/*
+ * caam_qi_priv - CAAM QI backend private params
+ * @cgr: QMan congestion group
+ * @qi_pdev: platform device for QI backend
+ */
+struct caam_qi_priv {
+	struct qman_cgr cgr;
+	struct platform_device *qi_pdev;
+};
+
+static struct caam_qi_priv qipriv ____cacheline_aligned;
+
+/*
+ * This is written by only one core - the one that initialized the CGR - and
+ * read by multiple cores (all the others).
+ */
+bool caam_congested __read_mostly;
+EXPORT_SYMBOL(caam_congested);
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * This is a counter for the number of times the congestion group (where all
+ * the request and response queues are) reached congestion. Incremented
+ * each time the congestion callback is called with congested == true.
+ */
+static u64 times_congested;
+#endif
+
+/*
+ * CPU from which the module was initialised. This is required because the
+ * QMan driver requires CGRs to be removed on the same CPU where they were
+ * originally allocated.
+ */
+static int mod_init_cpu;
+
+/*
+ * This is a cache of buffers, from which the users of the CAAM QI driver
+ * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
+ * doing malloc on the hotpath.
+ * NOTE: A more elegant solution would be to have some headroom in the frames
+ *       being processed. This could be added by the dpaa-ethernet driver.
+ *       This would pose a problem for userspace application processing which
+ *       cannot know of this limitation. So for now, this will work.
+ * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
+ */
+static struct kmem_cache *qi_cache;
+
+int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
+{
+	struct qm_fd fd;
+	dma_addr_t addr;
+	int ret;
+	int num_retries = 0;
+
+	qm_fd_clear_fd(&fd);
+	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
+
+	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
+			      DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(qidev, addr)) {
+		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
+		return -EIO;
+	}
+	qm_fd_addr_set64(&fd, addr);
+
+	do {
+		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
+		if (likely(!ret))
+			return 0;
+
+		if (ret != -EBUSY)
+			break;
+		num_retries++;
+	} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
+
+	dev_err(qidev, "qman_enqueue failed: %d\n", ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(caam_qi_enqueue);
+
+static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
+			   const union qm_mr_entry *msg)
+{
+	const struct qm_fd *fd;
+	struct caam_drv_req *drv_req;
+	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+
+	fd = &msg->ern.fd;
+
+	if (qm_fd_get_format(fd) != qm_fd_compound) {
+		dev_err(qidev, "Non-compound FD from CAAM\n");
+		return;
+	}
+
+	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+	if (!drv_req) {
+		dev_err(qidev,
+			"Can't find original request for CAAM response\n");
+		return;
+	}
+
+	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
+			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
+
+	drv_req->cbk(drv_req, -EIO);
+}
+
+static struct qman_fq *create_caam_req_fq(struct device *qidev,
+					  struct qman_fq *rsp_fq,
+					  dma_addr_t hwdesc,
+					  int fq_sched_flag)
+{
+	int ret;
+	struct qman_fq *req_fq;
+	struct qm_mcc_initfq opts;
+
+	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
+	if (!req_fq)
+		return ERR_PTR(-ENOMEM);
+
+	req_fq->cb.ern = caam_fq_ern_cb;
+	req_fq->cb.fqs = NULL;
+
+	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
+				QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
+	if (ret) {
+		dev_err(qidev, "Failed to create session req FQ\n");
+		goto create_req_fq_fail;
+	}
+
+	memset(&opts, 0, sizeof(opts));
+	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
+				   QM_INITFQ_WE_CONTEXTB |
+				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
+	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
+	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
+	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
+	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
+	opts.fqd.cgid = qipriv.cgr.cgrid;
+
+	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
+	if (ret) {
+		dev_err(qidev, "Failed to init session req FQ\n");
+		goto init_req_fq_fail;
+	}
+
+	dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
+		smp_processor_id());
+	return req_fq;
+
+init_req_fq_fail:
+	qman_destroy_fq(req_fq);
+create_req_fq_fail:
+	kfree(req_fq);
+	return ERR_PTR(ret);
+}
+
+static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
+{
+	int ret;
+
+	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
+				    QMAN_VOLATILE_FLAG_FINISH,
+				    QM_VDQCR_PRECEDENCE_VDQCR |
+				    QM_VDQCR_NUMFRAMES_TILLEMPTY);
+	if (ret) {
+		dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
+		return ret;
+	}
+
+	do {
+		struct qman_portal *p;
+
+		p = qman_get_affine_portal(smp_processor_id());
+		qman_p_poll_dqrr(p, 16);
+	} while (fq->flags & QMAN_FQ_STATE_NE);
+
+	return 0;
+}
+
+static int kill_fq(struct device *qidev, struct qman_fq *fq)
+{
+	u32 flags;
+	int ret;
+
+	ret = qman_retire_fq(fq, &flags);
+	if (ret < 0) {
+		dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
+		return ret;
+	}
+
+	if (!ret)
+		goto empty_fq;
+
+	/* Async FQ retirement condition */
+	if (ret == 1) {
+		/* Retry till FQ gets in retired state */
+		do {
+			msleep(20);
+		} while (fq->state != qman_fq_state_retired);
+
+		WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
+		WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
+	}
+
+empty_fq:
+	if (fq->flags & QMAN_FQ_STATE_NE) {
+		ret = empty_retired_fq(qidev, fq);
+		if (ret) {
+			dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
+				fq->fqid);
+			return ret;
+		}
+	}
+
+	ret = qman_oos_fq(fq);
+	if (ret)
+		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
+
+	qman_destroy_fq(fq);
+	kfree(fq);
+
+	return ret;
+}
+
+static int empty_caam_fq(struct qman_fq *fq)
+{
+	int ret;
+	struct qm_mcr_queryfq_np np;
+
+	/* Wait till the older CAAM FQ gets empty */
+	do {
+		ret = qman_query_fq_np(fq, &np);
+		if (ret)
+			return ret;
+
+		if (!qm_mcr_np_get(&np, frm_cnt))
+			break;
+
+		msleep(20);
+	} while (1);
+
+	/*
+	 * Give extra time for pending jobs from this FQ in holding tanks
+	 * to get processed
+	 */
+	msleep(20);
+	return 0;
+}
+
+int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
+{
+	int ret;
+	u32 num_words;
+	struct qman_fq *new_fq, *old_fq;
+	struct device *qidev = drv_ctx->qidev;
+
+	num_words = desc_len(sh_desc);
+	if (num_words > MAX_SDLEN) {
+		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
+		return -EINVAL;
+	}
+
+	/* Note down older req FQ */
+	old_fq = drv_ctx->req_fq;
+
+	/* Create a new req FQ in parked state */
+	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
+				    drv_ctx->context_a, 0);
+	if (unlikely(IS_ERR_OR_NULL(new_fq))) {
+		dev_err(qidev, "FQ allocation for shdesc update failed\n");
+		return PTR_ERR(new_fq);
+	}
+
+	/* Hook up new FQ to context so that new requests keep queuing */
+	drv_ctx->req_fq = new_fq;
+
+	/* Empty and remove the older FQ */
+	ret = empty_caam_fq(old_fq);
+	if (ret) {
+		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
+
+		/* We can revert to older FQ */
+		drv_ctx->req_fq = old_fq;
+
+		if (kill_fq(qidev, new_fq))
+			dev_warn(qidev, "New CAAM FQ kill failed\n");
+
+		return ret;
+	}
+
+	/*
+	 * Re-initialise pre-header. Set RSLS and SDLEN.
+	 * Update the shared descriptor for driver context.
+	 */
+	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
+					   num_words);
+	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
+	dma_sync_single_for_device(qidev, drv_ctx->context_a,
+				   sizeof(drv_ctx->sh_desc) +
+				   sizeof(drv_ctx->prehdr),
+				   DMA_BIDIRECTIONAL);
+
+	/* Put the new FQ in scheduled state */
+	ret = qman_schedule_fq(new_fq);
+	if (ret) {
+		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
+
+		/*
+		 * We can kill the new FQ and revert to the old FQ. Since the
+		 * descriptor is already modified, this is treated as success.
+		 */
+
+		drv_ctx->req_fq = old_fq;
+
+		if (kill_fq(qidev, new_fq))
+			dev_warn(qidev, "New CAAM FQ kill failed\n");
+	} else if (kill_fq(qidev, old_fq)) {
+		dev_warn(qidev, "Old CAAM FQ kill failed\n");
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(caam_drv_ctx_update);
+
+struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
+				       int *cpu,
+				       u32 *sh_desc)
+{
+	size_t size;
+	u32 num_words;
+	dma_addr_t hwdesc;
+	struct caam_drv_ctx *drv_ctx;
+	const cpumask_t *cpus = qman_affine_cpus();
+
+	num_words = desc_len(sh_desc);
+	if (num_words > MAX_SDLEN) {
+		dev_err(qidev, "Invalid descriptor len: %d words\n",
+			num_words);
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
+	if (!drv_ctx)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
+	 * and dma-map them.
+	 */
+	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
+					   num_words);
+	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
+	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
+	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
+				DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(qidev, hwdesc)) {
+		dev_err(qidev, "DMA map error for preheader + shdesc\n");
+		kfree(drv_ctx);
+		return ERR_PTR(-ENOMEM);
+	}
+	drv_ctx->context_a = hwdesc;
+
+	/* If given CPU does not own the portal, choose another one that does */
+	if (!cpumask_test_cpu(*cpu, cpus)) {
+		int *pcpu = &get_cpu_var(last_cpu);
+
+		*pcpu = cpumask_next(*pcpu, cpus);
+		if (*pcpu >= nr_cpu_ids)
+			*pcpu = cpumask_first(cpus);
+		*cpu = *pcpu;
+
+		put_cpu_var(last_cpu);
+	}
+	drv_ctx->cpu = *cpu;
+
+	/* Find response FQ hooked with this CPU */
+	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
+
+	/* Attach request FQ */
+	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
+					     QMAN_INITFQ_FLAG_SCHED);
+	if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
+		dev_err(qidev, "create_caam_req_fq failed\n");
+		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
+		kfree(drv_ctx);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drv_ctx->qidev = qidev;
+	return drv_ctx;
+}
+EXPORT_SYMBOL(caam_drv_ctx_init);
+
+void *qi_cache_alloc(gfp_t flags)
+{
+	return kmem_cache_alloc(qi_cache, flags);
+}
+EXPORT_SYMBOL(qi_cache_alloc);
+
+void qi_cache_free(void *obj)
+{
+	kmem_cache_free(qi_cache, obj);
+}
+EXPORT_SYMBOL(qi_cache_free);
+
+static int caam_qi_poll(struct napi_struct *napi, int budget)
+{
+	struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
+
+	int cleaned = qman_p_poll_dqrr(np->p, budget);
+
+	if (cleaned < budget) {
+		napi_complete(napi);
+		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+	}
+
+	return cleaned;
+}
+
+void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
+{
+	if (IS_ERR_OR_NULL(drv_ctx))
+		return;
+
+	/* Remove request FQ */
+	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
+		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
+
+	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
+			 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
+			 DMA_BIDIRECTIONAL);
+	kfree(drv_ctx);
+}
+EXPORT_SYMBOL(caam_drv_ctx_rel);
+
+int caam_qi_shutdown(struct device *qidev)
+{
+	int i, ret;
+	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
+	const cpumask_t *cpus = qman_affine_cpus();
+	struct cpumask old_cpumask = current->cpus_allowed;
+
+	for_each_cpu(i, cpus) {
+		struct napi_struct *irqtask;
+
+		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
+		napi_disable(irqtask);
+		netif_napi_del(irqtask);
+
+		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
+			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
+	}
+
+	/*
+	 * The QMan driver requires CGRs to be deleted from the same CPU on
+	 * which they were instantiated. Hence we make module removal execute
+	 * on the same CPU on which the module was originally inserted.
+	 */
+	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
+
+	ret = qman_delete_cgr(&priv->cgr);
+	if (ret)
+		dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
+	else
+		qman_release_cgrid(priv->cgr.cgrid);
+
+	kmem_cache_destroy(qi_cache);
+
+	/* Now that we're done with the CGRs, restore the cpus allowed mask */
+	set_cpus_allowed_ptr(current, &old_cpumask);
+
+	platform_device_unregister(priv->qi_pdev);
+	return ret;
+}
+
+static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
+{
+	caam_congested = congested;
+
+	if (congested) {
+#ifdef CONFIG_DEBUG_FS
+		times_congested++;
+#endif
+		pr_debug_ratelimited("CAAM entered congestion\n");
+
+	} else {
+		pr_debug_ratelimited("CAAM exited congestion\n");
+	}
+}
+
+static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
+{
+	/*
+	 * In case of threaded ISR, for RT kernels in_irq() does not return an
+	 * appropriate value, so use in_serving_softirq() to distinguish between
+	 * softirq and irq contexts.
+	 */
+	if (unlikely(in_irq() || !in_serving_softirq())) {
+		/* Disable QMan IRQ source and invoke NAPI */
+		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
+		np->p = p;
+		napi_schedule(&np->irqtask);
+		return 1;
+	}
+	return 0;
+}
+
+static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
+						    struct qman_fq *rsp_fq,
+						    const struct qm_dqrr_entry *dqrr)
+{
+	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
+	struct caam_drv_req *drv_req;
+	const struct qm_fd *fd;
+	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+	u32 status;
+
+	if (caam_qi_napi_schedule(p, caam_napi))
+		return qman_cb_dqrr_stop;
+
+	fd = &dqrr->fd;
+	status = be32_to_cpu(fd->status);
+	if (unlikely(status)) {
+		u32 ssrc = status & JRSTA_SSRC_MASK;
+		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+
+		if (ssrc != JRSTA_SSRC_CCB_ERROR ||
+		    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+			dev_err(qidev, "Error: %#x in CAAM response FD\n",
+				status);
+	}
+
+	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
+		dev_err(qidev, "Non-compound FD from CAAM\n");
+		return qman_cb_dqrr_consume;
+	}
+
+	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+	if (unlikely(!drv_req)) {
+		dev_err(qidev,
+			"Can't find original request for caam response\n");
+		return qman_cb_dqrr_consume;
+	}
+
+	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
+			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
+
+	drv_req->cbk(drv_req, status);
+	return qman_cb_dqrr_consume;
+}
+
+static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
+{
+	struct qm_mcc_initfq opts;
+	struct qman_fq *fq;
+	int ret;
+
+	fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
+	if (!fq)
+		return -ENOMEM;
+
+	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
+
+	ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
+			     QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+	if (ret) {
+		dev_err(qidev, "Rsp FQ create failed\n");
+		kfree(fq);
+		return -ENODEV;
+	}
+
+	memset(&opts, 0, sizeof(opts));
+	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
+				   QM_INITFQ_WE_CONTEXTB |
+				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
+	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
+				       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
+	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
+	opts.fqd.cgid = qipriv.cgr.cgrid;
+	opts.fqd.context_a.stashing.exclusive =	QM_STASHING_EXCL_CTX |
+						QM_STASHING_EXCL_DATA;
+	qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
+
+	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+	if (ret) {
+		dev_err(qidev, "Rsp FQ init failed\n");
+		kfree(fq);
+		return -ENODEV;
+	}
+
+	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
+
+	dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
+	return 0;
+}
+
+static int init_cgr(struct device *qidev)
+{
+	int ret;
+	struct qm_mcc_initcgr opts;
+	const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
+			MAX_RSP_FQ_BACKLOG_PER_CPU;
+
+	ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
+	if (ret) {
+		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
+		return ret;
+	}
+
+	qipriv.cgr.cb = cgr_cb;
+	memset(&opts, 0, sizeof(opts));
+	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
+				   QM_CGR_WE_MODE);
+	opts.cgr.cscn_en = QM_CGR_EN;
+	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
+	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
+
+	ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+	if (ret) {
+		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
+			qipriv.cgr.cgrid);
+		return ret;
+	}
+
+	dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
+	return 0;
+}
+
+static int alloc_rsp_fqs(struct device *qidev)
+{
+	int ret, i;
+	const cpumask_t *cpus = qman_affine_cpus();
+
+	/* Now create the response FQs */
+	for_each_cpu(i, cpus) {
+		ret = alloc_rsp_fq_cpu(qidev, i);
+		if (ret) {
+			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void free_rsp_fqs(void)
+{
+	int i;
+	const cpumask_t *cpus = qman_affine_cpus();
+
+	for_each_cpu(i, cpus)
+		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
+}
+
+int caam_qi_init(struct platform_device *caam_pdev)
+{
+	int err, i;
+	struct platform_device *qi_pdev;
+	struct device *ctrldev = &caam_pdev->dev, *qidev;
+	struct caam_drv_private *ctrlpriv;
+	const cpumask_t *cpus = qman_affine_cpus();
+	struct cpumask old_cpumask = current->cpus_allowed;
+	static struct platform_device_info qi_pdev_info = {
+		.name = "caam_qi",
+		.id = PLATFORM_DEVID_NONE
+	};
+
+	/*
+	 * QMan requires CGRs to be removed from the same CPU + portal on which
+	 * they were originally allocated. Hence we need to note down the
+	 * initialisation CPU and use the same CPU for module exit.
+	 * We select the first CPU from the list of portal-owning CPUs.
+	 * Then we pin module init to this CPU.
+	 */
+	mod_init_cpu = cpumask_first(cpus);
+	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
+
+	qi_pdev_info.parent = ctrldev;
+	qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
+	qi_pdev = platform_device_register_full(&qi_pdev_info);
+	if (IS_ERR(qi_pdev))
+		return PTR_ERR(qi_pdev);
+	set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
+
+	ctrlpriv = dev_get_drvdata(ctrldev);
+	qidev = &qi_pdev->dev;
+
+	qipriv.qi_pdev = qi_pdev;
+	dev_set_drvdata(qidev, &qipriv);
+
+	/* Initialize the congestion detection */
+	err = init_cgr(qidev);
+	if (err) {
+		dev_err(qidev, "CGR initialization failed: %d\n", err);
+		platform_device_unregister(qi_pdev);
+		return err;
+	}
+
+	/* Initialise response FQs */
+	err = alloc_rsp_fqs(qidev);
+	if (err) {
+		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
+		free_rsp_fqs();
+		platform_device_unregister(qi_pdev);
+		return err;
+	}
+
+	/*
+	 * Enable the NAPI contexts on each core that has an affine
+	 * portal.
+	 */
+	for_each_cpu(i, cpus) {
+		struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
+		struct caam_napi *caam_napi = &priv->caam_napi;
+		struct napi_struct *irqtask = &caam_napi->irqtask;
+		struct net_device *net_dev = &priv->net_dev;
+
+		net_dev->dev = *qidev;
+		INIT_LIST_HEAD(&net_dev->napi_list);
+
+		netif_napi_add(net_dev, irqtask, caam_qi_poll,
+			       CAAM_NAPI_WEIGHT);
+
+		napi_enable(irqtask);
+	}
+
+	/* Hook up QI device to parent controlling caam device */
+	ctrlpriv->qidev = qidev;
+
+	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
+				     SLAB_CACHE_DMA, NULL);
+	if (!qi_cache) {
+		dev_err(qidev, "Can't allocate CAAM cache\n");
+		free_rsp_fqs();
+		platform_device_unregister(qi_pdev);
+		return -ENOMEM;
+	}
+
+	/* Done with the CGRs; restore the cpus allowed mask */
+	set_cpus_allowed_ptr(current, &old_cpumask);
+#ifdef CONFIG_DEBUG_FS
+	debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
+			    &times_congested, &caam_fops_u64_ro);
+#endif
+	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
+	return 0;
+}
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
new file mode 100644
index 0000000..357b69f
--- /dev/null
+++ b/drivers/crypto/caam/qi.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Public definitions for the CAAM/QI (Queue Interface) backend.
+ *
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ */
+
+#ifndef __QI_H__
+#define __QI_H__
+
+#include <soc/fsl/qman.h>
+#include "compat.h"
+#include "desc.h"
+#include "desc_constr.h"
+
+/*
+ * CAAM hardware constructs a job descriptor which points to a shared
+ * descriptor (as pointed to by context_a of the to-CAAM FQ).
+ * When the job descriptor is executed by DECO, the whole job descriptor
+ * together with shared descriptor gets loaded in DECO buffer, which is
+ * 64 words (each 32-bit) long.
+ *
+ * The job descriptor constructed by CAAM hardware has the following layout:
+ *
+ *	HEADER		(1 word)
+ *	Shdesc ptr	(1 or 2 words)
+ *	SEQ_OUT_PTR	(1 word)
+ *	Out ptr		(1 or 2 words)
+ *	Out length	(1 word)
+ *	SEQ_IN_PTR	(1 word)
+ *	In ptr		(1 or 2 words)
+ *	In length	(1 word)
+ *
+ * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
+ *
+ * Apart from the shdesc contents, the total number of words that get loaded
+ * in the DECO buffer is 8 or 11. The remaining words in the DECO buffer can
+ * be used for storing the shared descriptor.
+ */
+#define MAX_SDLEN	((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
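+
+/*
+ * Worked example (editorial): assuming CAAM_DESC_BYTES_MAX is 256 and
+ * DESC_JOB_IO_LEN is CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3 (desc_constr.h), the
+ * job descriptor part is 8 words (32 bytes) with 32-bit pointers, giving
+ * MAX_SDLEN = (256 - 32) / 4 = 56, or 11 words (44 bytes) with 64-bit
+ * pointers, giving MAX_SDLEN = (256 - 44) / 4 = 53 words of the 64-word
+ * DECO buffer left for the shared descriptor.
+ */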
+
+/* Length of a single buffer in the QI driver memory cache */
+#define CAAM_QI_MEMCACHE_SIZE	768
+
+extern bool caam_congested __read_mostly;
+
+/*
+ * This is the request structure the driver application should fill while
+ * submitting a job to the driver.
+ */
+struct caam_drv_req;
+
+/*
+ * caam_qi_cbk - application's callback function invoked by the driver when the
+ *               request has been processed.
+ * @drv_req: original request that was submitted
+ * @status: completion status of request (0 - success, non-zero - error code)
+ */
+typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
+
+enum optype {
+	ENCRYPT,
+	DECRYPT,
+	GIVENCRYPT,
+	NUM_OP
+};
+
+/**
+ * caam_drv_ctx - CAAM/QI backend driver context
+ *
+ * The jobs are processed by the driver against a driver context.
+ * A driver context is attached to every cryptographic context.
+ * The driver context contains data for private use by the driver.
+ * For the application, this is an opaque structure.
+ *
+ * @prehdr: preheader placed before shrd desc
+ * @sh_desc: shared descriptor
+ * @context_a: shared descriptor dma address
+ * @req_fq: to-CAAM request frame queue
+ * @rsp_fq: from-CAAM response frame queue
+ * @cpu: cpu on which to receive CAAM response
+ * @op_type: operation type
+ * @qidev: device pointer for CAAM/QI backend
+ */
+struct caam_drv_ctx {
+	u32 prehdr[2];
+	u32 sh_desc[MAX_SDLEN];
+	dma_addr_t context_a;
+	struct qman_fq *req_fq;
+	struct qman_fq *rsp_fq;
+	int cpu;
+	enum optype op_type;
+	struct device *qidev;
+} ____cacheline_aligned;
+
+/**
+ * caam_drv_req - The request structure the driver application should fill while
+ *                submitting a job to the driver.
+ * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
+ *          buffers.
+ * @cbk: callback function to invoke when job is completed
+ * @app_ctx: arbitrary context attached with request by the application
+ *
+ * The field mentioned below should not be used by the application.
+ * It is for private use by the driver.
+ *
+ * @drv_ctx: driver context against which the request is processed
+ */
+struct caam_drv_req {
+	struct qm_sg_entry fd_sgt[2];
+	struct caam_drv_ctx *drv_ctx;
+	caam_qi_cbk cbk;
+	void *app_ctx;
+} ____cacheline_aligned;
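+
+/*
+ * Illustrative sketch (editorial, not part of this patch): minimal filling
+ * of a request before handing it to caam_qi_enqueue(). The qm_sg_entry
+ * helpers are assumed from <soc/fsl/qman.h>; the out/in dma addresses are
+ * assumed already mapped, and example_done is a hypothetical caam_qi_cbk:
+ *
+ *	qm_sg_entry_set64(&req->fd_sgt[0], out_dma);	// output buffer
+ *	qm_sg_entry_set_len(&req->fd_sgt[0], out_len);
+ *	qm_sg_entry_set64(&req->fd_sgt[1], in_dma);	// input buffer
+ *	qm_sg_entry_set_f(&req->fd_sgt[1], in_len);	// final S/G entry
+ *	req->drv_ctx = drv_ctx;
+ *	req->cbk = example_done;
+ *	req->app_ctx = NULL;
+ *	ret = caam_qi_enqueue(qidev, req);
+ */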
+
+/**
+ * caam_drv_ctx_init - Initialise a CAAM/QI driver context
+ *
+ * A CAAM/QI driver context must be attached to each cryptographic context.
+ * This function allocates memory for a CAAM/QI context and returns a handle
+ * to the application. This handle must be submitted along with each enqueue
+ * request to the driver by the application.
+ *
+ * @cpu: CPU on which the application prefers the driver to receive CAAM
+ *       responses. The request completion callback is issued from this
+ *       CPU.
+ * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
+ *           context.
+ *
+ * Returns a driver context on success or negative error code on failure.
+ */
+struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
+				       u32 *sh_desc);
+
+/**
+ * caam_qi_enqueue - Submit a request to the QI backend driver.
+ *
+ * The request structure must be properly filled as described above.
+ *
+ * @qidev: device pointer for QI backend
+ * @req: CAAM QI request structure
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
+
+/**
+ * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
+ *		       or too many CAAM responses are pending to be processed.
+ * @drv_ctx: driver context for which job is to be submitted
+ *
+ * Returns the CAAM congestion status: true if congested, false otherwise.
+ */
+bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
+
+/**
+ * caam_drv_ctx_update - Update QI driver context
+ *
+ * Invoked when the shared descriptor in the driver context needs to be changed.
+ *
+ * @drv_ctx: driver context to be updated
+ * @sh_desc: new shared descriptor pointer to be updated in QI driver context
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
+
+/**
+ * caam_drv_ctx_rel - Release a QI driver context
+ * @drv_ctx: context to be released
+ */
+void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
+
+int caam_qi_init(struct platform_device *pdev);
+int caam_qi_shutdown(struct device *dev);
+
+/**
+ * qi_cache_alloc - Allocate buffers from CAAM-QI cache
+ *
+ * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has
+ * to be allocated on the hotpath. Instead of using malloc, one can use the
+ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
+ * will have a size of CAAM_QI_MEMCACHE_SIZE bytes (768), which is more
+ * than sufficient for hosting 16 SG entries.
+ *
+ * @flags: flags that would be used for the equivalent malloc(..) call
+ *
+ * Returns a pointer to a retrieved buffer on success or NULL on failure.
+ */
+void *qi_cache_alloc(gfp_t flags);
+
+/**
+ * qi_cache_free - Frees buffers allocated from CAAM-QI cache
+ *
+ * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs
+ * the buffer previously allocated by a qi_cache_alloc call.
+ * No checking is done; the call is a passthrough to
+ * kmem_cache_free(...)
+ *
+ * @obj: object previously allocated using qi_cache_alloc()
+ */
+void qi_cache_free(void *obj);
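+
+/*
+ * End-to-end usage sketch (editorial, not part of this patch), tying the
+ * calls above together; error handling elided, names hypothetical:
+ *
+ *	int cpu = smp_processor_id();
+ *	struct caam_drv_ctx *ctx;
+ *
+ *	ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);	// attach shdesc
+ *	...
+ *	ret = caam_qi_enqueue(qidev, req);	// req->cbk runs on "cpu"
+ *	...
+ *	ret = caam_drv_ctx_update(ctx, new_sh_desc);	// e.g. on rekey
+ *	...
+ *	caam_drv_ctx_rel(ctx);			// tear down the FQs
+ */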
+
+#endif /* __QI_H__ */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
new file mode 100644
index 0000000..ce3f9ad
--- /dev/null
+++ b/drivers/crypto/caam/regs.h
@@ -0,0 +1,895 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM hardware register-level view
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef REGS_H
+#define REGS_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+
+/*
+ * Architecture-specific register access methods
+ *
+ * CAAM's bus-addressable registers are 64 bits internally.
+ * They have been wired to be safely accessible on 32-bit
+ * architectures, however. Registers were organized such
+ * that (a) they can be contained in 32 bits, (b) if not, then they
+ * can be treated as two 32-bit entities, or finally (c) if they
+ * must be treated as a single 64-bit value, then this can safely
+ * be done with two 32-bit cycles.
+ *
+ * For 32-bit operations on 64-bit values, CAAM follows the same
+ * 64-bit register access conventions as its predecessors, in that
+ * writes are "triggered" by a write to the register at the numerically
+ * higher address. Thus, a full 64-bit write cycle requires a write
+ * to the lower address, followed by a write to the higher address,
+ * which will latch/execute the write cycle.
+ *
+ * For example, let's assume a SW reset of CAAM through the master
+ * configuration register.
+ * - SWRST is in bit 31 of MCFG.
+ * - MCFG begins at base+0x0000.
+ * - Bits 63-32 are a 32-bit word at base+0x0000 (numerically-lower)
+ * - Bits 31-0 are a 32-bit word at base+0x0004 (numerically-higher)
+ *
+ * (and on Power, the convention is 0-31, 32-63, I know...)
+ *
+ * Assuming a 64-bit write to this MCFG to perform a software reset
+ * would then require a write of 0 to base+0x0000, followed by a
+ * write of 0x80000000 to base+0x0004, which would "execute" the
+ * reset.
+ *
+ * Of course, since MCFG 63-32 is all zero, we could cheat and simply
+ * write 0x80000000 to base+0x0004, and the reset would work fine.
+ * However, since CAAM does contain some write-and-read-intended
+ * 64-bit registers, this code defines 64-bit access methods for
+ * the sake of internal consistency and simplicity, and so that a
+ * clean transition to 64-bit is possible when it becomes necessary.
+ *
+ * There are limitations to this that the developer must recognize.
+ * 32-bit architectures cannot enforce an atomic-64 operation.
+ * Therefore:
+ *
+ * - On writes, since the HW is assumed to latch the cycle on the
+ *   write of the higher-numeric-address word, then ordered
+ *   writes work OK.
+ *
+ * - For reads, where a register contains a relevant value of more
+ *   than 32 bits, the hardware employs logic to latch the other
+ *   "half" of the data until read, ensuring an accurate value.
+ *   This is of particular relevance when dealing with CAAM's
+ *   performance counters.
+ *
+ */
+
+extern bool caam_little_end;
+extern bool caam_imx;
+
+#define caam_to_cpu(len)						\
+static inline u##len caam##len ## _to_cpu(u##len val)			\
+{									\
+	if (caam_little_end)						\
+		return le##len ## _to_cpu((__force __le##len)val);	\
+	else								\
+		return be##len ## _to_cpu((__force __be##len)val);	\
+}
+
+#define cpu_to_caam(len)					\
+static inline u##len cpu_to_caam##len(u##len val)		\
+{								\
+	if (caam_little_end)					\
+		return (__force u##len)cpu_to_le##len(val);	\
+	else							\
+		return (__force u##len)cpu_to_be##len(val);	\
+}
+
+caam_to_cpu(16)
+caam_to_cpu(32)
+caam_to_cpu(64)
+cpu_to_caam(16)
+cpu_to_caam(32)
+cpu_to_caam(64)
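+
+/*
+ * The six instantiations above expand to caam16/32/64_to_cpu() and
+ * cpu_to_caam16/32/64(). For example, cpu_to_caam32() behaves like
+ * cpu_to_be32() on a big-endian CAAM and like cpu_to_le32() on a
+ * little-endian one (e.g. i.MX).
+ */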
+
+static inline void wr_reg32(void __iomem *reg, u32 data)
+{
+	if (caam_little_end)
+		iowrite32(data, reg);
+	else
+		iowrite32be(data, reg);
+}
+
+static inline u32 rd_reg32(void __iomem *reg)
+{
+	if (caam_little_end)
+		return ioread32(reg);
+
+	return ioread32be(reg);
+}
+
+static inline void clrsetbits_32(void __iomem *reg, u32 clear, u32 set)
+{
+	if (caam_little_end)
+		iowrite32((ioread32(reg) & ~clear) | set, reg);
+	else
+		iowrite32be((ioread32be(reg) & ~clear) | set, reg);
+}
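+
+/*
+ * Illustrative sketch (editorial): the MCFG software reset described in the
+ * comment block above, expressed with the 32-bit accessors. The "base"
+ * pointer and the raw 0x0/0x4 offsets are assumptions for the example;
+ * 0x80000000 is the SWRST bit (see MCFGR_SWRESET further down):
+ *
+ *	wr_reg32(base + 0x0, 0);		// bits 63-32
+ *	wr_reg32(base + 0x4, 0x80000000);	// bits 31-0, latches the write
+ */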
+
+/*
+ * The only user of these wr/rd_reg64 functions is the Job Ring (JR).
+ * The DMA address registers in the JR are handled differently depending on
+ * platform:
+ *
+ * 1. All BE CAAM platforms and i.MX platforms (LE CAAM):
+ *
+ *    base + 0x0000 : most-significant 32 bits
+ *    base + 0x0004 : least-significant 32 bits
+ *
+ * The 32-bit version of this core therefore has to write to base + 0x0004
+ * to set the 32-bit wide DMA address.
+ *
+ * 2. All other LE CAAM platforms (LS1021A etc.)
+ *    base + 0x0000 : least-significant 32 bits
+ *    base + 0x0004 : most-significant 32 bits
+ */
+#ifdef CONFIG_64BIT
+static inline void wr_reg64(void __iomem *reg, u64 data)
+{
+	if (caam_little_end)
+		iowrite64(data, reg);
+	else
+		iowrite64be(data, reg);
+}
+
+static inline u64 rd_reg64(void __iomem *reg)
+{
+	if (caam_little_end)
+		return ioread64(reg);
+	else
+		return ioread64be(reg);
+}
+
+#else /* CONFIG_64BIT */
+static inline void wr_reg64(void __iomem *reg, u64 data)
+{
+	if (!caam_imx && caam_little_end) {
+		wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
+		wr_reg32((u32 __iomem *)(reg), data);
+	} else {
+		wr_reg32((u32 __iomem *)(reg), data >> 32);
+		wr_reg32((u32 __iomem *)(reg) + 1, data);
+	}
+}
+
+static inline u64 rd_reg64(void __iomem *reg)
+{
+	if (!caam_imx && caam_little_end)
+		return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
+			(u64)rd_reg32((u32 __iomem *)(reg)));
+
+	return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
+		(u64)rd_reg32((u32 __iomem *)(reg) + 1));
+}
+#endif /* CONFIG_64BIT  */
+
+static inline u64 cpu_to_caam_dma64(dma_addr_t value)
+{
+	if (caam_imx)
+		return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
+			 (u64)cpu_to_caam32(upper_32_bits(value)));
+
+	return cpu_to_caam64(value);
+}
+
+static inline u64 caam_dma64_to_cpu(u64 value)
+{
+	if (caam_imx)
+		return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
+			 (u64)caam32_to_cpu(upper_32_bits(value)));
+
+	return caam64_to_cpu(value);
+}
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
+#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
+#else
+#define cpu_to_caam_dma(value) cpu_to_caam32(value)
+#define caam_dma_to_cpu(value) caam32_to_cpu(value)
+#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+
+/*
+ * jr_outentry
+ * Represents each entry in a JobR output ring
+ */
+struct jr_outentry {
+	dma_addr_t desc;/* Pointer to completed descriptor */
+	u32 jrstatus;	/* Status for completed descriptor */
+} __packed;
+
+/*
+ * caam_perfmon - Performance Monitor/Secure Memory Status/
+ *                CAAM Global Status/Component Version IDs
+ *
+ * Spans f00-fff wherever instantiated
+ */
+
+/* Number of DECOs */
+#define CHA_NUM_MS_DECONUM_SHIFT	24
+#define CHA_NUM_MS_DECONUM_MASK	(0xfull << CHA_NUM_MS_DECONUM_SHIFT)
+
+/*
+ * CHA version IDs / instantiation bitfields
+ * Defined for use with the cha_id fields in perfmon, but the same shift/mask
+ * selectors can be used to pull out the number of instantiated blocks within
+ * cha_num fields in perfmon because the locations are the same.
+ */
+#define CHA_ID_LS_AES_SHIFT	0
+#define CHA_ID_LS_AES_MASK	(0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_LP	(0x3ull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_HP	(0x4ull << CHA_ID_LS_AES_SHIFT)
+
+#define CHA_ID_LS_DES_SHIFT	4
+#define CHA_ID_LS_DES_MASK	(0xfull << CHA_ID_LS_DES_SHIFT)
+
+#define CHA_ID_LS_ARC4_SHIFT	8
+#define CHA_ID_LS_ARC4_MASK	(0xfull << CHA_ID_LS_ARC4_SHIFT)
+
+#define CHA_ID_LS_MD_SHIFT	12
+#define CHA_ID_LS_MD_MASK	(0xfull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP256	(0x0ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP512	(0x1ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_HP		(0x2ull << CHA_ID_LS_MD_SHIFT)
+
+#define CHA_ID_LS_RNG_SHIFT	16
+#define CHA_ID_LS_RNG_MASK	(0xfull << CHA_ID_LS_RNG_SHIFT)
+
+#define CHA_ID_LS_SNW8_SHIFT	20
+#define CHA_ID_LS_SNW8_MASK	(0xfull << CHA_ID_LS_SNW8_SHIFT)
+
+#define CHA_ID_LS_KAS_SHIFT	24
+#define CHA_ID_LS_KAS_MASK	(0xfull << CHA_ID_LS_KAS_SHIFT)
+
+#define CHA_ID_LS_PK_SHIFT	28
+#define CHA_ID_LS_PK_MASK	(0xfull << CHA_ID_LS_PK_SHIFT)
+
+#define CHA_ID_MS_CRC_SHIFT	0
+#define CHA_ID_MS_CRC_MASK	(0xfull << CHA_ID_MS_CRC_SHIFT)
+
+#define CHA_ID_MS_SNW9_SHIFT	4
+#define CHA_ID_MS_SNW9_MASK	(0xfull << CHA_ID_MS_SNW9_SHIFT)
+
+#define CHA_ID_MS_DECO_SHIFT	24
+#define CHA_ID_MS_DECO_MASK	(0xfull << CHA_ID_MS_DECO_SHIFT)
+
+#define CHA_ID_MS_JR_SHIFT	28
+#define CHA_ID_MS_JR_MASK	(0xfull << CHA_ID_MS_JR_SHIFT)
+
+struct sec_vid {
+	u16 ip_id;
+	u8 maj_rev;
+	u8 min_rev;
+};
+
+struct caam_perfmon {
+	/* Performance Monitor Registers			f00-f9f */
+	u64 req_dequeued;	/* PC_REQ_DEQ - Dequeued Requests	     */
+	u64 ob_enc_req;	/* PC_OB_ENC_REQ - Outbound Encrypt Requests */
+	u64 ib_dec_req;	/* PC_IB_DEC_REQ - Inbound Decrypt Requests  */
+	u64 ob_enc_bytes;	/* PC_OB_ENCRYPT - Outbound Bytes Encrypted  */
+	u64 ob_prot_bytes;	/* PC_OB_PROTECT - Outbound Bytes Protected  */
+	u64 ib_dec_bytes;	/* PC_IB_DECRYPT - Inbound Bytes Decrypted   */
+	u64 ib_valid_bytes;	/* PC_IB_VALIDATED Inbound Bytes Validated   */
+	u64 rsvd[13];
+
+	/* CAAM Hardware Instantiation Parameters		fa0-fbf */
+	u32 cha_rev_ms;		/* CRNR - CHA Rev No. Most significant half*/
+	u32 cha_rev_ls;		/* CRNR - CHA Rev No. Least significant half*/
+#define CTPR_MS_QI_SHIFT	25
+#define CTPR_MS_QI_MASK		(0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_DPAA2		BIT(13)
+#define CTPR_MS_VIRT_EN_INCL	0x00000001
+#define CTPR_MS_VIRT_EN_POR	0x00000002
+#define CTPR_MS_PG_SZ_MASK	0x10
+#define CTPR_MS_PG_SZ_SHIFT	4
+	u32 comp_parms_ms;	/* CTPR - Compile Parameters Register	*/
+	u32 comp_parms_ls;	/* CTPR - Compile Parameters Register	*/
+	u64 rsvd1[2];
+
+	/* CAAM Global Status					fc0-fdf */
+	u64 faultaddr;	/* FAR  - Fault Address		*/
+	u32 faultliodn;	/* FALR - Fault Address LIODN	*/
+	u32 faultdetail;	/* FADR - Fault Addr Detail	*/
+	u32 rsvd2;
+#define CSTA_PLEND		BIT(10)
+#define CSTA_ALT_PLEND		BIT(18)
+	u32 status;		/* CSTA - CAAM Status */
+	u64 rsvd3;
+
+	/* Component Instantiation Parameters			fe0-fff */
+	u32 rtic_id;		/* RVID - RTIC Version ID	*/
+#define CCBVID_ERA_MASK		0xff000000
+#define CCBVID_ERA_SHIFT	24
+	u32 ccb_id;		/* CCBVID - CCB Version ID	*/
+	u32 cha_id_ms;		/* CHAVID - CHA Version ID Most Significant*/
+	u32 cha_id_ls;		/* CHAVID - CHA Version ID Least Significant*/
+	u32 cha_num_ms;		/* CHANUM - CHA Number Most Significant	*/
+	u32 cha_num_ls;		/* CHANUM - CHA Number Least Significant*/
+#define SECVID_MS_IPID_MASK	0xffff0000
+#define SECVID_MS_IPID_SHIFT	16
+#define SECVID_MS_MAJ_REV_MASK	0x0000ff00
+#define SECVID_MS_MAJ_REV_SHIFT	8
+	u32 caam_id_ms;		/* CAAMVID - CAAM Version ID MS	*/
+	u32 caam_id_ls;		/* CAAMVID - CAAM Version ID LS	*/
+};
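+
+/*
+ * Editorial sketch: the performance counters above are 64-bit read-intended
+ * registers, i.e. the kind of value rd_reg64() exists for:
+ *
+ *	u64 deq = rd_reg64(&perfmon->req_dequeued);
+ *
+ * and the sec_vid fields can be extracted from caam_id_ms with the
+ * SECVID_MS_* mask/shift pairs:
+ *
+ *	vid.ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
+ */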
+
+/* LIODN programming for DMA configuration */
+#define MSTRID_LOCK_LIODN	0x80000000
+#define MSTRID_LOCK_MAKETRUSTED	0x00010000	/* only for JR masterid */
+
+#define MSTRID_LIODN_MASK	0x0fff
+struct masterid {
+	u32 liodn_ms;	/* lock and make-trusted control bits */
+	u32 liodn_ls;	/* LIODN for non-sequence and seq access */
+};
+
+/* Partition ID for DMA configuration */
+struct partid {
+	u32 rsvd1;
+	u32 pidr;	/* partition ID, DECO */
+};
+
+/* RNGB test mode (replicated twice in some configurations) */
+/* Padded out to 0x100 */
+struct rngtst {
+	u32 mode;		/* RTSTMODEx - Test mode */
+	u32 rsvd1[3];
+	u32 reset;		/* RTSTRESETx - Test reset control */
+	u32 rsvd2[3];
+	u32 status;		/* RTSTSSTATUSx - Test status */
+	u32 rsvd3;
+	u32 errstat;		/* RTSTERRSTATx - Test error status */
+	u32 rsvd4;
+	u32 errctl;		/* RTSTERRCTLx - Test error control */
+	u32 rsvd5;
+	u32 entropy;		/* RTSTENTROPYx - Test entropy */
+	u32 rsvd6[15];
+	u32 verifctl;	/* RTSTVERIFCTLx - Test verification control */
+	u32 rsvd7;
+	u32 verifstat;	/* RTSTVERIFSTATx - Test verification status */
+	u32 rsvd8;
+	u32 verifdata;	/* RTSTVERIFDx - Test verification data */
+	u32 rsvd9;
+	u32 xkey;		/* RTSTXKEYx - Test XKEY */
+	u32 rsvd10;
+	u32 oscctctl;	/* RTSTOSCCTCTLx - Test osc. counter control */
+	u32 rsvd11;
+	u32 oscct;		/* RTSTOSCCTx - Test oscillator counter */
+	u32 rsvd12;
+	u32 oscctstat;	/* RTSTODCCTSTATx - Test osc counter status */
+	u32 rsvd13[2];
+	u32 ofifo[4];	/* RTSTOFIFOx - Test output FIFO */
+	u32 rsvd14[15];
+};
+
+/* RNG4 TRNG test registers */
+struct rng4tst {
+#define RTMCTL_PRGM	0x00010000	/* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC	0 /* use von Neumann data in
+						     both entropy shifter and
+						     statistical checker */
+#define RTMCTL_SAMP_MODE_RAW_ES_SC		1 /* use raw data in both
+						     entropy shifter and
+						     statistical checker */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_RAW_SC	2 /* use von Neumann data in
+						     entropy shifter, raw data
+						     in statistical checker */
+#define RTMCTL_SAMP_MODE_INVALID		3 /* invalid combination */
+	u32 rtmctl;		/* misc. control register */
+	u32 rtscmisc;		/* statistical check misc. register */
+	u32 rtpkrrng;		/* poker range register */
+	union {
+		u32 rtpkrmax;	/* PRGM=1: poker max. limit register */
+		u32 rtpkrsq;	/* PRGM=0: poker square calc. result register */
+	};
+#define RTSDCTL_ENT_DLY_SHIFT 16
+#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
+#define RTSDCTL_ENT_DLY_MIN 3200
+#define RTSDCTL_ENT_DLY_MAX 12800
+	u32 rtsdctl;		/* seed control register */
+	union {
+		u32 rtsblim;	/* PRGM=1: sparse bit limit register */
+		u32 rttotsam;	/* PRGM=0: total samples register */
+	};
+	u32 rtfrqmin;		/* frequency count min. limit register */
+#define RTFRQMAX_DISABLE	(1 << 20)
+	union {
+		u32 rtfrqmax;	/* PRGM=1: freq. count max. limit register */
+		u32 rtfrqcnt;	/* PRGM=0: freq. count register */
+	};
+	u32 rsvd1[40];
+#define RDSTA_SKVT 0x80000000
+#define RDSTA_SKVN 0x40000000
+#define RDSTA_IF0 0x00000001
+#define RDSTA_IF1 0x00000002
+#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
+	u32 rdsta;
+	u32 rsvd2[15];
+};
+
+/*
+ * caam_ctrl - basic core configuration
+ * starts base + 0x0000 padded out to 0x1000
+ */
+
+#define KEK_KEY_SIZE		8
+#define TKEK_KEY_SIZE		8
+#define TDSK_KEY_SIZE		8
+
+#define DECO_RESET	1	/* Use with DECO reset/availability regs */
+#define DECO_RESET_0	(DECO_RESET << 0)
+#define DECO_RESET_1	(DECO_RESET << 1)
+#define DECO_RESET_2	(DECO_RESET << 2)
+#define DECO_RESET_3	(DECO_RESET << 3)
+#define DECO_RESET_4	(DECO_RESET << 4)
+
+struct caam_ctrl {
+	/* Basic Configuration Section				000-01f */
+	/* Read/Writable					        */
+	u32 rsvd1;
+	u32 mcr;		/* MCFG      Master Config Register  */
+	u32 rsvd2;
+	u32 scfgr;		/* SCFGR, Security Config Register */
+
+	/* Bus Access Configuration Section			010-11f */
+	/* Read/Writable                                                */
+	struct masterid jr_mid[4];	/* JRxLIODNR - JobR LIODN setup */
+	u32 rsvd3[11];
+	u32 jrstart;			/* JRSTART - Job Ring Start Register */
+	struct masterid rtic_mid[4];	/* RTICxLIODNR - RTIC LIODN setup */
+	u32 rsvd4[5];
+	u32 deco_rsr;			/* DECORSR - Deco Request Source */
+	u32 rsvd11;
+	u32 deco_rq;			/* DECORR - DECO Request */
+	struct partid deco_mid[5];	/* DECOxLIODNR - 1 per DECO */
+	u32 rsvd5[22];
+
+	/* DECO Availability/Reset Section			120-3ff */
+	u32 deco_avail;		/* DAR - DECO availability */
+	u32 deco_reset;		/* DRR - DECO reset */
+	u32 rsvd6[182];
+
+	/* Key Encryption/Decryption Configuration              400-5ff */
+	/* Read/Writable only while in Non-secure mode                  */
+	u32 kek[KEK_KEY_SIZE];	/* JDKEKR - Key Encryption Key */
+	u32 tkek[TKEK_KEY_SIZE];	/* TDKEKR - Trusted Desc KEK */
+	u32 tdsk[TDSK_KEY_SIZE];	/* TDSKR - Trusted Desc Signing Key */
+	u32 rsvd7[32];
+	u64 sknonce;			/* SKNR - Secure Key Nonce */
+	u32 rsvd8[70];
+
+	/* RNG Test/Verification/Debug Access                   600-7ff */
+	/* (Useful in Test/Debug modes only...)                         */
+	union {
+		struct rngtst rtst[2];
+		struct rng4tst r4tst[2];
+	};
+
+	u32 rsvd9[448];
+
+	/* Performance Monitor                                  f00-fff */
+	struct caam_perfmon perfmon;
+};
+
+/*
+ * Controller master config register defs
+ */
+#define MCFGR_SWRESET		0x80000000 /* software reset */
+#define MCFGR_WDENABLE		0x40000000 /* DECO watchdog enable */
+#define MCFGR_WDFAIL		0x20000000 /* DECO watchdog force-fail */
+#define MCFGR_DMA_RESET		0x10000000
+#define MCFGR_LONG_PTR		0x00010000 /* Use >32-bit desc addressing */
+#define SCFGR_RDBENABLE		0x00000400
+#define SCFGR_VIRT_EN		0x00008000
+#define DECORR_RQD0ENABLE	0x00000001 /* Enable DECO0 for direct access */
+#define DECORSR_JR0		0x00000001 /* JR to supply TZ, SDID, ICID */
+#define DECORSR_VALID		0x80000000
+#define DECORR_DEN0		0x00010000 /* DECO0 available for access*/
+
+/* AXI read cache control */
+#define MCFGR_ARCACHE_SHIFT	12
+#define MCFGR_ARCACHE_MASK	(0xf << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_BUFF	(0x1 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_CACH	(0x2 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_RALL	(0x4 << MCFGR_ARCACHE_SHIFT)
+
+/* AXI write cache control */
+#define MCFGR_AWCACHE_SHIFT	8
+#define MCFGR_AWCACHE_MASK	(0xf << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_BUFF	(0x1 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_CACH	(0x2 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_WALL	(0x8 << MCFGR_AWCACHE_SHIFT)
+
+/* AXI pipeline depth */
+#define MCFGR_AXIPIPE_SHIFT	4
+#define MCFGR_AXIPIPE_MASK	(0xf << MCFGR_AXIPIPE_SHIFT)
+
+#define MCFGR_AXIPRI		0x00000008 /* Assert AXI priority sideband */
+#define MCFGR_LARGE_BURST	0x00000004 /* 128/256-byte burst size */
+#define MCFGR_BURST_64		0x00000001 /* 64-byte burst size */
+
+/* JRSTART register offsets */
+#define JRSTART_JR0_START       0x00000001 /* Start Job ring 0 */
+#define JRSTART_JR1_START       0x00000002 /* Start Job ring 1 */
+#define JRSTART_JR2_START       0x00000004 /* Start Job ring 2 */
+#define JRSTART_JR3_START       0x00000008 /* Start Job ring 3 */
+
+/*
+ * caam_job_ring - direct job ring setup
+ * 1-4 possible per instantiation, base + 1000/2000/3000/4000
+ * Padded out to 0x1000
+ */
+struct caam_job_ring {
+	/* Input ring */
+	u64 inpring_base;	/* IRBAx -  Input desc ring baseaddr */
+	u32 rsvd1;
+	u32 inpring_size;	/* IRSx - Input ring size */
+	u32 rsvd2;
+	u32 inpring_avail;	/* IRSAx - Input ring room remaining */
+	u32 rsvd3;
+	u32 inpring_jobadd;	/* IRJAx - Input ring jobs added */
+
+	/* Output Ring */
+	u64 outring_base;	/* ORBAx - Output status ring base addr */
+	u32 rsvd4;
+	u32 outring_size;	/* ORSx - Output ring size */
+	u32 rsvd5;
+	u32 outring_rmvd;	/* ORJRx - Output ring jobs removed */
+	u32 rsvd6;
+	u32 outring_used;	/* ORSFx - Output ring slots full */
+
+	/* Status/Configuration */
+	u32 rsvd7;
+	u32 jroutstatus;	/* JRSTAx - JobR output status */
+	u32 rsvd8;
+	u32 jrintstatus;	/* JRINTx - JobR interrupt status */
+	u32 rconfig_hi;	/* JRxCFG - Ring configuration */
+	u32 rconfig_lo;
+
+	/* Indices. CAAM maintains as "heads" of each queue */
+	u32 rsvd9;
+	u32 inp_rdidx;	/* IRRIx - Input ring read index */
+	u32 rsvd10;
+	u32 out_wtidx;	/* ORWIx - Output ring write index */
+
+	/* Command/control */
+	u32 rsvd11;
+	u32 jrcommand;	/* JRCRx - JobR command */
+
+	u32 rsvd12[932];
+
+	/* Performance Monitor                                  f00-fff */
+	struct caam_perfmon perfmon;
+};
+
+#define JR_RINGSIZE_MASK	0x03ff
+/*
+ * jrstatus - Job Ring Output Status
+ * All values in lo word
+ * Also note, same values written out as status through QI
+ * in the command/status field of a frame descriptor
+ */
+#define JRSTA_SSRC_SHIFT            28
+#define JRSTA_SSRC_MASK             0xf0000000
+
+#define JRSTA_SSRC_NONE             0x00000000
+#define JRSTA_SSRC_CCB_ERROR        0x20000000
+#define JRSTA_SSRC_JUMP_HALT_USER   0x30000000
+#define JRSTA_SSRC_DECO             0x40000000
+#define JRSTA_SSRC_JRERROR          0x60000000
+#define JRSTA_SSRC_JUMP_HALT_CC     0x70000000
+
+#define JRSTA_DECOERR_JUMP          0x08000000
+#define JRSTA_DECOERR_INDEX_SHIFT   8
+#define JRSTA_DECOERR_INDEX_MASK    0xff00
+#define JRSTA_DECOERR_ERROR_MASK    0x00ff
+
+#define JRSTA_DECOERR_NONE          0x00
+#define JRSTA_DECOERR_LINKLEN       0x01
+#define JRSTA_DECOERR_LINKPTR       0x02
+#define JRSTA_DECOERR_JRCTRL        0x03
+#define JRSTA_DECOERR_DESCCMD       0x04
+#define JRSTA_DECOERR_ORDER         0x05
+#define JRSTA_DECOERR_KEYCMD        0x06
+#define JRSTA_DECOERR_LOADCMD       0x07
+#define JRSTA_DECOERR_STORECMD      0x08
+#define JRSTA_DECOERR_OPCMD         0x09
+#define JRSTA_DECOERR_FIFOLDCMD     0x0a
+#define JRSTA_DECOERR_FIFOSTCMD     0x0b
+#define JRSTA_DECOERR_MOVECMD       0x0c
+#define JRSTA_DECOERR_JUMPCMD       0x0d
+#define JRSTA_DECOERR_MATHCMD       0x0e
+#define JRSTA_DECOERR_SHASHCMD      0x0f
+#define JRSTA_DECOERR_SEQCMD        0x10
+#define JRSTA_DECOERR_DECOINTERNAL  0x11
+#define JRSTA_DECOERR_SHDESCHDR     0x12
+#define JRSTA_DECOERR_HDRLEN        0x13
+#define JRSTA_DECOERR_BURSTER       0x14
+#define JRSTA_DECOERR_DESCSIGNATURE 0x15
+#define JRSTA_DECOERR_DMA           0x16
+#define JRSTA_DECOERR_BURSTFIFO     0x17
+#define JRSTA_DECOERR_JRRESET       0x1a
+#define JRSTA_DECOERR_JOBFAIL       0x1b
+#define JRSTA_DECOERR_DNRERR        0x80
+#define JRSTA_DECOERR_UNDEFPCL      0x81
+#define JRSTA_DECOERR_PDBERR        0x82
+#define JRSTA_DECOERR_ANRPLY_LATE   0x83
+#define JRSTA_DECOERR_ANRPLY_REPLAY 0x84
+#define JRSTA_DECOERR_SEQOVF        0x85
+#define JRSTA_DECOERR_INVSIGN       0x86
+#define JRSTA_DECOERR_DSASIGN       0x87
+
+#define JRSTA_CCBERR_JUMP           0x08000000
+#define JRSTA_CCBERR_INDEX_MASK     0xff00
+#define JRSTA_CCBERR_INDEX_SHIFT    8
+#define JRSTA_CCBERR_CHAID_MASK     0x00f0
+#define JRSTA_CCBERR_CHAID_SHIFT    4
+#define JRSTA_CCBERR_ERRID_MASK     0x000f
+
+#define JRSTA_CCBERR_CHAID_AES      (0x01 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_DES      (0x02 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_ARC4     (0x03 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_MD       (0x04 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_RNG      (0x05 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_SNOW     (0x06 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_KASUMI   (0x07 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_PK       (0x08 << JRSTA_CCBERR_CHAID_SHIFT)
+#define JRSTA_CCBERR_CHAID_CRC      (0x09 << JRSTA_CCBERR_CHAID_SHIFT)
+
+#define JRSTA_CCBERR_ERRID_NONE     0x00
+#define JRSTA_CCBERR_ERRID_MODE     0x01
+#define JRSTA_CCBERR_ERRID_DATASIZ  0x02
+#define JRSTA_CCBERR_ERRID_KEYSIZ   0x03
+#define JRSTA_CCBERR_ERRID_PKAMEMSZ 0x04
+#define JRSTA_CCBERR_ERRID_PKBMEMSZ 0x05
+#define JRSTA_CCBERR_ERRID_SEQUENCE 0x06
+#define JRSTA_CCBERR_ERRID_PKDIVZRO 0x07
+#define JRSTA_CCBERR_ERRID_PKMODEVN 0x08
+#define JRSTA_CCBERR_ERRID_KEYPARIT 0x09
+#define JRSTA_CCBERR_ERRID_ICVCHK   0x0a
+#define JRSTA_CCBERR_ERRID_HARDWARE 0x0b
+#define JRSTA_CCBERR_ERRID_CCMAAD   0x0c
+#define JRSTA_CCBERR_ERRID_INVCHA   0x0f
+
+#define JRINT_ERR_INDEX_MASK        0x3fff0000
+#define JRINT_ERR_INDEX_SHIFT       16
+#define JRINT_ERR_TYPE_MASK         0xf00
+#define JRINT_ERR_TYPE_SHIFT        8
+#define JRINT_ERR_HALT_MASK         0xc
+#define JRINT_ERR_HALT_SHIFT        2
+#define JRINT_ERR_HALT_INPROGRESS   0x4
+#define JRINT_ERR_HALT_COMPLETE     0x8
+#define JRINT_JR_ERROR              0x02
+#define JRINT_JR_INT                0x01
+
+#define JRINT_ERR_TYPE_WRITE        1
+#define JRINT_ERR_TYPE_BAD_INPADDR  3
+#define JRINT_ERR_TYPE_BAD_OUTADDR  4
+#define JRINT_ERR_TYPE_INV_INPWRT   5
+#define JRINT_ERR_TYPE_INV_OUTWRT   6
+#define JRINT_ERR_TYPE_RESET        7
+#define JRINT_ERR_TYPE_REMOVE_OFL   8
+#define JRINT_ERR_TYPE_ADD_OFL      9
+
+#define JRCFG_SOE		0x04
+#define JRCFG_ICEN		0x02
+#define JRCFG_IMSK		0x01
+#define JRCFG_ICDCT_SHIFT	8
+#define JRCFG_ICTT_SHIFT	16
+
+#define JRCR_RESET                  0x01
+
+/*
+ * caam_assurance - Assurance Controller View
+ * base + 0x6000 padded out to 0x1000
+ */
+
+struct rtic_element {
+	u64 address;
+	u32 rsvd;
+	u32 length;
+};
+
+struct rtic_block {
+	struct rtic_element element[2];
+};
+
+struct rtic_memhash {
+	u32 memhash_be[32];
+	u32 memhash_le[32];
+};
+
+struct caam_assurance {
+    /* Status/Command/Watchdog */
+	u32 rsvd1;
+	u32 status;		/* RSTA - Status */
+	u32 rsvd2;
+	u32 cmd;		/* RCMD - Command */
+	u32 rsvd3;
+	u32 ctrl;		/* RCTL - Control */
+	u32 rsvd4;
+	u32 throttle;	/* RTHR - Throttle */
+	u32 rsvd5[2];
+	u64 watchdog;	/* RWDOG - Watchdog Timer */
+	u32 rsvd6;
+	u32 rend;		/* REND - Endian corrections */
+	u32 rsvd7[50];
+
+	/* Block access/configuration @ 100/110/120/130 */
+	struct rtic_block memblk[4];	/* Memory Blocks A-D */
+	u32 rsvd8[32];
+
+	/* Block hashes @ 200/300/400/500 */
+	struct rtic_memhash hash[4];	/* Block hash values A-D */
+	u32 rsvd_3[640];
+};
+
+/*
+ * caam_queue_if - QI configuration and control
+ * starts base + 0x7000, padded out to 0x1000 long
+ */
+
+struct caam_queue_if {
+	u32 qi_control_hi;	/* QICTL  - QI Control */
+	u32 qi_control_lo;
+	u32 rsvd1;
+	u32 qi_status;	/* QISTA  - QI Status */
+	u32 qi_deq_cfg_hi;	/* QIDQC  - QI Dequeue Configuration */
+	u32 qi_deq_cfg_lo;
+	u32 qi_enq_cfg_hi;	/* QISEQC - QI Enqueue Command     */
+	u32 qi_enq_cfg_lo;
+	u32 rsvd2[1016];
+};
+
+/* QI control bits - low word */
+#define QICTL_DQEN      0x01              /* Enable frame pop          */
+#define QICTL_STOP      0x02              /* Stop dequeue/enqueue      */
+#define QICTL_SOE       0x04              /* Stop on error             */
+
+/* QI control bits - high word */
+#define QICTL_MBSI	0x01
+#define QICTL_MHWSI	0x02
+#define QICTL_MWSI	0x04
+#define QICTL_MDWSI	0x08
+#define QICTL_CBSI	0x10		/* CtrlDataByteSwapInput     */
+#define QICTL_CHWSI	0x20		/* CtrlDataHalfSwapInput     */
+#define QICTL_CWSI	0x40		/* CtrlDataWordSwapInput     */
+#define QICTL_CDWSI	0x80		/* CtrlDataDWordSwapInput    */
+#define QICTL_MBSO	0x0100
+#define QICTL_MHWSO	0x0200
+#define QICTL_MWSO	0x0400
+#define QICTL_MDWSO	0x0800
+#define QICTL_CBSO	0x1000		/* CtrlDataByteSwapOutput    */
+#define QICTL_CHWSO	0x2000		/* CtrlDataHalfSwapOutput    */
+#define QICTL_CWSO	0x4000		/* CtrlDataWordSwapOutput    */
+#define QICTL_CDWSO     0x8000		/* CtrlDataDWordSwapOutput   */
+#define QICTL_DMBS	0x010000
+#define QICTL_EPO	0x020000
+
+/* QI status bits */
+#define QISTA_PHRDERR   0x01              /* PreHeader Read Error      */
+#define QISTA_CFRDERR   0x02              /* Compound Frame Read Error */
+#define QISTA_OFWRERR   0x04              /* Output Frame Read Error   */
+#define QISTA_BPDERR    0x08              /* Buffer Pool Depleted      */
+#define QISTA_BTSERR    0x10              /* Buffer Undersize          */
+#define QISTA_CFWRERR   0x20              /* Compound Frame Write Err  */
+#define QISTA_STOPD     0x80000000        /* QI Stopped (see QICTL)    */
+
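+/*
+ * Illustrative sketch, not part of the original file: after QICTL_STOP is
+ * written to the QI control register, the hardware acknowledges by raising
+ * QISTA_STOPD in the status register.  The helper name is hypothetical;
+ * "qista" is assumed to be the status value in CPU byte order.
+ */
+static inline bool example_qi_stopped(u32 qista)
+{
+	return !!(qista & QISTA_STOPD);
+}
+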
+/* deco_sg_table - DECO view of scatter/gather table */
+struct deco_sg_table {
+	u64 addr;		/* Segment Address */
+	u32 elen;		/* E, F bits + 30-bit length */
+	u32 bpid_offset;	/* Buffer Pool ID + 16-bit offset */
+};
+
+/*
+ * caam_deco - descriptor controller - CHA cluster block
+ *
+ * Only accessible when direct DECO access is turned on
+ * (done in DECORR, via the MID programmed in DECOxMID)
+ *
+ * 5 typical, base + 0x8000/9000/a000/b000
+ * Padded out to 0x1000 long
+ */
+struct caam_deco {
+	u32 rsvd1;
+	u32 cls1_mode;	/* CxC1MR -  Class 1 Mode */
+	u32 rsvd2;
+	u32 cls1_keysize;	/* CxC1KSR - Class 1 Key Size */
+	u32 cls1_datasize_hi;	/* CxC1DSR - Class 1 Data Size */
+	u32 cls1_datasize_lo;
+	u32 rsvd3;
+	u32 cls1_icvsize;	/* CxC1ICVSR - Class 1 ICV size */
+	u32 rsvd4[5];
+	u32 cha_ctrl;	/* CCTLR - CHA control */
+	u32 rsvd5;
+	u32 irq_ctrl;	/* CxCIRQ - CCB interrupt done/error/clear */
+	u32 rsvd6;
+	u32 clr_written;	/* CxCWR - Clear-Written */
+	u32 ccb_status_hi;	/* CxCSTA - CCB Status/Error */
+	u32 ccb_status_lo;
+	u32 rsvd7[3];
+	u32 aad_size;	/* CxAADSZR - Current AAD Size */
+	u32 rsvd8;
+	u32 cls1_iv_size;	/* CxC1IVSZR - Current Class 1 IV Size */
+	u32 rsvd9[7];
+	u32 pkha_a_size;	/* PKASZRx - Size of PKHA A */
+	u32 rsvd10;
+	u32 pkha_b_size;	/* PKBSZRx - Size of PKHA B */
+	u32 rsvd11;
+	u32 pkha_n_size;	/* PKNSZRx - Size of PKHA N */
+	u32 rsvd12;
+	u32 pkha_e_size;	/* PKESZRx - Size of PKHA E */
+	u32 rsvd13[24];
+	u32 cls1_ctx[16];	/* CxC1CTXR - Class 1 Context @100 */
+	u32 rsvd14[48];
+	u32 cls1_key[8];	/* CxC1KEYR - Class 1 Key @200 */
+	u32 rsvd15[121];
+	u32 cls2_mode;	/* CxC2MR - Class 2 Mode */
+	u32 rsvd16;
+	u32 cls2_keysize;	/* CxC2KSR - Class 2 Key Size */
+	u32 cls2_datasize_hi;	/* CxC2DSR - Class 2 Data Size */
+	u32 cls2_datasize_lo;
+	u32 rsvd17;
+	u32 cls2_icvsize;	/* CxC2ICVSZR - Class 2 ICV Size */
+	u32 rsvd18[56];
+	u32 cls2_ctx[18];	/* CxC2CTXR - Class 2 Context @500 */
+	u32 rsvd19[46];
+	u32 cls2_key[32];	/* CxC2KEYR - Class2 Key @600 */
+	u32 rsvd20[84];
+	u32 inp_infofifo_hi;	/* CxIFIFO - Input Info FIFO @7d0 */
+	u32 inp_infofifo_lo;
+	u32 rsvd21[2];
+	u64 inp_datafifo;	/* CxDFIFO - Input Data FIFO */
+	u32 rsvd22[2];
+	u64 out_datafifo;	/* CxOFIFO - Output Data FIFO */
+	u32 rsvd23[2];
+	u32 jr_ctl_hi;	/* CxJRR - JobR Control Register      @800 */
+	u32 jr_ctl_lo;
+	u64 jr_descaddr;	/* CxDADR - JobR Descriptor Address */
+#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF
+	u32 op_status_hi;	/* DxOPSTA - DECO Operation Status */
+	u32 op_status_lo;
+	u32 rsvd24[2];
+	u32 liodn;		/* DxLSR - DECO LIODN Status - non-seq */
+	u32 td_liodn;	/* DxLSR - DECO LIODN Status - trustdesc */
+	u32 rsvd26[6];
+	u64 math[4];		/* DxMTH - Math register */
+	u32 rsvd27[8];
+	struct deco_sg_table gthr_tbl[4];	/* DxGTR - Gather Tables */
+	u32 rsvd28[16];
+	struct deco_sg_table sctr_tbl[4];	/* DxSTR - Scatter Tables */
+	u32 rsvd29[48];
+	u32 descbuf[64];	/* DxDESB - Descriptor buffer */
+	u32 rsvd30[193];
+#define DESC_DBG_DECO_STAT_HOST_ERR	0x00D00000
+#define DESC_DBG_DECO_STAT_VALID	0x80000000
+#define DESC_DBG_DECO_STAT_MASK		0x00F00000
+	u32 desc_dbg;		/* DxDDR - DECO Debug Register */
+	u32 rsvd31[126];
+};
+
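+/*
+ * Illustrative sketch, not part of the original file: a DECO is still
+ * executing a descriptor while DESC_DBG_DECO_STAT_VALID is set in its
+ * debug register.  The helper name is hypothetical; "dbg" is assumed to
+ * be the DxDDR value in CPU byte order.
+ */
+static inline bool example_deco_busy(u32 dbg)
+{
+	return !!(dbg & DESC_DBG_DECO_STAT_VALID);
+}
+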
+#define DECO_JQCR_WHL		0x20000000
+#define DECO_JQCR_FOUR		0x10000000
+
+#define JR_BLOCK_NUMBER		1
+#define ASSURE_BLOCK_NUMBER	6
+#define QI_BLOCK_NUMBER		7
+#define DECO_BLOCK_NUMBER	8
+#define PG_SIZE_4K		0x1000
+#define PG_SIZE_64K		0x10000
+#endif /* REGS_H */
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
new file mode 100644
index 0000000..d000b4d
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SG_SW_QM_H
+#define __SG_SW_QM_H
+
+#include <soc/fsl/qman.h>
+#include "regs.h"
+
+static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
+				  u16 offset)
+{
+	qm_sg_entry_set64(qm_sg_ptr, dma);
+	qm_sg_ptr->__reserved2 = 0;
+	qm_sg_ptr->bpid = 0;
+	qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
+}
+
+static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
+				    dma_addr_t dma, u32 len, u16 offset)
+{
+	__dma_to_qm_sg(qm_sg_ptr, dma, offset);
+	qm_sg_entry_set_len(qm_sg_ptr, len);
+}
+
+static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
+					 dma_addr_t dma, u32 len, u16 offset)
+{
+	__dma_to_qm_sg(qm_sg_ptr, dma, offset);
+	qm_sg_entry_set_f(qm_sg_ptr, len);
+}
+
+static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
+					dma_addr_t dma, u32 len, u16 offset)
+{
+	__dma_to_qm_sg(qm_sg_ptr, dma, offset);
+	qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
+}
+
+static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
+					     dma_addr_t dma, u32 len,
+					     u16 offset)
+{
+	__dma_to_qm_sg(qm_sg_ptr, dma, offset);
+	qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
+				     (len & QM_SG_LEN_MASK));
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * but does not set the final bit; instead, returns a pointer to the last entry
+ */
+static inline struct qm_sg_entry *
+sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+	    struct qm_sg_entry *qm_sg_ptr, u16 offset)
+{
+	while (sg_count && sg) {
+		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
+				 sg_dma_len(sg), offset);
+		qm_sg_ptr++;
+		sg = sg_next(sg);
+		sg_count--;
+	}
+	return qm_sg_ptr - 1;
+}
+
+/*
+ * convert scatterlist to h/w link table format and set the final bit
+ * on the last entry; scatterlist must have been previously dma mapped
+ */
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+				    struct qm_sg_entry *qm_sg_ptr, u16 offset)
+{
+	qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+	qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
+}
+
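+/*
+ * Illustrative usage sketch, not part of the original file: DMA-map a
+ * scatterlist, then mirror the mapped entries into a QMan S/G table with
+ * the final bit set on the last one.  The function name and "table"
+ * parameter are hypothetical, and <linux/dma-mapping.h> is assumed to be
+ * available; error handling is minimal.
+ */
+static inline int example_map_to_qm_sg(struct device *dev,
+				       struct scatterlist *sg, int nents,
+				       struct qm_sg_entry *table)
+{
+	int mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
+
+	if (!mapped)
+		return -ENOMEM;
+	sg_to_qm_sg_last(sg, mapped, table, 0);
+	return mapped;
+}
+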
+#endif /* __SG_SW_QM_H */
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
new file mode 100644
index 0000000..b5b4c12
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the names of the above-listed copyright holders nor the
+ *	 names of any contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SG_SW_QM2_H_
+#define _SG_SW_QM2_H_
+
+#include <soc/fsl/dpaa2-fd.h>
+
+static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
+				    dma_addr_t dma, u32 len, u16 offset)
+{
+	dpaa2_sg_set_addr(qm_sg_ptr, dma);
+	dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
+	dpaa2_sg_set_final(qm_sg_ptr, false);
+	dpaa2_sg_set_len(qm_sg_ptr, len);
+	dpaa2_sg_set_bpid(qm_sg_ptr, 0);
+	dpaa2_sg_set_offset(qm_sg_ptr, offset);
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * but does not set the final bit; instead, returns a pointer to the last entry
+ */
+static inline struct dpaa2_sg_entry *
+sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+	    struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
+{
+	while (sg_count && sg) {
+		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
+				 sg_dma_len(sg), offset);
+		qm_sg_ptr++;
+		sg = sg_next(sg);
+		sg_count--;
+	}
+	return qm_sg_ptr - 1;
+}
+
+/*
+ * convert scatterlist to h/w link table format and set the final bit
+ * on the last entry; scatterlist must have been previously dma mapped
+ */
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+				    struct dpaa2_sg_entry *qm_sg_ptr,
+				    u16 offset)
+{
+	qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+	dpaa2_sg_set_final(qm_sg_ptr, true);
+}
+
+#endif /* _SG_SW_QM2_H_ */
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
new file mode 100644
index 0000000..dbfa9fc
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CAAM/SEC 4.x functions for using scatterlists in caam driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef _SG_SW_SEC4_H_
+#define _SG_SW_SEC4_H_
+
+#include "ctrl.h"
+#include "regs.h"
+#include "sg_sw_qm2.h"
+#include <soc/fsl/dpaa2-fd.h>
+
+struct sec4_sg_entry {
+	u64 ptr;
+	u32 len;
+	u32 bpid_offset;
+};
+
+/*
+ * convert single dma address to h/w link table format
+ */
+static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
+				      dma_addr_t dma, u32 len, u16 offset)
+{
+	if (caam_dpaa2) {
+		dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
+				 offset);
+	} else {
+		sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
+		sec4_sg_ptr->len = cpu_to_caam32(len);
+		sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
+							 SEC4_SG_OFFSET_MASK);
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
+		       sizeof(struct sec4_sg_entry), 1);
+#endif
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * but does not set the final bit; instead, returns a pointer to the last entry
+ */
+static inline struct sec4_sg_entry *
+sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
+	      struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
+{
+	while (sg_count) {
+		dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
+				   sg_dma_len(sg), offset);
+		sec4_sg_ptr++;
+		sg = sg_next(sg);
+		sg_count--;
+	}
+	return sec4_sg_ptr - 1;
+}
+
+static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
+{
+	if (caam_dpaa2)
+		dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
+	else
+		sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+}
+
+/*
+ * convert scatterlist to h/w link table format and set the final bit
+ * on the last entry; scatterlist must have been previously dma mapped
+ */
+static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
+				      struct sec4_sg_entry *sec4_sg_ptr,
+				      u16 offset)
+{
+	sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
+	sg_to_sec4_set_last(sec4_sg_ptr);
+}
+
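+/*
+ * Illustrative usage sketch, not part of the original file: the helpers
+ * above switch between the SEC4 and DPAA2 entry layouts at run time via
+ * caam_dpaa2, so a caller only DMA-maps the scatterlist and hands it over.
+ * The function name and "table" parameter are hypothetical, and
+ * <linux/dma-mapping.h> is assumed to be available; error handling is
+ * minimal.
+ */
+static inline int example_map_to_sec4_sg(struct device *dev,
+					 struct scatterlist *sg, int nents,
+					 struct sec4_sg_entry *table)
+{
+	int mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
+
+	if (!mapped)
+		return -ENOMEM;
+	sg_to_sec4_sg_last(sg, mapped, table, 0);
+	return mapped;
+}
+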
+#endif /* _SG_SW_SEC4_H_ */