Linux v4.19.13 snapshot of drivers/crypto/inside-secure (SafeXcel EIP97/EIP197 driver).
diff --git a/drivers/crypto/inside-secure/Makefile b/drivers/crypto/inside-secure/Makefile
new file mode 100644
index 0000000..302f07d
--- /dev/null
+++ b/drivers/crypto/inside-secure/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += crypto_safexcel.o
+crypto_safexcel-objs := safexcel.o safexcel_ring.o safexcel_cipher.o safexcel_hash.o
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
new file mode 100644
index 0000000..86c699c
--- /dev/null
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -0,0 +1,1215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#include "safexcel.h"
+
+static u32 max_rings = EIP197_MAX_RINGS;
+module_param(max_rings, uint, 0644);
+MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
+
+static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
+{
+	u32 val, htable_offset;
+	int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
+
+	if (priv->version == EIP197B) {
+		cs_rc_max = EIP197B_CS_RC_MAX;
+		cs_ht_wc = EIP197B_CS_HT_WC;
+		cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
+		cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
+	} else {
+		cs_rc_max = EIP197D_CS_RC_MAX;
+		cs_ht_wc = EIP197D_CS_HT_WC;
+		cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
+		cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
+	}
+
+	/* Enable the record cache memory access */
+	val = readl(priv->base + EIP197_CS_RAM_CTRL);
+	val &= ~EIP197_TRC_ENABLE_MASK;
+	val |= EIP197_TRC_ENABLE_0;
+	writel(val, priv->base + EIP197_CS_RAM_CTRL);
+
+	/* Clear all ECC errors */
+	writel(0, priv->base + EIP197_TRC_ECCCTRL);
+
+	/*
+	 * Make sure the cache memory is accessible by taking record cache into
+	 * reset.
+	 */
+	val = readl(priv->base + EIP197_TRC_PARAMS);
+	val |= EIP197_TRC_PARAMS_SW_RESET;
+	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
+	writel(val, priv->base + EIP197_TRC_PARAMS);
+
+	/* Clear all records and build the record free chain */
+	for (i = 0; i < cs_rc_max; i++) {
+		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
+
+		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
+		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
+		       priv->base + offset);
+
+		if (i == 0)
+			val = EIP197_CS_RC_NEXT(i + 1) |
+			      EIP197_CS_RC_PREV(EIP197_RC_NULL);
+		else if (i == cs_rc_max - 1)
+			val = EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
+			      EIP197_CS_RC_PREV(i - 1);
+		else
+			val = EIP197_CS_RC_NEXT(i + 1) |
+			      EIP197_CS_RC_PREV(i - 1);
+		writel(val, priv->base + offset + sizeof(u32));
+	}
+
+	/* Clear the hash table entries */
+	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
+	for (i = 0; i < cs_ht_wc; i++)
+		writel(GENMASK(29, 0),
+		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));
+
+	/* Disable the record cache memory access */
+	val = readl(priv->base + EIP197_CS_RAM_CTRL);
+	val &= ~EIP197_TRC_ENABLE_MASK;
+	writel(val, priv->base + EIP197_CS_RAM_CTRL);
+
+	/* Write head and tail pointers of the record free chain */
+	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
+	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
+	writel(val, priv->base + EIP197_TRC_FREECHAIN);
+
+	/* Configure the record cache #1 */
+	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
+	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
+	writel(val, priv->base + EIP197_TRC_PARAMS2);
+
+	/* Configure the record cache #2 */
+	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
+	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
+	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
+	writel(val, priv->base + EIP197_TRC_PARAMS);
+}
+
+static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
+				  const struct firmware *fw, int pe, u32 ctrl,
+				  u32 prog_en)
+{
+	const u32 *data = (const u32 *)fw->data;
+	u32 val;
+	int i;
+
+	/* Reset the engine to make its program memory accessible */
+	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
+	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
+	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
+	       EIP197_PE(priv) + ctrl);
+
+	/* Enable access to the program memory */
+	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
+
+	/* Write the firmware */
+	for (i = 0; i < fw->size / sizeof(u32); i++)
+		writel(be32_to_cpu(data[i]),
+		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
+
+	/* Disable access to the program memory */
+	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
+
+	/* Release engine from reset */
+	val = readl(EIP197_PE(priv) + ctrl);
+	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
+	writel(val, EIP197_PE(priv) + ctrl);
+}
+
+static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
+{
+	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
+	const struct firmware *fw[FW_NB];
+	char fw_path[31], *dir = NULL;
+	int i, j, ret = 0, pe;
+	u32 val;
+
+	switch (priv->version) {
+	case EIP197B:
+		dir = "eip197b";
+		break;
+	case EIP197D:
+		dir = "eip197d";
+		break;
+	default:
+		/* No firmware is required */
+		return 0;
+	}
+
+	for (i = 0; i < FW_NB; i++) {
+		snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
+		ret = request_firmware(&fw[i], fw_path, priv->dev);
+		if (ret) {
+			if (priv->version != EIP197B)
+				goto release_fw;
+
+			/* Fall back to the old firmware location for the
+			 * EIP197b.
+			 */
+			ret = request_firmware(&fw[i], fw_name[i], priv->dev);
+			if (ret) {
+				dev_err(priv->dev,
+					"Failed to request firmware %s (%d)\n",
+					fw_name[i], ret);
+				goto release_fw;
+			}
+		}
+	}
+
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		/* Clear the scratchpad memory */
+		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
+		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
+		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
+		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
+		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+
+		memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
+			  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
+
+		eip197_write_firmware(priv, fw[FW_IFPP], pe,
+				      EIP197_PE_ICE_FPP_CTRL(pe),
+				      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+
+		eip197_write_firmware(priv, fw[FW_IPUE], pe,
+				      EIP197_PE_ICE_PUE_CTRL(pe),
+				      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
+	}
+
+release_fw:
+	for (j = 0; j < i; j++)
+		release_firmware(fw[j]);
+
+	return ret;
+}
+
+static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
+{
+	u32 hdw, cd_size_rnd, val;
+	int i;
+
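+	/*
+	 * HIA_OPTIONS[27:25] gives the host data width (hdw) as a power of
+	 * two of 32-bit words; the command descriptor size is rounded up
+	 * to a multiple of it before programming the fetch configuration.
+	 */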
+	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+	hdw &= GENMASK(27, 25);
+	hdw >>= 25;
+
+	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;
+
+	for (i = 0; i < priv->config.rings; i++) {
+		/* ring base address */
+		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
+		       priv->config.cd_size,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
+		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
+		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+		/* Configure DMA tx control */
+		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
+		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
+		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
+
+		/* clear any pending interrupt */
+		writel(GENMASK(5, 0),
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
+	}
+
+	return 0;
+}
+
+static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
+{
+	u32 hdw, rd_size_rnd, val;
+	int i;
+
+	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+	hdw &= GENMASK(27, 25);
+	hdw >>= 25;
+
+	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;
+
+	for (i = 0; i < priv->config.rings; i++) {
+		/* ring base address */
+		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
+		       priv->config.rd_size,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
+
+		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
+		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+		/* Configure DMA tx control */
+		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
+		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
+		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
+		writel(val,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
+
+		/* clear any pending interrupt */
+		writel(GENMASK(7, 0),
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
+
+		/* enable ring interrupt */
+		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+		val |= EIP197_RDR_IRQ(i);
+		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+	}
+
+	return 0;
+}
+
+static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
+{
+	u32 version, val;
+	int i, ret, pe;
+
+	/* Determine endianness and configure byte swap */
+	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
+	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
+
+	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
+		val |= EIP197_MST_CTRL_BYTE_SWAP;
+	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
+		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
+
+	/* For EIP197 set maximum number of TX commands to 2^5 = 32 */
+	if (priv->version == EIP197B || priv->version == EIP197D)
+		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
+
+	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
+
+	/* Configure wr/rd cache values */
+	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
+	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
+	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
+
+	/* Interrupts reset */
+
+	/* Disable all global interrupts */
+	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
+
+	/* Clear any pending interrupt */
+	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
+
+	/* Processing Engine configuration */
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		/* Data Fetch Engine configuration */
+
+		/* Reset all DFE threads */
+		writel(EIP197_DxE_THR_CTRL_RESET_PE,
+		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+		if (priv->version == EIP197B || priv->version == EIP197D) {
+			/* Reset HIA input interface arbiter */
+			writel(EIP197_HIA_RA_PE_CTRL_RESET,
+			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+		}
+
+		/* DMA transfer size to use */
+		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
+		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
+		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
+		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
+		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
+		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
+		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
+		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
+
+		/* Take the DFE threads out of reset */
+		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+		/* Configure the processing engine thresholds */
+		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+		       EIP197_PE_IN_xBUF_THRES_MAX(9),
+		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
+		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+		       EIP197_PE_IN_xBUF_THRES_MAX(7),
+		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
+
+		if (priv->version == EIP197B || priv->version == EIP197D) {
+			/* enable HIA input interface arbiter and rings */
+			writel(EIP197_HIA_RA_PE_CTRL_EN |
+			       GENMASK(priv->config.rings - 1, 0),
+			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+		}
+
+		/* Data Store Engine configuration */
+
+		/* Reset all DSE threads */
+		writel(EIP197_DxE_THR_CTRL_RESET_PE,
+		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+		/* Wait for all DSE threads to complete */
+		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
+			GENMASK(15, 12)) != GENMASK(15, 12))
+			;
+
+		/* DMA transfer size to use */
+		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
+		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
+		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
+		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
+		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
+		/* FIXME: instability issues can occur for EIP97 but disabling
+		 * it impacts performance.
+		 */
+		if (priv->version == EIP197B || priv->version == EIP197D)
+			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
+
+		/* Take the DSE threads out of reset */
+		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+		/* Configure the processing engine thresholds */
+		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
+		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
+		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
+
+		/* Processing Engine configuration */
+
+		/* H/W capabilities selection */
+		val = EIP197_FUNCTION_RSVD;
+		val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
+		val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
+		val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
+		val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
+		val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
+		val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
+		val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
+		val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
+		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
+	}
+
+	/* Command Descriptor Rings prepare */
+	for (i = 0; i < priv->config.rings; i++) {
+		/* Clear interrupts for this ring */
+		writel(GENMASK(31, 0),
+		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
+
+		/* Disable external triggering */
+		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+		/* Clear the pending prepared counter */
+		writel(EIP197_xDR_PREP_CLR_COUNT,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
+
+		/* Clear the pending processed counter */
+		writel(EIP197_xDR_PROC_CLR_COUNT,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
+
+		writel(0,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
+		writel(0,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
+
+		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
+	}
+
+	/* Result Descriptor Ring prepare */
+	for (i = 0; i < priv->config.rings; i++) {
+		/* Disable external triggering */
+		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+		/* Clear the pending prepared counter */
+		writel(EIP197_xDR_PREP_CLR_COUNT,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
+
+		/* Clear the pending processed counter */
+		writel(EIP197_xDR_PROC_CLR_COUNT,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
+
+		writel(0,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
+		writel(0,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
+
+		/* Ring size */
+		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
+	}
+
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		/* Enable command descriptor rings */
+		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+		/* Enable result descriptor rings */
+		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+	}
+
+	/* Clear any HIA interrupt */
+	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
+
+	if (priv->version == EIP197B || priv->version == EIP197D) {
+		eip197_trc_cache_init(priv);
+
+		ret = eip197_load_firmwares(priv);
+		if (ret)
+			return ret;
+	}
+
+	safexcel_hw_setup_cdesc_rings(priv);
+	safexcel_hw_setup_rdesc_rings(priv);
+
+	return 0;
+}
+
+/* Called with ring's lock taken */
+static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
+				       int ring)
+{
+	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
+
+	if (!coal)
+		return;
+
+	/* Configure when we want an interrupt */
+	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
+	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
+}
+
+void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
+{
+	struct crypto_async_request *req, *backlog;
+	struct safexcel_context *ctx;
+	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
+
+	/* If a request wasn't properly dequeued because of a lack of resources,
+	 * process it first.
+	 */
+	req = priv->ring[ring].req;
+	backlog = priv->ring[ring].backlog;
+	if (req)
+		goto handle_req;
+
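+	/* Pull requests off the per-ring crypto queue until it is empty or
+	 * until the descriptor rings run out of room.
+	 */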
+	while (true) {
+		spin_lock_bh(&priv->ring[ring].queue_lock);
+		backlog = crypto_get_backlog(&priv->ring[ring].queue);
+		req = crypto_dequeue_request(&priv->ring[ring].queue);
+		spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+		if (!req) {
+			priv->ring[ring].req = NULL;
+			priv->ring[ring].backlog = NULL;
+			goto finalize;
+		}
+
+handle_req:
+		ctx = crypto_tfm_ctx(req->tfm);
+		ret = ctx->send(req, ring, &commands, &results);
+		if (ret)
+			goto request_failed;
+
+		if (backlog)
+			backlog->complete(backlog, -EINPROGRESS);
+
+		/* In case the send() helper did not issue any command to push
+		 * to the engine because the input data was cached, continue to
+		 * dequeue other requests as this is valid and not an error.
+		 */
+		if (!commands && !results)
+			continue;
+
+		cdesc += commands;
+		rdesc += results;
+		nreq++;
+	}
+
+request_failed:
+	/* Not enough resources to handle all the requests. Bail out and save
+	 * the request and the backlog for the next dequeue call (per-ring).
+	 */
+	priv->ring[ring].req = req;
+	priv->ring[ring].backlog = backlog;
+
+finalize:
+	if (!nreq)
+		return;
+
+	spin_lock_bh(&priv->ring[ring].lock);
+
+	priv->ring[ring].requests += nreq;
+
+	if (!priv->ring[ring].busy) {
+		safexcel_try_push_requests(priv, ring);
+		priv->ring[ring].busy = true;
+	}
+
+	spin_unlock_bh(&priv->ring[ring].lock);
+
+	/* let the RDR know we have pending descriptors */
+	writel((rdesc * priv->config.rd_offset) << 2,
+	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
+
+	/* let the CDR know we have pending descriptors */
+	writel((cdesc * priv->config.cd_offset) << 2,
+	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
+}
+
+inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
+				       struct safexcel_result_desc *rdesc)
+{
+	if (likely(!rdesc->result_data.error_code))
+		return 0;
+
+	if (rdesc->result_data.error_code & 0x407f) {
+		/* Fatal error (bits 0-6, 14) */
+		dev_err(priv->dev,
+			"cipher: result: result descriptor error (%d)\n",
+			rdesc->result_data.error_code);
+		return -EIO;
+	} else if (rdesc->result_data.error_code == BIT(9)) {
+		/* Authentication failed */
+		return -EBADMSG;
+	}
+
+	/* All other non-fatal errors */
+	return -EINVAL;
+}
+
+inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+				 int ring,
+				 struct safexcel_result_desc *rdesc,
+				 struct crypto_async_request *req)
+{
+	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
+
+	priv->ring[ring].rdr_req[i] = req;
+}
+
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
+{
+	int i = safexcel_ring_first_rdr_index(priv, ring);
+
+	return priv->ring[ring].rdr_req[i];
+}
+
+void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
+{
+	struct safexcel_command_desc *cdesc;
+
+	/* Acknowledge the command descriptors */
+	do {
+		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
+		if (IS_ERR(cdesc)) {
+			dev_err(priv->dev,
+				"Could not retrieve the command descriptor\n");
+			return;
+		}
+	} while (!cdesc->last_seg);
+}
+
+void safexcel_inv_complete(struct crypto_async_request *req, int error)
+{
+	struct safexcel_inv_result *result = req->data;
+
+	if (error == -EINPROGRESS)
+		return;
+
+	result->error = error;
+	complete(&result->completion);
+}
+
+int safexcel_invalidate_cache(struct crypto_async_request *async,
+			      struct safexcel_crypto_priv *priv,
+			      dma_addr_t ctxr_dma, int ring)
+{
+	struct safexcel_command_desc *cdesc;
+	struct safexcel_result_desc *rdesc;
+	int ret = 0;
+
+	/* Prepare command descriptor */
+	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
+	if (IS_ERR(cdesc))
+		return PTR_ERR(cdesc);
+
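+	/*
+	 * No payload is moved here: the extended command descriptor only
+	 * carries a CONTEXT_CONTROL_INV_TR instruction asking the engine to
+	 * invalidate the transform record at ctxr_dma.
+	 */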
+	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
+	cdesc->control_data.options = 0;
+	cdesc->control_data.refresh = 0;
+	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
+
+	/* Prepare result descriptor */
+	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
+
+	if (IS_ERR(rdesc)) {
+		ret = PTR_ERR(rdesc);
+		goto cdesc_rollback;
+	}
+
+	safexcel_rdr_req_set(priv, ring, rdesc, async);
+
+	return ret;
+
+cdesc_rollback:
+	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
+
+	return ret;
+}
+
+static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
+						     int ring)
+{
+	struct crypto_async_request *req;
+	struct safexcel_context *ctx;
+	int ret, i, nreq, ndesc, tot_descs, handled = 0;
+	bool should_complete;
+
+handle_results:
+	tot_descs = 0;
+
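+	/* The processed count register reports how many packets the engine
+	 * completed since it was last acknowledged.
+	 */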
+	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
+	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
+	if (!nreq)
+		goto requests_left;
+
+	for (i = 0; i < nreq; i++) {
+		req = safexcel_rdr_req_get(priv, ring);
+
+		ctx = crypto_tfm_ctx(req->tfm);
+		ndesc = ctx->handle_result(priv, ring, req,
+					   &should_complete, &ret);
+		if (ndesc < 0) {
+			dev_err(priv->dev, "failed to handle result (%d)\n",
+				ndesc);
+			goto acknowledge;
+		}
+
+		if (should_complete) {
+			local_bh_disable();
+			req->complete(req, ret);
+			local_bh_enable();
+		}
+
+		tot_descs += ndesc;
+		handled++;
+	}
+
+acknowledge:
+	if (i) {
+		writel(EIP197_xDR_PROC_xD_PKT(i) |
+		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
+		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+	}
+
+	/* If the number of requests overflowed the counter, try to process
+	 * more requests.
+	 */
+	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
+		goto handle_results;
+
+requests_left:
+	spin_lock_bh(&priv->ring[ring].lock);
+
+	priv->ring[ring].requests -= handled;
+	safexcel_try_push_requests(priv, ring);
+
+	if (!priv->ring[ring].requests)
+		priv->ring[ring].busy = false;
+
+	spin_unlock_bh(&priv->ring[ring].lock);
+}
+
+static void safexcel_dequeue_work(struct work_struct *work)
+{
+	struct safexcel_work_data *data =
+			container_of(work, struct safexcel_work_data, work);
+
+	safexcel_dequeue(data->priv, data->ring);
+}
+
+struct safexcel_ring_irq_data {
+	struct safexcel_crypto_priv *priv;
+	int ring;
+};
+
+static irqreturn_t safexcel_irq_ring(int irq, void *data)
+{
+	struct safexcel_ring_irq_data *irq_data = data;
+	struct safexcel_crypto_priv *priv = irq_data->priv;
+	int ring = irq_data->ring, rc = IRQ_NONE;
+	u32 status, stat;
+
+	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
+	if (!status)
+		return rc;
+
+	/* RDR interrupts */
+	if (status & EIP197_RDR_IRQ(ring)) {
+		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
+
+		if (unlikely(stat & EIP197_xDR_ERR)) {
+			/*
+			 * Fatal error, the RDR is unusable and must be
+			 * reinitialized. This should not happen under
+			 * normal circumstances.
+			 */
+			dev_err(priv->dev, "RDR: fatal error.\n");
+		} else if (likely(stat & EIP197_xDR_THRESH)) {
+			rc = IRQ_WAKE_THREAD;
+		}
+
+		/* ACK the interrupts */
+		writel(stat & 0xff,
+		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
+	}
+
+	/* ACK the interrupts */
+	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
+
+	return rc;
+}
+
+static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
+{
+	struct safexcel_ring_irq_data *irq_data = data;
+	struct safexcel_crypto_priv *priv = irq_data->priv;
+	int ring = irq_data->ring;
+
+	safexcel_handle_result_descriptor(priv, ring);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	return IRQ_HANDLED;
+}
+
+static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
+				     irq_handler_t handler,
+				     irq_handler_t threaded_handler,
+				     struct safexcel_ring_irq_data *ring_irq_priv)
+{
+	int ret, irq = platform_get_irq_byname(pdev, name);
+
+	if (irq < 0) {
+		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
+		return irq;
+	}
+
+	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
+					threaded_handler, IRQF_ONESHOT,
+					dev_name(&pdev->dev), ring_irq_priv);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
+		return ret;
+	}
+
+	return irq;
+}
+
+static struct safexcel_alg_template *safexcel_algs[] = {
+	&safexcel_alg_ecb_des,
+	&safexcel_alg_cbc_des,
+	&safexcel_alg_ecb_des3_ede,
+	&safexcel_alg_cbc_des3_ede,
+	&safexcel_alg_ecb_aes,
+	&safexcel_alg_cbc_aes,
+	&safexcel_alg_md5,
+	&safexcel_alg_sha1,
+	&safexcel_alg_sha224,
+	&safexcel_alg_sha256,
+	&safexcel_alg_sha384,
+	&safexcel_alg_sha512,
+	&safexcel_alg_hmac_md5,
+	&safexcel_alg_hmac_sha1,
+	&safexcel_alg_hmac_sha224,
+	&safexcel_alg_hmac_sha256,
+	&safexcel_alg_hmac_sha384,
+	&safexcel_alg_hmac_sha512,
+	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
+};
+
+static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
+{
+	int i, j, ret = 0;
+
+	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
+		safexcel_algs[i]->priv = priv;
+
+		if (!(safexcel_algs[i]->engines & priv->version))
+			continue;
+
+		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
+			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
+		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
+			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
+		else
+			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);
+
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	for (j = 0; j < i; j++) {
+		if (!(safexcel_algs[j]->engines & priv->version))
+			continue;
+
+		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
+			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
+		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
+			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
+		else
+			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
+	}
+
+	return ret;
+}
+
+static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
+		if (!(safexcel_algs[i]->engines & priv->version))
+			continue;
+
+		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
+			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
+		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
+			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
+		else
+			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
+	}
+}
+
+static void safexcel_configure(struct safexcel_crypto_priv *priv)
+{
+	u32 val, mask = 0;
+
+	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+
+	/* Read number of PEs from the engine */
+	switch (priv->version) {
+	case EIP197B:
+	case EIP197D:
+		mask = EIP197_N_PES_MASK;
+		break;
+	default:
+		mask = EIP97_N_PES_MASK;
+	}
+	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
+
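+	/* Build an alignment mask from the host data width so the descriptor
+	 * offsets below are rounded up to a whole number of host words.
+	 */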
+	val = (val & GENMASK(27, 25)) >> 25;
+	mask = BIT(val) - 1;
+
+	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
+
+	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
+	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
+
+	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
+	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
+}
+
+static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
+{
+	struct safexcel_register_offsets *offsets = &priv->offsets;
+
+	switch (priv->version) {
+	case EIP197B:
+	case EIP197D:
+		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
+		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
+		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
+		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
+		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
+		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
+		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
+		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
+		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
+		offsets->pe		= EIP197_PE_BASE;
+		break;
+	case EIP97IES:
+		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
+		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
+		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
+		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
+		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
+		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
+		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
+		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
+		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
+		offsets->pe		= EIP97_PE_BASE;
+		break;
+	}
+}
+
+static int safexcel_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct safexcel_crypto_priv *priv;
+	int i, ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+
+	if (priv->version == EIP197B || priv->version == EIP197D)
+		priv->flags |= EIP197_TRC_CACHE;
+
+	safexcel_init_register_offsets(priv);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->base)) {
+		dev_err(dev, "failed to get resource\n");
+		return PTR_ERR(priv->base);
+	}
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	ret = PTR_ERR_OR_ZERO(priv->clk);
+	/* The clock isn't mandatory */
+	if (ret != -ENOENT) {
+		if (ret)
+			return ret;
+
+		ret = clk_prepare_enable(priv->clk);
+		if (ret) {
+			dev_err(dev, "unable to enable clk (%d)\n", ret);
+			return ret;
+		}
+	}
+
+	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
+	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
+	/* The clock isn't mandatory */
+	if (ret != -ENOENT) {
+		if (ret)
+			goto err_core_clk;
+
+		ret = clk_prepare_enable(priv->reg_clk);
+		if (ret) {
+			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
+			goto err_core_clk;
+		}
+	}
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (ret)
+		goto err_reg_clk;
+
+	priv->context_pool = dmam_pool_create("safexcel-context", dev,
+					      sizeof(struct safexcel_context_record),
+					      1, 0);
+	if (!priv->context_pool) {
+		ret = -ENOMEM;
+		goto err_reg_clk;
+	}
+
+	safexcel_configure(priv);
+
+	priv->ring = devm_kcalloc(dev, priv->config.rings,
+				  sizeof(*priv->ring),
+				  GFP_KERNEL);
+	if (!priv->ring) {
+		ret = -ENOMEM;
+		goto err_reg_clk;
+	}
+
+	for (i = 0; i < priv->config.rings; i++) {
+		char irq_name[6] = {0}; /* "ringX\0" */
+		char wq_name[9] = {0}; /* "wq_ringX\0" */
+		int irq;
+		struct safexcel_ring_irq_data *ring_irq;
+
+		ret = safexcel_init_ring_descriptors(priv,
+						     &priv->ring[i].cdr,
+						     &priv->ring[i].rdr);
+		if (ret)
+			goto err_reg_clk;
+
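+		/* One crypto API request slot per result descriptor entry */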
+		priv->ring[i].rdr_req = devm_kcalloc(dev,
+			EIP197_DEFAULT_RING_SIZE,
+			sizeof(priv->ring[i].rdr_req),
+			GFP_KERNEL);
+		if (!priv->ring[i].rdr_req) {
+			ret = -ENOMEM;
+			goto err_reg_clk;
+		}
+
+		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
+		if (!ring_irq) {
+			ret = -ENOMEM;
+			goto err_reg_clk;
+		}
+
+		ring_irq->priv = priv;
+		ring_irq->ring = i;
+
+		snprintf(irq_name, 6, "ring%d", i);
+		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
+						safexcel_irq_ring_thread,
+						ring_irq);
+		if (irq < 0) {
+			ret = irq;
+			goto err_reg_clk;
+		}
+
+		priv->ring[i].work_data.priv = priv;
+		priv->ring[i].work_data.ring = i;
+		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
+
+		snprintf(wq_name, 9, "wq_ring%d", i);
+		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
+		if (!priv->ring[i].workqueue) {
+			ret = -ENOMEM;
+			goto err_reg_clk;
+		}
+
+		priv->ring[i].requests = 0;
+		priv->ring[i].busy = false;
+
+		crypto_init_queue(&priv->ring[i].queue,
+				  EIP197_DEFAULT_RING_SIZE);
+
+		spin_lock_init(&priv->ring[i].lock);
+		spin_lock_init(&priv->ring[i].queue_lock);
+	}
+
+	platform_set_drvdata(pdev, priv);
+	atomic_set(&priv->ring_used, 0);
+
+	ret = safexcel_hw_init(priv);
+	if (ret) {
+		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
+		goto err_reg_clk;
+	}
+
+	ret = safexcel_register_algorithms(priv);
+	if (ret) {
+		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
+		goto err_reg_clk;
+	}
+
+	return 0;
+
+err_reg_clk:
+	clk_disable_unprepare(priv->reg_clk);
+err_core_clk:
+	clk_disable_unprepare(priv->clk);
+	return ret;
+}
+
+static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->config.rings; i++) {
+		/* clear any pending interrupt */
+		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
+		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
+
+		/* Reset the CDR base address */
+		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+		/* Reset the RDR base address */
+		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+	}
+}
+
+static int safexcel_remove(struct platform_device *pdev)
+{
+	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
+	int i;
+
+	safexcel_unregister_algorithms(priv);
+	safexcel_hw_reset_rings(priv);
+
+	clk_disable_unprepare(priv->clk);
+
+	for (i = 0; i < priv->config.rings; i++)
+		destroy_workqueue(priv->ring[i].workqueue);
+
+	return 0;
+}
+
+static const struct of_device_id safexcel_of_match_table[] = {
+	{
+		.compatible = "inside-secure,safexcel-eip97ies",
+		.data = (void *)EIP97IES,
+	},
+	{
+		.compatible = "inside-secure,safexcel-eip197b",
+		.data = (void *)EIP197B,
+	},
+	{
+		.compatible = "inside-secure,safexcel-eip197d",
+		.data = (void *)EIP197D,
+	},
+	{
+		/* Deprecated. Kept for backward compatibility. */
+		.compatible = "inside-secure,safexcel-eip97",
+		.data = (void *)EIP97IES,
+	},
+	{
+		/* Deprecated. Kept for backward compatibility. */
+		.compatible = "inside-secure,safexcel-eip197",
+		.data = (void *)EIP197B,
+	},
+	{},
+};
+
+static struct platform_driver crypto_safexcel = {
+	.probe		= safexcel_probe,
+	.remove		= safexcel_remove,
+	.driver		= {
+		.name	= "crypto-safexcel",
+		.of_match_table = safexcel_of_match_table,
+	},
+};
+module_platform_driver(crypto_safexcel);
+
+MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
+MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
+MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
+MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
new file mode 100644
index 0000000..65624a8
--- /dev/null
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -0,0 +1,722 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#ifndef __SAFEXCEL_H__
+#define __SAFEXCEL_H__
+
+#include <crypto/aead.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/skcipher.h>
+
+#define EIP197_HIA_VERSION_LE			0xca35
+#define EIP197_HIA_VERSION_BE			0x35ca
+
+/* Static configuration */
+#define EIP197_DEFAULT_RING_SIZE		400
+#define EIP197_MAX_TOKENS			8
+#define EIP197_MAX_RINGS			4
+#define EIP197_FETCH_COUNT			1
+#define EIP197_MAX_BATCH_SZ			64
+
+#define EIP197_GFP_FLAGS(base)	((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
+				 GFP_KERNEL : GFP_ATOMIC)
+
+/* Custom on-stack requests (for invalidation) */
+#define EIP197_SKCIPHER_REQ_SIZE	sizeof(struct skcipher_request) + \
+					sizeof(struct safexcel_cipher_req)
+#define EIP197_AHASH_REQ_SIZE		sizeof(struct ahash_request) + \
+					sizeof(struct safexcel_ahash_req)
+#define EIP197_AEAD_REQ_SIZE		sizeof(struct aead_request) + \
+					sizeof(struct safexcel_cipher_req)
+#define EIP197_REQUEST_ON_STACK(name, type, size) \
+	char __##name##_desc[size] CRYPTO_MINALIGN_ATTR; \
+	struct type##_request *name = (void *)__##name##_desc
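+/*
+ * Typical use, e.g. from an invalidation path:
+ *	EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE);
+ */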
+
+/* Register base offsets */
+#define EIP197_HIA_AIC(priv)		((priv)->base + (priv)->offsets.hia_aic)
+#define EIP197_HIA_AIC_G(priv)		((priv)->base + (priv)->offsets.hia_aic_g)
+#define EIP197_HIA_AIC_R(priv)		((priv)->base + (priv)->offsets.hia_aic_r)
+#define EIP197_HIA_AIC_xDR(priv)	((priv)->base + (priv)->offsets.hia_aic_xdr)
+#define EIP197_HIA_DFE(priv)		((priv)->base + (priv)->offsets.hia_dfe)
+#define EIP197_HIA_DFE_THR(priv)	((priv)->base + (priv)->offsets.hia_dfe_thr)
+#define EIP197_HIA_DSE(priv)		((priv)->base + (priv)->offsets.hia_dse)
+#define EIP197_HIA_DSE_THR(priv)	((priv)->base + (priv)->offsets.hia_dse_thr)
+#define EIP197_HIA_GEN_CFG(priv)	((priv)->base + (priv)->offsets.hia_gen_cfg)
+#define EIP197_PE(priv)			((priv)->base + (priv)->offsets.pe)
+
+/* EIP197 base offsets */
+#define EIP197_HIA_AIC_BASE		0x90000
+#define EIP197_HIA_AIC_G_BASE		0x90000
+#define EIP197_HIA_AIC_R_BASE		0x90800
+#define EIP197_HIA_AIC_xDR_BASE		0x80000
+#define EIP197_HIA_DFE_BASE		0x8c000
+#define EIP197_HIA_DFE_THR_BASE		0x8c040
+#define EIP197_HIA_DSE_BASE		0x8d000
+#define EIP197_HIA_DSE_THR_BASE		0x8d040
+#define EIP197_HIA_GEN_CFG_BASE		0xf0000
+#define EIP197_PE_BASE			0xa0000
+
+/* EIP97 base offsets */
+#define EIP97_HIA_AIC_BASE		0x0
+#define EIP97_HIA_AIC_G_BASE		0x0
+#define EIP97_HIA_AIC_R_BASE		0x0
+#define EIP97_HIA_AIC_xDR_BASE		0x0
+#define EIP97_HIA_DFE_BASE		0xf000
+#define EIP97_HIA_DFE_THR_BASE		0xf200
+#define EIP97_HIA_DSE_BASE		0xf400
+#define EIP97_HIA_DSE_THR_BASE		0xf600
+#define EIP97_HIA_GEN_CFG_BASE		0x10000
+#define EIP97_PE_BASE			0x10000
+
+/* CDR/RDR register offsets */
+#define EIP197_HIA_xDR_OFF(priv, r)		(EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000)
+#define EIP197_HIA_CDR(priv, r)			(EIP197_HIA_xDR_OFF(priv, r))
+#define EIP197_HIA_RDR(priv, r)			(EIP197_HIA_xDR_OFF(priv, r) + 0x800)
+#define EIP197_HIA_xDR_RING_BASE_ADDR_LO	0x0000
+#define EIP197_HIA_xDR_RING_BASE_ADDR_HI	0x0004
+#define EIP197_HIA_xDR_RING_SIZE		0x0018
+#define EIP197_HIA_xDR_DESC_SIZE		0x001c
+#define EIP197_HIA_xDR_CFG			0x0020
+#define EIP197_HIA_xDR_DMA_CFG			0x0024
+#define EIP197_HIA_xDR_THRESH			0x0028
+#define EIP197_HIA_xDR_PREP_COUNT		0x002c
+#define EIP197_HIA_xDR_PROC_COUNT		0x0030
+#define EIP197_HIA_xDR_PREP_PNTR		0x0034
+#define EIP197_HIA_xDR_PROC_PNTR		0x0038
+#define EIP197_HIA_xDR_STAT			0x003c
+
+/* register offsets */
+#define EIP197_HIA_DFE_CFG(n)			(0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_CTRL(n)		(0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_STAT(n)		(0x0004 + (128 * (n)))
+#define EIP197_HIA_DSE_CFG(n)			(0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_CTRL(n)		(0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_STAT(n)		(0x0004 + (128 * (n)))
+#define EIP197_HIA_RA_PE_CTRL(n)		(0x0010 + (8   * (n)))
+#define EIP197_HIA_RA_PE_STAT			0x0014
+#define EIP197_HIA_AIC_R_OFF(r)			((r) * 0x1000)
+#define EIP197_HIA_AIC_R_ENABLE_CTRL(r)		(0xe008 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLED_STAT(r)	(0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ACK(r)			(0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLE_CLR(r)		(0xe014 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_G_ENABLE_CTRL		0xf808
+#define EIP197_HIA_AIC_G_ENABLED_STAT		0xf810
+#define EIP197_HIA_AIC_G_ACK			0xf810
+#define EIP197_HIA_MST_CTRL			0xfff4
+#define EIP197_HIA_OPTIONS			0xfff8
+#define EIP197_HIA_VERSION			0xfffc
+#define EIP197_PE_IN_DBUF_THRES(n)		(0x0000 + (0x2000 * (n)))
+#define EIP197_PE_IN_TBUF_THRES(n)		(0x0100 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_RAM(n)		(0x0800 + (0x2000 * (n)))
+#define EIP197_PE_ICE_PUE_CTRL(n)		(0x0c80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_CTRL(n)		(0x0d04 + (0x2000 * (n)))
+#define EIP197_PE_ICE_FPP_CTRL(n)		(0x0d80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_RAM_CTRL(n)		(0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_FUNCTION_EN(n)		(0x1004 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_CTRL(n)		(0x1008 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_STAT(n)		(0x100c + (0x2000 * (n)))
+#define EIP197_PE_OUT_DBUF_THRES(n)		(0x1c00 + (0x2000 * (n)))
+#define EIP197_PE_OUT_TBUF_THRES(n)		(0x1d00 + (0x2000 * (n)))
+#define EIP197_MST_CTRL				0xfff4
+
+/* EIP197-specific registers, no indirection */
+#define EIP197_CLASSIFICATION_RAMS		0xe0000
+#define EIP197_TRC_CTRL				0xf0800
+#define EIP197_TRC_LASTRES			0xf0804
+#define EIP197_TRC_REGINDEX			0xf0808
+#define EIP197_TRC_PARAMS			0xf0820
+#define EIP197_TRC_FREECHAIN			0xf0824
+#define EIP197_TRC_PARAMS2			0xf0828
+#define EIP197_TRC_ECCCTRL			0xf0830
+#define EIP197_TRC_ECCSTAT			0xf0834
+#define EIP197_TRC_ECCADMINSTAT			0xf0838
+#define EIP197_TRC_ECCDATASTAT			0xf083c
+#define EIP197_TRC_ECCDATA			0xf0840
+#define EIP197_CS_RAM_CTRL			0xf7ff0
+
+/* EIP197_HIA_xDR_DESC_SIZE */
+#define EIP197_xDR_DESC_MODE_64BIT		BIT(31)
+
+/* EIP197_HIA_xDR_DMA_CFG */
+#define EIP197_HIA_xDR_WR_RES_BUF		BIT(22)
+#define EIP197_HIA_xDR_WR_CTRL_BUF		BIT(23)
+#define EIP197_HIA_xDR_WR_OWN_BUF		BIT(24)
+#define EIP197_HIA_xDR_CFG_WR_CACHE(n)		(((n) & 0x7) << 25)
+#define EIP197_HIA_xDR_CFG_RD_CACHE(n)		(((n) & 0x7) << 29)
+
+/* EIP197_HIA_CDR_THRESH */
+#define EIP197_HIA_CDR_THRESH_PROC_PKT(n)	(n)
+#define EIP197_HIA_CDR_THRESH_PROC_MODE		BIT(22)
+#define EIP197_HIA_CDR_THRESH_PKT_MODE		BIT(23)
+#define EIP197_HIA_CDR_THRESH_TIMEOUT(n)	((n) << 24) /* x256 clk cycles */
+
+/* EIP197_HIA_RDR_THRESH */
+#define EIP197_HIA_RDR_THRESH_PROC_PKT(n)	(n)
+#define EIP197_HIA_RDR_THRESH_PKT_MODE		BIT(23)
+#define EIP197_HIA_RDR_THRESH_TIMEOUT(n)	((n) << 24) /* x256 clk cycles */
+
+/* EIP197_HIA_xDR_PREP_COUNT */
+#define EIP197_xDR_PREP_CLR_COUNT		BIT(31)
+
+/* EIP197_HIA_xDR_PROC_COUNT */
+#define EIP197_xDR_PROC_xD_PKT_OFFSET		24
+#define EIP197_xDR_PROC_xD_PKT_MASK		GENMASK(6, 0)
+#define EIP197_xDR_PROC_xD_COUNT(n)		((n) << 2)
+#define EIP197_xDR_PROC_xD_PKT(n)		((n) << 24)
+#define EIP197_xDR_PROC_CLR_COUNT		BIT(31)
+
+/* EIP197_HIA_xDR_STAT */
+#define EIP197_xDR_DMA_ERR			BIT(0)
+#define EIP197_xDR_PREP_CMD_THRES		BIT(1)
+#define EIP197_xDR_ERR				BIT(2)
+#define EIP197_xDR_THRESH			BIT(4)
+#define EIP197_xDR_TIMEOUT			BIT(5)
+
+#define EIP197_HIA_RA_PE_CTRL_RESET		BIT(31)
+#define EIP197_HIA_RA_PE_CTRL_EN		BIT(30)
+
+/* EIP197_HIA_OPTIONS */
+#define EIP197_N_PES_OFFSET			4
+#define EIP197_N_PES_MASK			GENMASK(4, 0)
+#define EIP97_N_PES_MASK			GENMASK(2, 0)
+
+/* EIP197_HIA_AIC_R_ENABLE_CTRL */
+#define EIP197_CDR_IRQ(n)			BIT((n) * 2)
+#define EIP197_RDR_IRQ(n)			BIT((n) * 2 + 1)
+
+/* EIP197_HIA_DFE/DSE_CFG */
+#define EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(n)	((n) << 0)
+#define EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(n)	(((n) & 0x7) << 4)
+#define EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(n)	((n) << 8)
+#define EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE	GENMASK(15, 14)
+#define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n)	((n) << 16)
+#define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n)	(((n) & 0x7) << 20)
+#define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n)	((n) << 24)
+#define EIP197_HIA_DFE_CFG_DIS_DEBUG		(BIT(31) | BIT(29))
+#define EIP197_HIA_DSE_CFG_EN_SINGLE_WR		BIT(29)
+#define EIP197_HIA_DSE_CFG_DIS_DEBUG		BIT(31)
+
+/* EIP197_HIA_DFE/DSE_THR_CTRL */
+#define EIP197_DxE_THR_CTRL_EN			BIT(30)
+#define EIP197_DxE_THR_CTRL_RESET_PE		BIT(31)
+
+/* EIP197_HIA_AIC_G_ENABLED_STAT */
+#define EIP197_G_IRQ_DFE(n)			BIT((n) << 1)
+#define EIP197_G_IRQ_DSE(n)			BIT(((n) << 1) + 1)
+#define EIP197_G_IRQ_RING			BIT(16)
+#define EIP197_G_IRQ_PE(n)			BIT((n) + 20)
+
+/* EIP197_HIA_MST_CTRL */
+#define RD_CACHE_3BITS				0x5
+#define WR_CACHE_3BITS				0x3
+#define RD_CACHE_4BITS				(RD_CACHE_3BITS << 1 | BIT(0))
+#define WR_CACHE_4BITS				(WR_CACHE_3BITS << 1 | BIT(0))
+#define EIP197_MST_CTRL_RD_CACHE(n)		(((n) & 0xf) << 0)
+#define EIP197_MST_CTRL_WD_CACHE(n)		(((n) & 0xf) << 4)
+#define EIP197_MST_CTRL_TX_MAX_CMD(n)		(((n) & 0xf) << 20)
+#define EIP197_MST_CTRL_BYTE_SWAP		BIT(24)
+#define EIP197_MST_CTRL_NO_BYTE_SWAP		BIT(25)
+
+/* EIP197_PE_IN_DBUF/TBUF_THRES */
+#define EIP197_PE_IN_xBUF_THRES_MIN(n)		((n) << 8)
+#define EIP197_PE_IN_xBUF_THRES_MAX(n)		((n) << 12)
+
+/* EIP197_PE_OUT_DBUF_THRES */
+#define EIP197_PE_OUT_DBUF_THRES_MIN(n)		((n) << 0)
+#define EIP197_PE_OUT_DBUF_THRES_MAX(n)		((n) << 4)
+
+/* EIP197_PE_ICE_SCRATCH_CTRL */
+#define EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER		BIT(2)
+#define EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN		BIT(3)
+#define EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS	BIT(24)
+#define EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS	BIT(25)
+
+/* EIP197_PE_ICE_SCRATCH_RAM */
+#define EIP197_NUM_OF_SCRATCH_BLOCKS		32
+
+/* EIP197_PE_ICE_PUE/FPP_CTRL */
+#define EIP197_PE_ICE_x_CTRL_SW_RESET			BIT(0)
+#define EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR		BIT(14)
+#define EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR		BIT(15)
+
+/* EIP197_PE_ICE_RAM_CTRL */
+#define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN	BIT(0)
+#define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN	BIT(1)
+
+/* EIP197_PE_EIP96_FUNCTION_EN */
+#define EIP197_FUNCTION_RSVD			(BIT(6) | BIT(15) | BIT(20) | BIT(23))
+#define EIP197_PROTOCOL_HASH_ONLY		BIT(0)
+#define EIP197_PROTOCOL_ENCRYPT_ONLY		BIT(1)
+#define EIP197_PROTOCOL_HASH_ENCRYPT		BIT(2)
+#define EIP197_PROTOCOL_HASH_DECRYPT		BIT(3)
+#define EIP197_PROTOCOL_ENCRYPT_HASH		BIT(4)
+#define EIP197_PROTOCOL_DECRYPT_HASH		BIT(5)
+#define EIP197_ALG_ARC4				BIT(7)
+#define EIP197_ALG_AES_ECB			BIT(8)
+#define EIP197_ALG_AES_CBC			BIT(9)
+#define EIP197_ALG_AES_CTR_ICM			BIT(10)
+#define EIP197_ALG_AES_OFB			BIT(11)
+#define EIP197_ALG_AES_CFB			BIT(12)
+#define EIP197_ALG_DES_ECB			BIT(13)
+#define EIP197_ALG_DES_CBC			BIT(14)
+#define EIP197_ALG_DES_OFB			BIT(16)
+#define EIP197_ALG_DES_CFB			BIT(17)
+#define EIP197_ALG_3DES_ECB			BIT(18)
+#define EIP197_ALG_3DES_CBC			BIT(19)
+#define EIP197_ALG_3DES_OFB			BIT(21)
+#define EIP197_ALG_3DES_CFB			BIT(22)
+#define EIP197_ALG_MD5				BIT(24)
+#define EIP197_ALG_HMAC_MD5			BIT(25)
+#define EIP197_ALG_SHA1				BIT(26)
+#define EIP197_ALG_HMAC_SHA1			BIT(27)
+#define EIP197_ALG_SHA2				BIT(28)
+#define EIP197_ALG_HMAC_SHA2			BIT(29)
+#define EIP197_ALG_AES_XCBC_MAC			BIT(30)
+#define EIP197_ALG_GCM_HASH			BIT(31)
+
+/* EIP197_PE_EIP96_CONTEXT_CTRL */
+#define EIP197_CONTEXT_SIZE(n)			(n)
+#define EIP197_ADDRESS_MODE			BIT(8)
+#define EIP197_CONTROL_MODE			BIT(9)
+
+/* Context Control */
+struct safexcel_context_record {
+	u32 control0;
+	u32 control1;
+
+	__le32 data[40];
+} __packed;
+
+/* control0 */
+#define CONTEXT_CONTROL_TYPE_NULL_OUT		0x0
+#define CONTEXT_CONTROL_TYPE_NULL_IN		0x1
+#define CONTEXT_CONTROL_TYPE_HASH_OUT		0x2
+#define CONTEXT_CONTROL_TYPE_HASH_IN		0x3
+#define CONTEXT_CONTROL_TYPE_CRYPTO_OUT		0x4
+#define CONTEXT_CONTROL_TYPE_CRYPTO_IN		0x5
+#define CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT	0x6
+#define CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN	0x7
+#define CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT	0xe
+#define CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN	0xf
+#define CONTEXT_CONTROL_RESTART_HASH		BIT(4)
+#define CONTEXT_CONTROL_NO_FINISH_HASH		BIT(5)
+#define CONTEXT_CONTROL_SIZE(n)			((n) << 8)
+#define CONTEXT_CONTROL_KEY_EN			BIT(16)
+#define CONTEXT_CONTROL_CRYPTO_ALG_DES		(0x0 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_3DES		(0x2 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_AES128	(0x5 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_AES192	(0x6 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_AES256	(0x7 << 17)
+#define CONTEXT_CONTROL_DIGEST_PRECOMPUTED	(0x1 << 21)
+#define CONTEXT_CONTROL_DIGEST_HMAC		(0x3 << 21)
+#define CONTEXT_CONTROL_CRYPTO_ALG_MD5		(0x0 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA1		(0x2 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA224	(0x4 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA256	(0x3 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA384	(0x6 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA512	(0x5 << 23)
+#define CONTEXT_CONTROL_INV_FR			(0x5 << 24)
+#define CONTEXT_CONTROL_INV_TR			(0x6 << 24)
+
+/* control1 */
+#define CONTEXT_CONTROL_CRYPTO_MODE_ECB		(0 << 0)
+#define CONTEXT_CONTROL_CRYPTO_MODE_CBC		(1 << 0)
+#define CONTEXT_CONTROL_IV0			BIT(5)
+#define CONTEXT_CONTROL_IV1			BIT(6)
+#define CONTEXT_CONTROL_IV2			BIT(7)
+#define CONTEXT_CONTROL_IV3			BIT(8)
+#define CONTEXT_CONTROL_DIGEST_CNT		BIT(9)
+#define CONTEXT_CONTROL_COUNTER_MODE		BIT(10)
+#define CONTEXT_CONTROL_HASH_STORE		BIT(19)
+
+/* The hash counter given to the engine in the context has a granularity of
+ * 64 bits.
+ */
+#define EIP197_COUNTER_BLOCK_SIZE		64
+
+/* EIP197_CS_RAM_CTRL */
+#define EIP197_TRC_ENABLE_0			BIT(4)
+#define EIP197_TRC_ENABLE_1			BIT(5)
+#define EIP197_TRC_ENABLE_2			BIT(6)
+#define EIP197_TRC_ENABLE_MASK			GENMASK(6, 4)
+
+/* EIP197_TRC_PARAMS */
+#define EIP197_TRC_PARAMS_SW_RESET		BIT(0)
+#define EIP197_TRC_PARAMS_DATA_ACCESS		BIT(2)
+#define EIP197_TRC_PARAMS_HTABLE_SZ(x)		((x) << 4)
+#define EIP197_TRC_PARAMS_BLK_TIMER_SPEED(x)	((x) << 10)
+#define EIP197_TRC_PARAMS_RC_SZ_LARGE(n)	((n) << 18)
+
+/* EIP197_TRC_FREECHAIN */
+#define EIP197_TRC_FREECHAIN_HEAD_PTR(p)	(p)
+#define EIP197_TRC_FREECHAIN_TAIL_PTR(p)	((p) << 16)
+
+/* EIP197_TRC_PARAMS2 */
+#define EIP197_TRC_PARAMS2_HTABLE_PTR(p)	(p)
+#define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n)	((n) << 18)
+
+/* Cache helpers */
+#define EIP197B_CS_RC_MAX			52
+#define EIP197D_CS_RC_MAX			96
+#define EIP197_CS_RC_SIZE			(4 * sizeof(u32))
+#define EIP197_CS_RC_NEXT(x)			(x)
+#define EIP197_CS_RC_PREV(x)			((x) << 10)
+#define EIP197_RC_NULL				0x3ff
+#define EIP197B_CS_TRC_REC_WC			59
+#define EIP197D_CS_TRC_REC_WC			64
+#define EIP197B_CS_TRC_LG_REC_WC		73
+#define EIP197D_CS_TRC_LG_REC_WC		80
+#define EIP197B_CS_HT_WC			64
+#define EIP197D_CS_HT_WC			256
+
+
+/* Result data */
+struct result_data_desc {
+	u32 packet_length:17;
+	u32 error_code:15;
+
+	u8 bypass_length:4;
+	u8 e15:1;
+	u16 rsvd0;
+	u8 hash_bytes:1;
+	u8 hash_length:6;
+	u8 generic_bytes:1;
+	u8 checksum:1;
+	u8 next_header:1;
+	u8 length:1;
+
+	u16 application_id;
+	u16 rsvd1;
+
+	u32 rsvd2;
+} __packed;
+
+
+/* Basic Result Descriptor format */
+struct safexcel_result_desc {
+	u32 particle_size:17;
+	u8 rsvd0:3;
+	u8 descriptor_overflow:1;
+	u8 buffer_overflow:1;
+	u8 last_seg:1;
+	u8 first_seg:1;
+	u16 result_size:8;
+
+	u32 rsvd1;
+
+	u32 data_lo;
+	u32 data_hi;
+
+	struct result_data_desc result_data;
+} __packed;
+
+struct safexcel_token {
+	u32 packet_length:17;
+	u8 stat:2;
+	u16 instructions:9;
+	u8 opcode:4;
+} __packed;
+
+#define EIP197_TOKEN_HASH_RESULT_VERIFY		BIT(16)
+
+#define EIP197_TOKEN_STAT_LAST_HASH		BIT(0)
+#define EIP197_TOKEN_STAT_LAST_PACKET		BIT(1)
+#define EIP197_TOKEN_OPCODE_DIRECTION		0x0
+#define EIP197_TOKEN_OPCODE_INSERT		0x2
+#define EIP197_TOKEN_OPCODE_NOOP		EIP197_TOKEN_OPCODE_INSERT
+#define EIP197_TOKEN_OPCODE_RETRIEVE		0x4
+#define EIP197_TOKEN_OPCODE_VERIFY		0xd
+#define EIP197_TOKEN_OPCODE_BYPASS		GENMASK(3, 0)
+
+static inline void eip197_noop_token(struct safexcel_token *token)
+{
+	token->opcode = EIP197_TOKEN_OPCODE_NOOP;
+	token->packet_length = BIT(2);
+}
+
+/* Instructions */
+#define EIP197_TOKEN_INS_INSERT_HASH_DIGEST	0x1c
+#define EIP197_TOKEN_INS_TYPE_OUTPUT		BIT(5)
+#define EIP197_TOKEN_INS_TYPE_HASH		BIT(6)
+#define EIP197_TOKEN_INS_TYPE_CRYTO		BIT(7)
+#define EIP197_TOKEN_INS_LAST			BIT(8)
+
+/* Processing Engine Control Data  */
+struct safexcel_control_data_desc {
+	u32 packet_length:17;
+	u16 options:13;
+	u8 type:2;
+
+	u16 application_id;
+	u16 rsvd;
+
+	u8 refresh:2;
+	u32 context_lo:30;
+	u32 context_hi;
+
+	u32 control0;
+	u32 control1;
+
+	u32 token[EIP197_MAX_TOKENS];
+} __packed;
+
+#define EIP197_OPTION_MAGIC_VALUE	BIT(0)
+#define EIP197_OPTION_64BIT_CTX		BIT(1)
+#define EIP197_OPTION_CTX_CTRL_IN_CMD	BIT(8)
+#define EIP197_OPTION_2_TOKEN_IV_CMD	GENMASK(11, 10)
+#define EIP197_OPTION_4_TOKEN_IV_CMD	GENMASK(11, 9)
+
+#define EIP197_TYPE_EXTENDED		0x3
+
+/* Basic Command Descriptor format */
+struct safexcel_command_desc {
+	u32 particle_size:17;
+	u8 rsvd0:5;
+	u8 last_seg:1;
+	u8 first_seg:1;
+	u16 additional_cdata_size:8;
+
+	u32 rsvd1;
+
+	u32 data_lo;
+	u32 data_hi;
+
+	struct safexcel_control_data_desc control_data;
+} __packed;
+
+/*
+ * Internal structures & functions
+ */
+
+enum eip197_fw {
+	FW_IFPP = 0,
+	FW_IPUE,
+	FW_NB
+};
+
+struct safexcel_desc_ring {
+	void *base;
+	void *base_end;
+	dma_addr_t base_dma;
+
+	/* write and read pointers */
+	void *write;
+	void *read;
+
+	/* descriptor element offset */
+	unsigned int offset;
+};
+
+enum safexcel_alg_type {
+	SAFEXCEL_ALG_TYPE_SKCIPHER,
+	SAFEXCEL_ALG_TYPE_AEAD,
+	SAFEXCEL_ALG_TYPE_AHASH,
+};
+
+struct safexcel_config {
+	u32 pes;
+	u32 rings;
+
+	u32 cd_size;
+	u32 cd_offset;
+
+	u32 rd_size;
+	u32 rd_offset;
+};
+
+struct safexcel_work_data {
+	struct work_struct work;
+	struct safexcel_crypto_priv *priv;
+	int ring;
+};
+
+struct safexcel_ring {
+	spinlock_t lock;
+
+	struct workqueue_struct *workqueue;
+	struct safexcel_work_data work_data;
+
+	/* command/result rings */
+	struct safexcel_desc_ring cdr;
+	struct safexcel_desc_ring rdr;
+
+	/* result ring crypto API request */
+	struct crypto_async_request **rdr_req;
+
+	/* queue */
+	struct crypto_queue queue;
+	spinlock_t queue_lock;
+
+	/* Number of requests in the engine. */
+	int requests;
+
+	/* The ring is currently handling at least one request */
+	bool busy;
+
+	/* Store for the current request and backlog when bailing out of the
+	 * dequeueing function because not enough resources are available.
+	 */
+	struct crypto_async_request *req;
+	struct crypto_async_request *backlog;
+};
+
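+/*
+ * Engine identities are single bits so that they can also be used as a
+ * bitmask in struct safexcel_alg_template's ->engines field.
+ */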
+enum safexcel_eip_version {
+	EIP97IES = BIT(0),
+	EIP197B  = BIT(1),
+	EIP197D  = BIT(2),
+};
+
+struct safexcel_register_offsets {
+	u32 hia_aic;
+	u32 hia_aic_g;
+	u32 hia_aic_r;
+	u32 hia_aic_xdr;
+	u32 hia_dfe;
+	u32 hia_dfe_thr;
+	u32 hia_dse;
+	u32 hia_dse_thr;
+	u32 hia_gen_cfg;
+	u32 pe;
+};
+
+enum safexcel_flags {
+	EIP197_TRC_CACHE = BIT(0),
+};
+
+struct safexcel_crypto_priv {
+	void __iomem *base;
+	struct device *dev;
+	struct clk *clk;
+	struct clk *reg_clk;
+	struct safexcel_config config;
+
+	enum safexcel_eip_version version;
+	struct safexcel_register_offsets offsets;
+	u32 flags;
+
+	/* context DMA pool */
+	struct dma_pool *context_pool;
+
+	atomic_t ring_used;
+
+	struct safexcel_ring *ring;
+};
+
+struct safexcel_context {
+	int (*send)(struct crypto_async_request *req, int ring,
+		    int *commands, int *results);
+	int (*handle_result)(struct safexcel_crypto_priv *priv, int ring,
+			     struct crypto_async_request *req, bool *complete,
+			     int *ret);
+	struct safexcel_context_record *ctxr;
+	dma_addr_t ctxr_dma;
+
+	int ring;
+	bool needs_inv;
+	bool exit_inv;
+};
+
+struct safexcel_ahash_export_state {
+	u64 len[2];
+	u64 processed[2];
+
+	u32 digest;
+
+	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
+	u8 cache[SHA512_BLOCK_SIZE];
+};
+
+/*
+ * Template structure to describe the algorithms in order to register them.
+ * It also has the purpose to contain our private structure and is actually
+ * the only way I know in this framework to avoid having global pointers...
+ */
+struct safexcel_alg_template {
+	struct safexcel_crypto_priv *priv;
+	enum safexcel_alg_type type;
+	u32 engines;
+	union {
+		struct skcipher_alg skcipher;
+		struct aead_alg aead;
+		struct ahash_alg ahash;
+	} alg;
+};
+
+struct safexcel_inv_result {
+	struct completion completion;
+	int error;
+};
+
+void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
+int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
+				struct safexcel_result_desc *rdesc);
+void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
+int safexcel_invalidate_cache(struct crypto_async_request *async,
+			      struct safexcel_crypto_priv *priv,
+			      dma_addr_t ctxr_dma, int ring);
+int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
+				   struct safexcel_desc_ring *cdr,
+				   struct safexcel_desc_ring *rdr);
+int safexcel_select_ring(struct safexcel_crypto_priv *priv);
+void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
+			      struct safexcel_desc_ring *ring);
+void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int ring);
+void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
+				 struct safexcel_desc_ring *ring);
+struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
+						 int ring_id,
+						 bool first, bool last,
+						 dma_addr_t data, u32 len,
+						 u32 full_data_len,
+						 dma_addr_t context);
+struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
+						 int ring_id,
+						bool first, bool last,
+						dma_addr_t data, u32 len);
+int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+				  int ring);
+int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+				  int ring,
+				  struct safexcel_result_desc *rdesc);
+void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+			  int ring,
+			  struct safexcel_result_desc *rdesc,
+			  struct crypto_async_request *req);
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
+void safexcel_inv_complete(struct crypto_async_request *req, int error);
+int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
+			 void *istate, void *ostate);
+
+/* available algorithms */
+extern struct safexcel_alg_template safexcel_alg_ecb_des;
+extern struct safexcel_alg_template safexcel_alg_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_ecb_aes;
+extern struct safexcel_alg_template safexcel_alg_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_md5;
+extern struct safexcel_alg_template safexcel_alg_sha1;
+extern struct safexcel_alg_template safexcel_alg_sha224;
+extern struct safexcel_alg_template safexcel_alg_sha256;
+extern struct safexcel_alg_template safexcel_alg_sha384;
+extern struct safexcel_alg_template safexcel_alg_sha512;
+extern struct safexcel_alg_template safexcel_alg_hmac_md5;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha384;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha512;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes;
+
+#endif
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
new file mode 100644
index 0000000..3aef1d4
--- /dev/null
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -0,0 +1,1336 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/skcipher.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
+#include "safexcel.h"
+
+enum safexcel_cipher_direction {
+	SAFEXCEL_ENCRYPT,
+	SAFEXCEL_DECRYPT,
+};
+
+enum safexcel_cipher_alg {
+	SAFEXCEL_DES,
+	SAFEXCEL_3DES,
+	SAFEXCEL_AES,
+};
+
+struct safexcel_cipher_ctx {
+	struct safexcel_context base;
+	struct safexcel_crypto_priv *priv;
+
+	u32 mode;
+	enum safexcel_cipher_alg alg;
+	bool aead;
+
+	__le32 key[8];
+	unsigned int key_len;
+
+	/* Everything below is AEAD specific */
+	u32 hash_alg;
+	u32 state_sz;
+	u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+	u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
+};
+
+struct safexcel_cipher_req {
+	enum safexcel_cipher_direction direction;
+	bool needs_inv;
+};
+
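+/*
+ * Build the instruction token for a plain skcipher request: for CBC mode the
+ * IV is first copied into the command descriptor token area, then a single
+ * "process packet" instruction sends the whole payload through the crypto
+ * engine to the output.
+ */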
+static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
+				    struct safexcel_command_desc *cdesc,
+				    u32 length)
+{
+	struct safexcel_token *token;
+	unsigned offset = 0;
+
+	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+		switch (ctx->alg) {
+		case SAFEXCEL_DES:
+			offset = DES_BLOCK_SIZE / sizeof(u32);
+			memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE);
+			cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+			break;
+		case SAFEXCEL_3DES:
+			offset = DES3_EDE_BLOCK_SIZE / sizeof(u32);
+			memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE);
+			cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+			break;
+
+		case SAFEXCEL_AES:
+			offset = AES_BLOCK_SIZE / sizeof(u32);
+			memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+			cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+			break;
+		}
+	}
+
+	token = (struct safexcel_token *)(cdesc->control_data.token + offset);
+
+	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+	token[0].packet_length = length;
+	token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
+			EIP197_TOKEN_STAT_LAST_HASH;
+	token[0].instructions = EIP197_TOKEN_INS_LAST |
+				EIP197_TOKEN_INS_TYPE_CRYTO |
+				EIP197_TOKEN_INS_TYPE_OUTPUT;
+}
+
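+/*
+ * Build the token for an authenc(hmac(...),cbc(aes)) request: instruction 0
+ * hashes the associated data, instruction 1 en/decrypts and hashes the
+ * payload, and the digest is then either inserted into the output
+ * (encryption) or retrieved from the input and verified (decryption).
+ */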
+static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
+				struct safexcel_command_desc *cdesc,
+				enum safexcel_cipher_direction direction,
+				u32 cryptlen, u32 assoclen, u32 digestsize)
+{
+	struct safexcel_token *token;
+	unsigned offset = 0;
+
+	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+		offset = AES_BLOCK_SIZE / sizeof(u32);
+		memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+
+		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+	}
+
+	token = (struct safexcel_token *)(cdesc->control_data.token + offset);
+
+	if (direction == SAFEXCEL_DECRYPT)
+		cryptlen -= digestsize;
+
+	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+	token[0].packet_length = assoclen;
+	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH |
+				EIP197_TOKEN_INS_TYPE_OUTPUT;
+
+	token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+	token[1].packet_length = cryptlen;
+	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
+	token[1].instructions = EIP197_TOKEN_INS_LAST |
+				EIP197_TOKEN_INS_TYPE_CRYTO |
+				EIP197_TOKEN_INS_TYPE_HASH |
+				EIP197_TOKEN_INS_TYPE_OUTPUT;
+
+	if (direction == SAFEXCEL_ENCRYPT) {
+		token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
+		token[2].packet_length = digestsize;
+		token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
+				EIP197_TOKEN_STAT_LAST_PACKET;
+		token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+					EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+	} else {
+		token[2].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
+		token[2].packet_length = digestsize;
+		token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
+				EIP197_TOKEN_STAT_LAST_PACKET;
+		token[2].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+
+		token[3].opcode = EIP197_TOKEN_OPCODE_VERIFY;
+		token[3].packet_length = digestsize |
+					 EIP197_TOKEN_HASH_RESULT_VERIFY;
+		token[3].stat = EIP197_TOKEN_STAT_LAST_HASH |
+				EIP197_TOKEN_STAT_LAST_PACKET;
+		token[3].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
+	}
+}
+
+static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
+					const u8 *key, unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct crypto_aes_ctx aes;
+	int ret, i;
+
+	ret = crypto_aes_expand_key(&aes, key, len);
+	if (ret) {
+		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return ret;
+	}
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+		for (i = 0; i < len / sizeof(u32); i++) {
+			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+				ctx->base.needs_inv = true;
+				break;
+			}
+		}
+	}
+
+	for (i = 0; i < len / sizeof(u32); i++)
+		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
+
+	ctx->key_len = len;
+
+	memzero_explicit(&aes, sizeof(aes));
+	return 0;
+}
+
+static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
+				    unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_ahash_export_state istate, ostate;
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct crypto_authenc_keys keys;
+
+	if (crypto_authenc_extractkeys(&keys, key, len) != 0)
+		goto badkey;
+
+	if (keys.enckeylen > sizeof(ctx->key))
+		goto badkey;
+
+	/* Encryption key */
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
+	    memcmp(ctx->key, keys.enckey, keys.enckeylen))
+		ctx->base.needs_inv = true;
+
+	/* Auth key */
+	switch (ctx->hash_alg) {
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
+		if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA224:
+		if (safexcel_hmac_setkey("safexcel-sha224", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA256:
+		if (safexcel_hmac_setkey("safexcel-sha256", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA384:
+		if (safexcel_hmac_setkey("safexcel-sha384", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA512:
+		if (safexcel_hmac_setkey("safexcel-sha512", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	default:
+		dev_err(priv->dev, "aead: unsupported hash algorithm\n");
+		goto badkey;
+	}
+
+	crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
+				    CRYPTO_TFM_RES_MASK);
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
+	    (memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
+	     memcmp(ctx->opad, ostate.state, ctx->state_sz)))
+		ctx->base.needs_inv = true;
+
+	/* Now copy the keys into the context */
+	memcpy(ctx->key, keys.enckey, keys.enckeylen);
+	ctx->key_len = keys.enckeylen;
+
+	memcpy(ctx->ipad, &istate.state, ctx->state_sz);
+	memcpy(ctx->opad, &ostate.state, ctx->state_sz);
+
+	memzero_explicit(&keys, sizeof(keys));
+	return 0;
+
+badkey:
+	crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	memzero_explicit(&keys, sizeof(keys));
+	return -EINVAL;
+}
+
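+/*
+ * Fill in the control words of the first command descriptor: operation type
+ * and direction, cipher algorithm and key size, HMAC digest algorithm for
+ * AEAD requests, and the size of the key material held in the context
+ * record.
+ */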
+static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
+				    struct crypto_async_request *async,
+				    struct safexcel_cipher_req *sreq,
+				    struct safexcel_command_desc *cdesc)
+{
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ctrl_size;
+
+	if (ctx->aead) {
+		if (sreq->direction == SAFEXCEL_ENCRYPT)
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
+		else
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
+	} else {
+		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
+
+		/* The decryption control type is a combination of the
+		 * encryption type and CONTEXT_CONTROL_TYPE_NULL_IN, for all
+		 * types.
+		 */
+		if (sreq->direction == SAFEXCEL_DECRYPT)
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_NULL_IN;
+	}
+
+	cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
+	cdesc->control_data.control1 |= ctx->mode;
+
+	if (ctx->aead)
+		cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC |
+						ctx->hash_alg;
+
+	if (ctx->alg == SAFEXCEL_DES) {
+		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_DES;
+	} else if (ctx->alg == SAFEXCEL_3DES) {
+		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES;
+	} else if (ctx->alg == SAFEXCEL_AES) {
+		switch (ctx->key_len) {
+		case AES_KEYSIZE_128:
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
+			break;
+		case AES_KEYSIZE_192:
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
+			break;
+		case AES_KEYSIZE_256:
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
+			break;
+		default:
+			dev_err(priv->dev, "aes keysize not supported: %u\n",
+				ctx->key_len);
+			return -EINVAL;
+		}
+	}
+
+	ctrl_size = ctx->key_len / sizeof(u32);
+	if (ctx->aead)
+		/* Take into account the ipad+opad digests */
+		ctrl_size += ctx->state_sz / sizeof(u32) * 2;
+	cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);
+
+	return 0;
+}
+
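+/*
+ * Walk and error-check the result descriptors of a completed cipher request,
+ * then unmap the source/destination scatterlists and flag the request for
+ * completion.
+ */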
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+				      struct crypto_async_request *async,
+				      struct scatterlist *src,
+				      struct scatterlist *dst,
+				      unsigned int cryptlen,
+				      struct safexcel_cipher_req *sreq,
+				      bool *should_complete, int *ret)
+{
+	struct safexcel_result_desc *rdesc;
+	int ndesc = 0;
+
+	*ret = 0;
+
+	do {
+		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+		if (IS_ERR(rdesc)) {
+			dev_err(priv->dev,
+				"cipher: result: could not retrieve the result descriptor\n");
+			*ret = PTR_ERR(rdesc);
+			break;
+		}
+
+		if (likely(!*ret))
+			*ret = safexcel_rdesc_check_errors(priv, rdesc);
+
+		ndesc++;
+	} while (!rdesc->last_seg);
+
+	safexcel_complete(priv, ring);
+
+	if (src == dst) {
+		dma_unmap_sg(priv->dev, src,
+			     sg_nents_for_len(src, cryptlen),
+			     DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(priv->dev, src,
+			     sg_nents_for_len(src, cryptlen),
+			     DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst,
+			     sg_nents_for_len(dst, cryptlen),
+			     DMA_FROM_DEVICE);
+	}
+
+	*should_complete = true;
+
+	return ndesc;
+}
+
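+/*
+ * Map the source/destination scatterlists, copy the key material (and the
+ * ipad/opad digests for AEAD) into the context record, then build one
+ * command descriptor per source segment and one result descriptor per
+ * destination segment. On failure the ring write pointers are rolled back
+ * and the buffers unmapped.
+ */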
+static int safexcel_send_req(struct crypto_async_request *base, int ring,
+			     struct safexcel_cipher_req *sreq,
+			     struct scatterlist *src, struct scatterlist *dst,
+			     unsigned int cryptlen, unsigned int assoclen,
+			     unsigned int digestsize, u8 *iv, int *commands,
+			     int *results)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct safexcel_command_desc *cdesc;
+	struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
+	struct scatterlist *sg;
+	unsigned int totlen = cryptlen + assoclen;
+	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
+	int i, ret = 0;
+
+	if (src == dst) {
+		nr_src = dma_map_sg(priv->dev, src,
+				    sg_nents_for_len(src, totlen),
+				    DMA_BIDIRECTIONAL);
+		nr_dst = nr_src;
+		if (!nr_src)
+			return -EINVAL;
+	} else {
+		nr_src = dma_map_sg(priv->dev, src,
+				    sg_nents_for_len(src, totlen),
+				    DMA_TO_DEVICE);
+		if (!nr_src)
+			return -EINVAL;
+
+		nr_dst = dma_map_sg(priv->dev, dst,
+				    sg_nents_for_len(dst, totlen),
+				    DMA_FROM_DEVICE);
+		if (!nr_dst) {
+			dma_unmap_sg(priv->dev, src,
+				     sg_nents_for_len(src, totlen),
+				     DMA_TO_DEVICE);
+			return -EINVAL;
+		}
+	}
+
+	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
+
+	if (ctx->aead) {
+		memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
+		       ctx->ipad, ctx->state_sz);
+		memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32),
+		       ctx->opad, ctx->state_sz);
+	}
+
+	/* command descriptors */
+	for_each_sg(src, sg, nr_src, i) {
+		int len = sg_dma_len(sg);
+
+		/* Do not overflow the request */
+		if (queued - len < 0)
+			len = queued;
+
+		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
+					   sg_dma_address(sg), len, totlen,
+					   ctx->base.ctxr_dma);
+		if (IS_ERR(cdesc)) {
+			/* No space left in the command descriptor ring */
+			ret = PTR_ERR(cdesc);
+			goto cdesc_rollback;
+		}
+		n_cdesc++;
+
+		if (n_cdesc == 1) {
+			safexcel_context_control(ctx, base, sreq, cdesc);
+			if (ctx->aead)
+				safexcel_aead_token(ctx, iv, cdesc,
+						    sreq->direction, cryptlen,
+						    assoclen, digestsize);
+			else
+				safexcel_skcipher_token(ctx, iv, cdesc,
+							cryptlen);
+		}
+
+		queued -= len;
+		if (!queued)
+			break;
+	}
+
+	/* result descriptors */
+	for_each_sg(dst, sg, nr_dst, i) {
+		bool first = !i, last = (i == nr_dst - 1);
+		u32 len = sg_dma_len(sg);
+
+		rdesc = safexcel_add_rdesc(priv, ring, first, last,
+					   sg_dma_address(sg), len);
+		if (IS_ERR(rdesc)) {
+			/* No space left in the result descriptor ring */
+			ret = PTR_ERR(rdesc);
+			goto rdesc_rollback;
+		}
+		if (first)
+			first_rdesc = rdesc;
+		n_rdesc++;
+	}
+
+	safexcel_rdr_req_set(priv, ring, first_rdesc, base);
+
+	*commands = n_cdesc;
+	*results = n_rdesc;
+	return 0;
+
+rdesc_rollback:
+	for (i = 0; i < n_rdesc; i++)
+		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
+cdesc_rollback:
+	for (i = 0; i < n_cdesc; i++)
+		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
+
+	if (src == dst) {
+		dma_unmap_sg(priv->dev, src,
+			     sg_nents_for_len(src, totlen),
+			     DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(priv->dev, src,
+			     sg_nents_for_len(src, totlen),
+			     DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst,
+			     sg_nents_for_len(dst, totlen),
+			     DMA_FROM_DEVICE);
+	}
+
+	return ret;
+}
+
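+/*
+ * Handle the result of a context invalidation request: free the context
+ * record if the tfm is being torn down, otherwise re-queue the original
+ * request on a freshly selected ring.
+ */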
+static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
+				      int ring,
+				      struct crypto_async_request *base,
+				      bool *should_complete, int *ret)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+	struct safexcel_result_desc *rdesc;
+	int ndesc = 0, enq_ret;
+
+	*ret = 0;
+
+	do {
+		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+		if (IS_ERR(rdesc)) {
+			dev_err(priv->dev,
+				"cipher: invalidate: could not retrieve the result descriptor\n");
+			*ret = PTR_ERR(rdesc);
+			break;
+		}
+
+		if (likely(!*ret))
+			*ret = safexcel_rdesc_check_errors(priv, rdesc);
+
+		ndesc++;
+	} while (!rdesc->last_seg);
+
+	safexcel_complete(priv, ring);
+
+	if (ctx->base.exit_inv) {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+
+		*should_complete = true;
+
+		return ndesc;
+	}
+
+	ring = safexcel_select_ring(priv);
+	ctx->base.ring = ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	if (enq_ret != -EINPROGRESS)
+		*ret = enq_ret;
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	*should_complete = false;
+
+	return ndesc;
+}
+
+static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
+					   int ring,
+					   struct crypto_async_request *async,
+					   bool *should_complete, int *ret)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	int err;
+
+	if (sreq->needs_inv) {
+		sreq->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async, req->src,
+						 req->dst, req->cryptlen, sreq,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
+static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
+				       int ring,
+				       struct crypto_async_request *async,
+				       bool *should_complete, int *ret)
+{
+	struct aead_request *req = aead_request_cast(async);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
+	int err;
+
+	if (sreq->needs_inv) {
+		sreq->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async, req->src,
+						 req->dst,
+						 req->cryptlen + crypto_aead_authsize(tfm),
+						 sreq, should_complete, ret);
+	}
+
+	return err;
+}
+
+static int safexcel_cipher_send_inv(struct crypto_async_request *base,
+				    int ring, int *commands, int *results)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
+	if (unlikely(ret))
+		return ret;
+
+	*commands = 1;
+	*results = 1;
+
+	return 0;
+}
+
+static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
+				  int *commands, int *results)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
+
+	if (sreq->needs_inv)
+		ret = safexcel_cipher_send_inv(async, ring, commands, results);
+	else
+		ret = safexcel_send_req(async, ring, sreq, req->src,
+					req->dst, req->cryptlen, 0, 0, req->iv,
+					commands, results);
+	return ret;
+}
+
+static int safexcel_aead_send(struct crypto_async_request *async, int ring,
+			      int *commands, int *results)
+{
+	struct aead_request *req = aead_request_cast(async);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
+
+	if (sreq->needs_inv)
+		ret = safexcel_cipher_send_inv(async, ring, commands, results);
+	else
+		ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
+					req->cryptlen, req->assoclen,
+					crypto_aead_authsize(tfm), req->iv,
+					commands, results);
+	return ret;
+}
+
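+/*
+ * Synchronously push an invalidation request through the ring and wait for
+ * its completion; used when a tfm that still owns a context record is
+ * destroyed and the engine has a transform record cache.
+ */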
+static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
+				    struct crypto_async_request *base,
+				    struct safexcel_cipher_req *sreq,
+				    struct safexcel_inv_result *result)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ring = ctx->base.ring;
+
+	init_completion(&result->completion);
+
+	ctx = crypto_tfm_ctx(base->tfm);
+	ctx->base.exit_inv = true;
+	sreq->needs_inv = true;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	crypto_enqueue_request(&priv->ring[ring].queue, base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	wait_for_completion(&result->completion);
+
+	if (result->error) {
+		dev_warn(priv->dev,
+			"cipher: sync: invalidate: completion error %d\n",
+			 result->error);
+		return result->error;
+	}
+
+	return 0;
+}
+
+static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm)
+{
+	EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	struct safexcel_inv_result result = {};
+
+	memset(req, 0, sizeof(struct skcipher_request));
+
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      safexcel_inv_complete, &result);
+	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+
+	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
+}
+
+static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
+{
+	EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE);
+	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
+	struct safexcel_inv_result result = {};
+
+	memset(req, 0, sizeof(struct aead_request));
+
+	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				  safexcel_inv_complete, &result);
+	aead_request_set_tfm(req, __crypto_aead_cast(tfm));
+
+	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
+}
+
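+/*
+ * Common enqueue path for skcipher and AEAD requests: allocate the per-tfm
+ * context record on first use, propagate a pending context invalidation to
+ * the request, then queue the request on the context's ring and kick the
+ * ring workqueue.
+ */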
+static int safexcel_queue_req(struct crypto_async_request *base,
+			struct safexcel_cipher_req *sreq,
+			enum safexcel_cipher_direction dir, u32 mode,
+			enum safexcel_cipher_alg alg)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret, ring;
+
+	sreq->needs_inv = false;
+	sreq->direction = dir;
+	ctx->alg = alg;
+	ctx->mode = mode;
+
+	if (ctx->base.ctxr) {
+		if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
+			sreq->needs_inv = true;
+			ctx->base.needs_inv = false;
+		}
+	} else {
+		ctx->base.ring = safexcel_select_ring(priv);
+		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
+						 EIP197_GFP_FLAGS(*base),
+						 &ctx->base.ctxr_dma);
+		if (!ctx->base.ctxr)
+			return -ENOMEM;
+	}
+
+	ring = ctx->base.ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	return ret;
+}
+
+static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_AES);
+}
+
+static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_AES);
+}
+
+static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_alg_template *tmpl =
+		container_of(tfm->__crt_alg, struct safexcel_alg_template,
+			     alg.skcipher.base);
+
+	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+				    sizeof(struct safexcel_cipher_req));
+
+	ctx->priv = tmpl->priv;
+
+	ctx->base.send = safexcel_skcipher_send;
+	ctx->base.handle_result = safexcel_skcipher_handle_result;
+	return 0;
+}
+
+static int safexcel_cipher_cra_exit(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	memzero_explicit(ctx->key, sizeof(ctx->key));
+
+	/* context not allocated, skip invalidation */
+	if (!ctx->base.ctxr)
+		return -ENOMEM;
+
+	memzero_explicit(ctx->base.ctxr->data, sizeof(ctx->base.ctxr->data));
+	return 0;
+}
+
+static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	if (safexcel_cipher_cra_exit(tfm))
+		return;
+
+	if (priv->flags & EIP197_TRC_CACHE) {
+		ret = safexcel_skcipher_exit_inv(tfm);
+		if (ret)
+			dev_warn(priv->dev, "skcipher: invalidation error %d\n",
+				 ret);
+	} else {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+	}
+}
+
+static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	if (safexcel_cipher_cra_exit(tfm))
+		return;
+
+	if (priv->flags & EIP197_TRC_CACHE) {
+		ret = safexcel_aead_exit_inv(tfm);
+		if (ret)
+			dev_warn(priv->dev, "aead: invalidation error %d\n",
+				 ret);
+	} else {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+	}
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_aes = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_skcipher_aes_setkey,
+		.encrypt = safexcel_ecb_aes_encrypt,
+		.decrypt = safexcel_ecb_aes_decrypt,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.base = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "safexcel-ecb-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_AES);
+}
+
+static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_AES);
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_skcipher_aes_setkey,
+		.encrypt = safexcel_cbc_aes_encrypt,
+		.decrypt = safexcel_cbc_aes_decrypt,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "safexcel-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_cbc_des_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_DES);
+}
+
+static int safexcel_cbc_des_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_DES);
+}
+
+static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
+			       unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+	int ret;
+
+	if (len != DES_KEY_SIZE) {
+		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ret = des_ekey(tmp, key);
+	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	/* if the context exists and the key changed, invalidate it */
+	if (ctx->base.ctxr_dma)
+		if (memcmp(ctx->key, key, len))
+			ctx->base.needs_inv = true;
+
+	memcpy(ctx->key, key, len);
+	ctx->key_len = len;
+
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_des_setkey,
+		.encrypt = safexcel_cbc_des_encrypt,
+		.decrypt = safexcel_cbc_des_decrypt,
+		.min_keysize = DES_KEY_SIZE,
+		.max_keysize = DES_KEY_SIZE,
+		.ivsize = DES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(des)",
+			.cra_driver_name = "safexcel-cbc-des",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_ecb_des_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_DES);
+}
+
+static int safexcel_ecb_des_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_des_setkey,
+		.encrypt = safexcel_ecb_des_encrypt,
+		.decrypt = safexcel_ecb_des_decrypt,
+		.min_keysize = DES_KEY_SIZE,
+		.max_keysize = DES_KEY_SIZE,
+		.ivsize = DES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "ecb(des)",
+			.cra_driver_name = "safexcel-ecb-des",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_cbc_des3_ede_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_3DES);
+}
+
+static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_3DES);
+}
+
+static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
+				   const u8 *key, unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (len != DES3_EDE_KEY_SIZE) {
+		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	/* if the context exists and the key changed, invalidate it */
+	if (ctx->base.ctxr_dma) {
+		if (memcmp(ctx->key, key, len))
+			ctx->base.needs_inv = true;
+	}
+
+	memcpy(ctx->key, key, len);
+
+	ctx->key_len = len;
+
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_des3_ede_setkey,
+		.encrypt = safexcel_cbc_des3_ede_encrypt,
+		.decrypt = safexcel_cbc_des3_ede_decrypt,
+		.min_keysize = DES3_EDE_KEY_SIZE,
+		.max_keysize = DES3_EDE_KEY_SIZE,
+		.ivsize = DES3_EDE_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "safexcel-cbc-des3_ede",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_ecb_des3_ede_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_3DES);
+}
+
+static int safexcel_ecb_des3_ede_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_3DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_des3_ede_setkey,
+		.encrypt = safexcel_ecb_des3_ede_encrypt,
+		.decrypt = safexcel_ecb_des3_ede_decrypt,
+		.min_keysize = DES3_EDE_KEY_SIZE,
+		.max_keysize = DES3_EDE_KEY_SIZE,
+		.ivsize = DES3_EDE_BLOCK_SIZE,
+		.base = {
+			.cra_name = "ecb(des3_ede)",
+			.cra_driver_name = "safexcel-ecb-des3_ede",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_encrypt(struct aead_request *req)
+{
+	struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+	return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT,
+			CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
+}
+
+static int safexcel_aead_decrypt(struct aead_request *req)
+{
+	struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+	return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT,
+			CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
+}
+
+static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_alg_template *tmpl =
+		container_of(tfm->__crt_alg, struct safexcel_alg_template,
+			     alg.aead.base);
+
+	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+				sizeof(struct safexcel_cipher_req));
+
+	ctx->priv = tmpl->priv;
+
+	ctx->aead = true;
+	ctx->base.send = safexcel_aead_send;
+	ctx->base.handle_result = safexcel_aead_handle_result;
+	return 0;
+}
+
+static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+	ctx->state_sz = SHA1_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.aead = {
+		.setkey = safexcel_aead_aes_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA1_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha1),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha1_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+	ctx->state_sz = SHA256_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.aead = {
+		.setkey = safexcel_aead_aes_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA256_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha256),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha256_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+	ctx->state_sz = SHA256_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.aead = {
+		.setkey = safexcel_aead_aes_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA224_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha224),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha224_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+	ctx->state_sz = SHA512_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.aead = {
+		.setkey = safexcel_aead_aes_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA512_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha512),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha512_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+	ctx->state_sz = SHA512_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.aead = {
+		.setkey = safexcel_aead_aes_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA384_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha384),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha384_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
new file mode 100644
index 0000000..ac9282c
--- /dev/null
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -0,0 +1,1659 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#include <crypto/hmac.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#include "safexcel.h"
+
+struct safexcel_ahash_ctx {
+	struct safexcel_context base;
+	struct safexcel_crypto_priv *priv;
+
+	u32 alg;
+
+	u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+	u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
+};
+
+struct safexcel_ahash_req {
+	bool last_req;
+	bool finish;
+	bool hmac;
+	bool needs_inv;
+
+	int nents;
+	dma_addr_t result_dma;
+
+	u32 digest;
+
+	u8 state_sz;    /* expected state size, only set once */
+	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+
+	u64 len[2];
+	u64 processed[2];
+
+	u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+	dma_addr_t cache_dma;
+	unsigned int cache_sz;
+
+	u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+};
+
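+/* Return the number of bytes accepted by update() but not yet processed by
+ * the engine.
+ */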
+static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
+{
+	if (req->len[1] > req->processed[1])
+		return 0xffffffff - (req->len[0] - req->processed[0]);
+
+	return req->len[0] - req->processed[0];
+}
+
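+/*
+ * Build the hash token: one instruction hashing input_length bytes of input,
+ * followed by one inserting result_length bytes of digest into the result.
+ */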
+static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
+				u32 input_length, u32 result_length)
+{
+	struct safexcel_token *token =
+		(struct safexcel_token *)cdesc->control_data.token;
+
+	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+	token[0].packet_length = input_length;
+	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
+	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+
+	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
+	token[1].packet_length = result_length;
+	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
+			EIP197_TOKEN_STAT_LAST_PACKET;
+	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+}
+
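+/*
+ * Set the hash algorithm and digest type in the control words. For a
+ * continued precomputed-digest operation the intermediate state (and, on the
+ * final block, the block counter) is reloaded into the context record; for
+ * HMAC the precomputed ipad/opad digests are loaded instead.
+ */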
+static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
+				     struct safexcel_ahash_req *req,
+				     struct safexcel_command_desc *cdesc,
+				     unsigned int digestsize)
+{
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int i;
+
+	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
+	cdesc->control_data.control0 |= ctx->alg;
+	cdesc->control_data.control0 |= req->digest;
+
+	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
+		if (req->processed[0] || req->processed[1]) {
+			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5);
+			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
+			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
+				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
+				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
+			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 ||
+				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17);
+
+			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
+		} else {
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
+		}
+
+		if (!req->finish)
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
+
+		/*
+		 * Copy the input digest if needed, and set up the context
+		 * fields. Do this now as we need it to set up the first
+		 * command descriptor.
+		 */
+		if (req->processed[0] || req->processed[1]) {
+			for (i = 0; i < digestsize / sizeof(u32); i++)
+				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);
+
+			if (req->finish) {
+				u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+				count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) *
+					  req->processed[1]);
+
+				/* This is a hardware limitation, as the
+				 * counter must fit into a u32. This represents
+				 * a fairly big amount of input data, so we
+				 * shouldn't see this.
+				 */
+				if (unlikely(count & 0xffff0000)) {
+					dev_warn(priv->dev,
+						 "Input data is too big\n");
+					return;
+				}
+
+				ctx->base.ctxr->data[i] = cpu_to_le32(count);
+			}
+		}
+	} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
+		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
+
+		memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
+		memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
+		       ctx->opad, req->state_sz);
+	}
+}
+
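+/*
+ * Error-check the result descriptor of a completed hash request, unmap the
+ * DMA buffers, copy the digest into the request result when finishing, and
+ * carry any leftover cached bytes over to the next request.
+ */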
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+				      struct crypto_async_request *async,
+				      bool *should_complete, int *ret)
+{
+	struct safexcel_result_desc *rdesc;
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
+	u64 cache_len;
+
+	*ret = 0;
+
+	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+	if (IS_ERR(rdesc)) {
+		dev_err(priv->dev,
+			"hash: result: could not retrieve the result descriptor\n");
+		*ret = PTR_ERR(rdesc);
+	} else {
+		*ret = safexcel_rdesc_check_errors(priv, rdesc);
+	}
+
+	safexcel_complete(priv, ring);
+
+	if (sreq->nents) {
+		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+		sreq->nents = 0;
+	}
+
+	if (sreq->result_dma) {
+		dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
+				 DMA_FROM_DEVICE);
+		sreq->result_dma = 0;
+	}
+
+	if (sreq->cache_dma) {
+		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
+				 DMA_TO_DEVICE);
+		sreq->cache_dma = 0;
+	}
+
+	if (sreq->finish)
+		memcpy(areq->result, sreq->state,
+		       crypto_ahash_digestsize(ahash));
+
+	cache_len = safexcel_queued_len(sreq);
+	if (cache_len)
+		memcpy(sreq->cache, sreq->cache_next, cache_len);
+
+	*should_complete = true;
+
+	return 1;
+}
+
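+/*
+ * Build the descriptors for a hash request: when more data is expected, keep
+ * back the trailing partial block (or the last full block) for the next
+ * call; then describe the previously cached data and the request scatterlist
+ * as command descriptors, and add a single result descriptor for the
+ * intermediate state/digest.
+ */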
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+				   int *commands, int *results)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
+	struct safexcel_result_desc *rdesc;
+	struct scatterlist *sg;
+	int i, extra, n_cdesc = 0, ret = 0;
+	u64 queued, len, cache_len;
+
+	queued = len = safexcel_queued_len(req);
+	if (queued <= crypto_ahash_blocksize(ahash))
+		cache_len = queued;
+	else
+		cache_len = queued - areq->nbytes;
+
+	if (!req->last_req) {
+		/* If this is not the last request and the queued data does not
+		 * fit into full blocks, cache it for the next send() call.
+		 */
+		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+		if (!extra)
+			/* If this is not the last request and the queued data
+			 * is a multiple of a block, cache the last one for now.
+			 */
+			extra = crypto_ahash_blocksize(ahash);
+
+		if (extra) {
+			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+					   req->cache_next, extra,
+					   areq->nbytes - extra);
+
+			queued -= extra;
+			len -= extra;
+
+			if (!queued) {
+				*commands = 0;
+				*results = 0;
+				return 0;
+			}
+		}
+	}
+
+	/* Add a command descriptor for the cached data, if any */
+	if (cache_len) {
+		req->cache_dma = dma_map_single(priv->dev, req->cache,
+						cache_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->dev, req->cache_dma))
+			return -EINVAL;
+
+		req->cache_sz = cache_len;
+		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
+						 (cache_len == len),
+						 req->cache_dma, cache_len, len,
+						 ctx->base.ctxr_dma);
+		if (IS_ERR(first_cdesc)) {
+			ret = PTR_ERR(first_cdesc);
+			goto unmap_cache;
+		}
+		n_cdesc++;
+
+		queued -= cache_len;
+		if (!queued)
+			goto send_command;
+	}
+
+	/* Now handle the current ahash request buffer(s) */
+	req->nents = dma_map_sg(priv->dev, areq->src,
+				sg_nents_for_len(areq->src, areq->nbytes),
+				DMA_TO_DEVICE);
+	if (!req->nents) {
+		ret = -ENOMEM;
+		goto cdesc_rollback;
+	}
+
+	for_each_sg(areq->src, sg, req->nents, i) {
+		int sglen = sg_dma_len(sg);
+
+		/* Do not overflow the request */
+		if (queued < sglen)
+			sglen = queued;
+
+		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
+					   !(queued - sglen), sg_dma_address(sg),
+					   sglen, len, ctx->base.ctxr_dma);
+		if (IS_ERR(cdesc)) {
+			ret = PTR_ERR(cdesc);
+			goto unmap_sg;
+		}
+		n_cdesc++;
+
+		if (n_cdesc == 1)
+			first_cdesc = cdesc;
+
+		queued -= sglen;
+		if (!queued)
+			break;
+	}
+
+send_command:
+	/* Setup the context options */
+	safexcel_context_control(ctx, req, first_cdesc, req->state_sz);
+
+	/* Add the token */
+	safexcel_hash_token(first_cdesc, len, req->state_sz);
+
+	req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
+					 DMA_FROM_DEVICE);
+	if (dma_mapping_error(priv->dev, req->result_dma)) {
+		ret = -EINVAL;
+		goto unmap_sg;
+	}
+
+	/* Add a result descriptor */
+	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
+				   req->state_sz);
+	if (IS_ERR(rdesc)) {
+		ret = PTR_ERR(rdesc);
+		goto unmap_result;
+	}
+
+	safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
+
+	req->processed[0] += len;
+	if (req->processed[0] < len)
+		req->processed[1]++;
+
+	*commands = n_cdesc;
+	*results = 1;
+	return 0;
+
+unmap_result:
+	dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
+			 DMA_FROM_DEVICE);
+unmap_sg:
+	dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+cdesc_rollback:
+	for (i = 0; i < n_cdesc; i++)
+		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
+unmap_cache:
+	if (req->cache_dma) {
+		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
+				 DMA_TO_DEVICE);
+		req->cache_sz = 0;
+	}
+
+	return ret;
+}
+
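+/*
+ * Return true if the hash state and block counter stored in the context
+ * record no longer match the software copy kept in the request, i.e. the
+ * cached context is stale and must be invalidated.
+ */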
+static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	unsigned int state_w_sz = req->state_sz / sizeof(u32);
+	u64 processed;
+	int i;
+
+	processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+	processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1];
+
+	for (i = 0; i < state_w_sz; i++)
+		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
+			return true;
+
+	if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed))
+		return true;
+
+	return false;
+}
+
+static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
+				      int ring,
+				      struct crypto_async_request *async,
+				      bool *should_complete, int *ret)
+{
+	struct safexcel_result_desc *rdesc;
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int enq_ret;
+
+	*ret = 0;
+
+	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+	if (IS_ERR(rdesc)) {
+		dev_err(priv->dev,
+			"hash: invalidate: could not retrieve the result descriptor\n");
+		*ret = PTR_ERR(rdesc);
+	} else {
+		*ret = safexcel_rdesc_check_errors(priv, rdesc);
+	}
+
+	safexcel_complete(priv, ring);
+
+	if (ctx->base.exit_inv) {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+
+		*should_complete = true;
+		return 1;
+	}
+
+	ring = safexcel_select_ring(priv);
+	ctx->base.ring = ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	if (enq_ret != -EINPROGRESS)
+		*ret = enq_ret;
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	*should_complete = false;
+
+	return 1;
+}
+
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+				  struct crypto_async_request *async,
+				  bool *should_complete, int *ret)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int err;
+
+	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);
+
+	if (req->needs_inv) {
+		req->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
+static int safexcel_ahash_send_inv(struct crypto_async_request *async,
+				   int ring, int *commands, int *results)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	int ret;
+
+	ret = safexcel_invalidate_cache(async, ctx->priv,
+					ctx->base.ctxr_dma, ring);
+	if (unlikely(ret))
+		return ret;
+
+	*commands = 1;
+	*results = 1;
+
+	return 0;
+}
+
+static int safexcel_ahash_send(struct crypto_async_request *async,
+			       int ring, int *commands, int *results)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int ret;
+
+	if (req->needs_inv)
+		ret = safexcel_ahash_send_inv(async, ring, commands, results);
+	else
+		ret = safexcel_ahash_send_req(async, ring, commands, results);
+
+	return ret;
+}
+
+static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
+	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
+	struct safexcel_inv_result result = {};
+	int ring = ctx->base.ring;
+
+	memset(req, 0, sizeof(struct ahash_request));
+
+	/* Create invalidation request */
+	init_completion(&result.completion);
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   safexcel_inv_complete, &result);
+
+	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
+	ctx->base.exit_inv = true;
+	rctx->needs_inv = true;
+
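+	/*
+	 * Queue the invalidation request on the context's current ring and
+	 * wait for the engine to report its completion.
+	 */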
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	wait_for_completion(&result.completion);
+
+	if (result.error) {
+		dev_warn(priv->dev, "hash: completion error (%d)\n",
+			 result.error);
+		return result.error;
+	}
+
+	return 0;
+}
+
+/* safexcel_ahash_cache: cache data until at least one request can be sent to
+ * the engine, i.e. when there is at least one block size worth of data in
+ * the pipe.
+ */
+static int safexcel_ahash_cache(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	u64 queued, cache_len;
+
+	/* queued: everything accepted by the driver that will be handled by
+	 * the next send() calls, i.e. the total size handled by update()
+	 * minus the total size handled by send().
+	 */
+	queued = safexcel_queued_len(req);
+	/* cache_len: everything accepted by the driver but not sent yet, i.e.
+	 * the total size handled by update() minus the size of the current
+	 * request and minus the total size handled by send().
+	 */
+	cache_len = queued - areq->nbytes;
+
+	/*
+	 * In case there aren't enough bytes to proceed (less than a
+	 * block size), cache the data until we have enough.
+	 */
+	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
+		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+				   req->cache + cache_len,
+				   areq->nbytes, 0);
+		return areq->nbytes;
+	}
+
+	/* We couldn't cache all the data */
+	return -E2BIG;
+}
+
+static int safexcel_ahash_enqueue(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret, ring;
+
+	req->needs_inv = false;
+
+	if (ctx->base.ctxr) {
+		if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
+		    (req->processed[0] || req->processed[1]) &&
+		    req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+			/* We're still setting needs_inv here, even though it is
+			 * cleared right away, because the needs_inv flag can be
+			 * set in other functions and we want to keep the same
+			 * logic.
+			 */
+			ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+
+		if (ctx->base.needs_inv) {
+			ctx->base.needs_inv = false;
+			req->needs_inv = true;
+		}
+	} else {
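+		/*
+		 * First use of this transform: assign it a ring and allocate
+		 * its context record from the DMA pool.
+		 */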
+		ctx->base.ring = safexcel_select_ring(priv);
+		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
+						 EIP197_GFP_FLAGS(areq->base),
+						 &ctx->base.ctxr_dma);
+		if (!ctx->base.ctxr)
+			return -ENOMEM;
+	}
+
+	ring = ctx->base.ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	return ret;
+}
+
+static int safexcel_ahash_update(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+
+	/* If the request is 0 length, do nothing */
+	if (!areq->nbytes)
+		return 0;
+
+	req->len[0] += areq->nbytes;
+	if (req->len[0] < areq->nbytes)
+		req->len[1]++;
+
+	safexcel_ahash_cache(areq);
+
+	/*
+	 * We're not doing partial updates when performing an HMAC request.
+	 * Everything will be handled by the final() call.
+	 */
+	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+		return 0;
+
+	if (req->hmac)
+		return safexcel_ahash_enqueue(areq);
+
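+	/*
+	 * Only hand data to the engine while more than one block size is
+	 * queued, so that at least the trailing block stays cached until the
+	 * request is finished by final()/finup().
+	 */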
+	if (!req->last_req &&
+	    safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
+		return safexcel_ahash_enqueue(areq);
+
+	return 0;
+}
+
+static int safexcel_ahash_final(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+
+	req->last_req = true;
+	req->finish = true;
+
+	/* If we have an overall 0 length request, return the precomputed
+	 * hash of the empty message directly.
+	 */
+	if (!req->len[0] && !req->len[1] && !areq->nbytes) {
+		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+			memcpy(areq->result, md5_zero_message_hash,
+			       MD5_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+			memcpy(areq->result, sha1_zero_message_hash,
+			       SHA1_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
+			memcpy(areq->result, sha224_zero_message_hash,
+			       SHA224_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
+			memcpy(areq->result, sha256_zero_message_hash,
+			       SHA256_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
+			memcpy(areq->result, sha384_zero_message_hash,
+			       SHA384_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+			memcpy(areq->result, sha512_zero_message_hash,
+			       SHA512_DIGEST_SIZE);
+
+		return 0;
+	}
+
+	return safexcel_ahash_enqueue(areq);
+}
+
+static int safexcel_ahash_finup(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	req->last_req = true;
+	req->finish = true;
+
+	safexcel_ahash_update(areq);
+	return safexcel_ahash_final(areq);
+}
+
+static int safexcel_ahash_export(struct ahash_request *areq, void *out)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct safexcel_ahash_export_state *export = out;
+
+	export->len[0] = req->len[0];
+	export->len[1] = req->len[1];
+	export->processed[0] = req->processed[0];
+	export->processed[1] = req->processed[1];
+
+	export->digest = req->digest;
+
+	memcpy(export->state, req->state, req->state_sz);
+	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
+
+	return 0;
+}
+
+static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	const struct safexcel_ahash_export_state *export = in;
+	int ret;
+
+	ret = crypto_ahash_init(areq);
+	if (ret)
+		return ret;
+
+	req->len[0] = export->len[0];
+	req->len[1] = export->len[1];
+	req->processed[0] = export->processed[0];
+	req->processed[1] = export->processed[1];
+
+	req->digest = export->digest;
+
+	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
+	memcpy(req->state, export->state, req->state_sz);
+
+	return 0;
+}
+
+static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_alg_template *tmpl =
+		container_of(__crypto_ahash_alg(tfm->__crt_alg),
+			     struct safexcel_alg_template, alg.ahash);
+
+	ctx->priv = tmpl->priv;
+	ctx->base.send = safexcel_ahash_send;
+	ctx->base.handle_result = safexcel_handle_result;
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct safexcel_ahash_req));
+	return 0;
+}
+
+static int safexcel_sha1_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = SHA1_H0;
+	req->state[1] = SHA1_H1;
+	req->state[2] = SHA1_H2;
+	req->state[3] = SHA1_H3;
+	req->state[4] = SHA1_H4;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA1_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha1_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha1_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	/* context not allocated, skip invalidation */
+	if (!ctx->base.ctxr)
+		return;
+
+	if (priv->flags & EIP197_TRC_CACHE) {
+		ret = safexcel_ahash_exit_inv(tfm);
+		if (ret)
+			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+	} else {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+	}
+}
+
+struct safexcel_alg_template safexcel_alg_sha1 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_sha1_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha1_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "safexcel-sha1",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha1_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_sha1_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha1_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_ahash_result {
+	struct completion completion;
+	int error;
+};
+
+static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
+{
+	struct safexcel_ahash_result *result = req->data;
+
+	if (error == -EINPROGRESS)
+		return;
+
+	result->error = error;
+	complete(&result->completion);
+}
+
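+/*
+ * Prepare the HMAC inner and outer pads from the raw key, following the
+ * usual HMAC construction: a key longer than the block size is first hashed
+ * down to the digest size, then the (zero-padded) key is XORed with the
+ * ipad/opad constants.
+ */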
+static int safexcel_hmac_init_pad(struct ahash_request *areq,
+				  unsigned int blocksize, const u8 *key,
+				  unsigned int keylen, u8 *ipad, u8 *opad)
+{
+	struct safexcel_ahash_result result;
+	struct scatterlist sg;
+	int ret, i;
+	u8 *keydup;
+
+	if (keylen <= blocksize) {
+		memcpy(ipad, key, keylen);
+	} else {
+		keydup = kmemdup(key, keylen, GFP_KERNEL);
+		if (!keydup)
+			return -ENOMEM;
+
+		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					   safexcel_ahash_complete, &result);
+		sg_init_one(&sg, keydup, keylen);
+		ahash_request_set_crypt(areq, &sg, ipad, keylen);
+		init_completion(&result.completion);
+
+		ret = crypto_ahash_digest(areq);
+		if (ret == -EINPROGRESS || ret == -EBUSY) {
+			wait_for_completion_interruptible(&result.completion);
+			ret = result.error;
+		}
+
+		/* Avoid leaking */
+		memzero_explicit(keydup, keylen);
+		kfree(keydup);
+
+		if (ret)
+			return ret;
+
+		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
+	}
+
+	memset(ipad + keylen, 0, blocksize - keylen);
+	memcpy(opad, ipad, blocksize);
+
+	for (i = 0; i < blocksize; i++) {
+		ipad[i] ^= HMAC_IPAD_VALUE;
+		opad[i] ^= HMAC_OPAD_VALUE;
+	}
+
+	return 0;
+}
+
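+/*
+ * Hash a single ipad/opad block with the ahash backing @areq and export the
+ * resulting intermediate state; it is later used as the precomputed inner or
+ * outer digest for HMAC processing.
+ */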
+static int safexcel_hmac_init_iv(struct ahash_request *areq,
+				 unsigned int blocksize, u8 *pad, void *state)
+{
+	struct safexcel_ahash_result result;
+	struct safexcel_ahash_req *req;
+	struct scatterlist sg;
+	int ret;
+
+	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   safexcel_ahash_complete, &result);
+	sg_init_one(&sg, pad, blocksize);
+	ahash_request_set_crypt(areq, &sg, pad, blocksize);
+	init_completion(&result.completion);
+
+	ret = crypto_ahash_init(areq);
+	if (ret)
+		return ret;
+
+	req = ahash_request_ctx(areq);
+	req->hmac = true;
+	req->last_req = true;
+
+	ret = crypto_ahash_update(areq);
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+		return ret;
+
+	wait_for_completion_interruptible(&result.completion);
+	if (result.error)
+		return result.error;
+
+	return crypto_ahash_export(areq, state);
+}
+
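+/*
+ * Compute the precomputed inner (istate) and outer (ostate) HMAC states for
+ * @key using the ahash implementation named by @alg.
+ */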
+int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
+			 void *istate, void *ostate)
+{
+	struct ahash_request *areq;
+	struct crypto_ahash *tfm;
+	unsigned int blocksize;
+	u8 *ipad, *opad;
+	int ret;
+
+	tfm = crypto_alloc_ahash(alg, 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	areq = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!areq) {
+		ret = -ENOMEM;
+		goto free_ahash;
+	}
+
+	crypto_ahash_clear_flags(tfm, ~0);
+	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+	ipad = kcalloc(2, blocksize, GFP_KERNEL);
+	if (!ipad) {
+		ret = -ENOMEM;
+		goto free_request;
+	}
+
+	opad = ipad + blocksize;
+
+	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
+	if (ret)
+		goto free_ipad;
+
+	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
+	if (ret)
+		goto free_ipad;
+
+	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);
+
+free_ipad:
+	kfree(ipad);
+free_request:
+	ahash_request_free(areq);
+free_ahash:
+	crypto_free_ahash(tfm);
+
+	return ret;
+}
+
+static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
+				    unsigned int keylen, const char *alg,
+				    unsigned int state_sz)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct safexcel_ahash_export_state istate, ostate;
+	int ret, i;
+
+	ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
+	if (ret)
+		return ret;
+
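+	/*
+	 * If the engine caches transform records and a context record already
+	 * exists, a change of the pads makes the cached copy stale: it must
+	 * be invalidated before being used again.
+	 */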
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) {
+		for (i = 0; i < state_sz / sizeof(u32); i++) {
+			if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
+			    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
+				ctx->base.needs_inv = true;
+				break;
+			}
+		}
+	}
+
+	memcpy(ctx->ipad, &istate.state, state_sz);
+	memcpy(ctx->opad, &ostate.state, state_sz);
+
+	return 0;
+}
+
+static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
+				     unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
+					SHA1_DIGEST_SIZE);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha1_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha1_digest,
+		.setkey = safexcel_hmac_sha1_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha1)",
+				.cra_driver_name = "safexcel-hmac-sha1",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_sha256_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = SHA256_H0;
+	req->state[1] = SHA256_H1;
+	req->state[2] = SHA256_H2;
+	req->state[3] = SHA256_H3;
+	req->state[4] = SHA256_H4;
+	req->state[5] = SHA256_H5;
+	req->state[6] = SHA256_H6;
+	req->state[7] = SHA256_H7;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA256_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha256_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha256_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha256 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_sha256_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha256_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA256_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha256",
+				.cra_driver_name = "safexcel-sha256",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_sha224_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = SHA224_H0;
+	req->state[1] = SHA224_H1;
+	req->state[2] = SHA224_H2;
+	req->state[3] = SHA224_H3;
+	req->state[4] = SHA224_H4;
+	req->state[5] = SHA224_H5;
+	req->state[6] = SHA224_H6;
+	req->state[7] = SHA224_H7;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA256_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha224_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha224_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha224 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_sha224_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha224_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA224_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha224",
+				.cra_driver_name = "safexcel-sha224",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
+				       unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
+					SHA256_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha224_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_sha224_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha224_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha224_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha224_digest,
+		.setkey = safexcel_hmac_sha224_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA224_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha224)",
+				.cra_driver_name = "safexcel-hmac-sha224",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
+				     unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
+					SHA256_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha256_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_sha256_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha256_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha256_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha256_digest,
+		.setkey = safexcel_hmac_sha256_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA256_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha256)",
+				.cra_driver_name = "safexcel-hmac-sha256",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_sha512_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = lower_32_bits(SHA512_H0);
+	req->state[1] = upper_32_bits(SHA512_H0);
+	req->state[2] = lower_32_bits(SHA512_H1);
+	req->state[3] = upper_32_bits(SHA512_H1);
+	req->state[4] = lower_32_bits(SHA512_H2);
+	req->state[5] = upper_32_bits(SHA512_H2);
+	req->state[6] = lower_32_bits(SHA512_H3);
+	req->state[7] = upper_32_bits(SHA512_H3);
+	req->state[8] = lower_32_bits(SHA512_H4);
+	req->state[9] = upper_32_bits(SHA512_H4);
+	req->state[10] = lower_32_bits(SHA512_H5);
+	req->state[11] = upper_32_bits(SHA512_H5);
+	req->state[12] = lower_32_bits(SHA512_H6);
+	req->state[13] = upper_32_bits(SHA512_H6);
+	req->state[14] = lower_32_bits(SHA512_H7);
+	req->state[15] = upper_32_bits(SHA512_H7);
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA512_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha512_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha512_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha512 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_sha512_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha512_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA512_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha512",
+				.cra_driver_name = "safexcel-sha512",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_sha384_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = lower_32_bits(SHA384_H0);
+	req->state[1] = upper_32_bits(SHA384_H0);
+	req->state[2] = lower_32_bits(SHA384_H1);
+	req->state[3] = upper_32_bits(SHA384_H1);
+	req->state[4] = lower_32_bits(SHA384_H2);
+	req->state[5] = upper_32_bits(SHA384_H2);
+	req->state[6] = lower_32_bits(SHA384_H3);
+	req->state[7] = upper_32_bits(SHA384_H3);
+	req->state[8] = lower_32_bits(SHA384_H4);
+	req->state[9] = upper_32_bits(SHA384_H4);
+	req->state[10] = lower_32_bits(SHA384_H5);
+	req->state[11] = upper_32_bits(SHA384_H5);
+	req->state[12] = lower_32_bits(SHA384_H6);
+	req->state[13] = upper_32_bits(SHA384_H6);
+	req->state[14] = lower_32_bits(SHA384_H7);
+	req->state[15] = upper_32_bits(SHA384_H7);
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA512_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha384_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha384_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha384 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_sha384_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha384_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA384_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha384",
+				.cra_driver_name = "safexcel-sha384",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
+				       unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
+					SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha512_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_sha512_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha512_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha512_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha512_digest,
+		.setkey = safexcel_hmac_sha512_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA512_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha512)",
+				.cra_driver_name = "safexcel-hmac-sha512",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
+				       unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
+					SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha384_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_sha384_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha384_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha384_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha384_digest,
+		.setkey = safexcel_hmac_sha384_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA384_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha384)",
+				.cra_driver_name = "safexcel-hmac-sha384",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_md5_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = MD5_H0;
+	req->state[1] = MD5_H1;
+	req->state[2] = MD5_H2;
+	req->state[3] = MD5_H3;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = MD5_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_md5_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_md5_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_md5 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_md5_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_md5_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "md5",
+				.cra_driver_name = "safexcel-md5",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_md5_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_md5_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
+				     unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
+					MD5_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_md5_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_md5_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_md5 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_md5_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_md5_digest,
+		.setkey = safexcel_hmac_md5_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(md5)",
+				.cra_driver_name = "safexcel-hmac-md5",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
new file mode 100644
index 0000000..eb75fa6
--- /dev/null
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+
+#include "safexcel.h"
+
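+/*
+ * Allocate the command (CDR) and result (RDR) descriptor rings for one ring
+ * pair. The cd_offset/rd_offset configuration values are expressed in 32-bit
+ * words, hence the sizeof(u32) scaling; base_end points at the last valid
+ * descriptor slot rather than one past the end.
+ */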
+int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
+				   struct safexcel_desc_ring *cdr,
+				   struct safexcel_desc_ring *rdr)
+{
+	cdr->offset = sizeof(u32) * priv->config.cd_offset;
+	cdr->base = dmam_alloc_coherent(priv->dev,
+					cdr->offset * EIP197_DEFAULT_RING_SIZE,
+					&cdr->base_dma, GFP_KERNEL);
+	if (!cdr->base)
+		return -ENOMEM;
+	cdr->write = cdr->base;
+	cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
+	cdr->read = cdr->base;
+
+	rdr->offset = sizeof(u32) * priv->config.rd_offset;
+	rdr->base = dmam_alloc_coherent(priv->dev,
+					rdr->offset * EIP197_DEFAULT_RING_SIZE,
+					&rdr->base_dma, GFP_KERNEL);
+	if (!rdr->base)
+		return -ENOMEM;
+	rdr->write = rdr->base;
+	rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
+	rdr->read = rdr->base;
+
+	return 0;
+}
+
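+/* Spread requests over the available rings in a simple round-robin fashion */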
+inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
+{
+	return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
+}
+
+static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
+				     struct safexcel_desc_ring *ring)
+{
+	void *ptr = ring->write;
+
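+	/*
+	 * The ring is full when the write pointer sits one slot behind the
+	 * read pointer (the second test covers the wrap-around case): one
+	 * slot is always kept free to tell a full ring from an empty one.
+	 */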
+	if ((ring->write == ring->read - ring->offset) ||
+	    (ring->read == ring->base && ring->write == ring->base_end))
+		return ERR_PTR(-ENOMEM);
+
+	if (ring->write == ring->base_end)
+		ring->write = ring->base;
+	else
+		ring->write += ring->offset;
+
+	return ptr;
+}
+
+void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
+			      struct safexcel_desc_ring *ring)
+{
+	void *ptr = ring->read;
+
+	if (ring->write == ring->read)
+		return ERR_PTR(-ENOENT);
+
+	if (ring->read == ring->base_end)
+		ring->read = ring->base;
+	else
+		ring->read += ring->offset;
+
+	return ptr;
+}
+
+inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
+				     int ring)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return rdr->read;
+}
+
+inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+					 int ring)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return (rdr->read - rdr->base) / rdr->offset;
+}
+
+inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+					 int ring,
+					 struct safexcel_result_desc *rdesc)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return ((void *)rdesc - rdr->base) / rdr->offset;
+}
+
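+/*
+ * Give back the most recently reserved descriptor slot, e.g. after a failed
+ * request setup, by stepping the write pointer back one slot (wrapping at
+ * the ring base if needed).
+ */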
+void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
+				 struct safexcel_desc_ring *ring)
+{
+	if (ring->write == ring->read)
+		return;
+
+	if (ring->write == ring->base)
+		ring->write = ring->base_end;
+	else
+		ring->write -= ring->offset;
+}
+
+struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
+						 int ring_id,
+						 bool first, bool last,
+						 dma_addr_t data, u32 data_len,
+						 u32 full_data_len,
+						 dma_addr_t context)
+{
+	struct safexcel_command_desc *cdesc;
+	int i;
+
+	cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
+	if (IS_ERR(cdesc))
+		return cdesc;
+
+	memset(cdesc, 0, sizeof(struct safexcel_command_desc));
+
+	cdesc->first_seg = first;
+	cdesc->last_seg = last;
+	cdesc->particle_size = data_len;
+	cdesc->data_lo = lower_32_bits(data);
+	cdesc->data_hi = upper_32_bits(data);
+
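+	/*
+	 * Only the first segment of a descriptor chain carries control data:
+	 * the total packet length, the 64-bit context record pointer (stored
+	 * with its low two bits dropped) and the token area, which starts out
+	 * filled with no-op tokens.
+	 */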
+	if (first && context) {
+		struct safexcel_token *token =
+			(struct safexcel_token *)cdesc->control_data.token;
+
+		cdesc->control_data.packet_length = full_data_len;
+		cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
+					      EIP197_OPTION_64BIT_CTX |
+					      EIP197_OPTION_CTX_CTRL_IN_CMD;
+		cdesc->control_data.context_lo =
+			(lower_32_bits(context) & GENMASK(31, 2)) >> 2;
+		cdesc->control_data.context_hi = upper_32_bits(context);
+
+		/* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
+		cdesc->control_data.refresh = 2;
+
+		for (i = 0; i < EIP197_MAX_TOKENS; i++)
+			eip197_noop_token(&token[i]);
+	}
+
+	return cdesc;
+}
+
+struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
+						int ring_id,
+						bool first, bool last,
+						dma_addr_t data, u32 len)
+{
+	struct safexcel_result_desc *rdesc;
+
+	rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
+	if (IS_ERR(rdesc))
+		return rdesc;
+
+	memset(rdesc, 0, sizeof(struct safexcel_result_desc));
+
+	rdesc->first_seg = first;
+	rdesc->last_seg = last;
+	rdesc->particle_size = len;
+	rdesc->data_lo = lower_32_bits(data);
+	rdesc->data_hi = upper_32_bits(data);
+
+	return rdesc;
+}