Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 1eb8527..137ed3d 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,7 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+config CRYPTO_DEV_FSL_CAAM_COMMON
+ tristate
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+ tristate
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+ tristate
+
config CRYPTO_DEV_FSL_CAAM
- tristate "Freescale CAAM-Multicore driver backend"
+ tristate "Freescale CAAM-Multicore platform driver backend"
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
select SOC_BUS
+ select CRYPTO_DEV_FSL_CAAM_COMMON
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -12,9 +23,16 @@
To compile this driver as a module, choose M here: the module
will be called caam.
-config CRYPTO_DEV_FSL_CAAM_JR
+if CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+ bool "Enable debug output in CAAM driver"
+ help
+ Selecting this will enable printing of various debug
+ information in the CAAM driver.
+
+menuconfig CRYPTO_DEV_FSL_CAAM_JR
tristate "Freescale CAAM Job Ring driver backend"
- depends on CRYPTO_DEV_FSL_CAAM
default y
help
Enables the driver module for Job Rings which are part of
@@ -25,9 +43,10 @@
To compile this driver as a module, choose M here: the module
will be called caam_jr.
+if CRYPTO_DEV_FSL_CAAM_JR
+
config CRYPTO_DEV_FSL_CAAM_RINGSIZE
int "Job Ring size"
- depends on CRYPTO_DEV_FSL_CAAM_JR
range 2 9
default "9"
help
@@ -45,7 +64,6 @@
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
- depends on CRYPTO_DEV_FSL_CAAM_JR
help
Enable the Job Ring's interrupt coalescing feature.
@@ -74,26 +92,26 @@
threshold. Range is 1-65535.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
- tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
+ bool "Register algorithm implementations with the Crypto API"
default y
+ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
+ select CRYPTO_LIB_DES
help
Selecting this will offload crypto for users of the
scatterlist crypto API (such as the linux native IPSec
stack) to the SEC4 via job ring.
- To compile this as a module, choose M here: the module
- will be called caamalg.
-
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
- tristate "Queue Interface as Crypto API backend"
- depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
+ bool "Queue Interface as Crypto API backend"
+ depends on FSL_DPAA && NET
default y
+ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
help
Selecting this will use CAAM Queue Interface (QI) for sending
& receiving crypto jobs to/from CAAM. This gives better performance
@@ -102,36 +120,26 @@
assigned to the kernel should also be more than the number of
job rings.
- To compile this as a module, choose M here: the module
- will be called caamalg_qi.
-
config CRYPTO_DEV_FSL_CAAM_AHASH_API
- tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
+ bool "Register hash algorithm implementations with Crypto API"
default y
+ select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
select CRYPTO_HASH
help
Selecting this will offload ahash for users of the
scatterlist crypto API to the SEC4 via job ring.
- To compile this as a module, choose M here: the module
- will be called caamhash.
-
config CRYPTO_DEV_FSL_CAAM_PKC_API
- tristate "Register public key cryptography implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
+ bool "Register public key cryptography implementations with Crypto API"
default y
select CRYPTO_RSA
help
Selecting this will allow SEC Public key support for RSA.
Supported cryptographic primitives: encryption, decryption,
signature and verification.
- To compile this as a module, choose M here: the module
- will be called caam_pkc.
config CRYPTO_DEV_FSL_CAAM_RNG_API
- tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
+ bool "Register caam device for hwrng API"
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -139,16 +147,26 @@
Selecting this will register the SEC4 hardware rng to
the hw_random API for supplying the kernel entropy pool.
- To compile this as a module, choose M here: the module
- will be called caamrng.
+endif # CRYPTO_DEV_FSL_CAAM_JR
-config CRYPTO_DEV_FSL_CAAM_DEBUG
- bool "Enable debug output in CAAM driver"
- depends on CRYPTO_DEV_FSL_CAAM
+endif # CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_DPAA2_CAAM
+ tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
+ depends on FSL_MC_DPIO
+ depends on NETDEVICES
+ select CRYPTO_DEV_FSL_CAAM_COMMON
+ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+ select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AUTHENC
+ select CRYPTO_AEAD
+ select CRYPTO_HASH
+ select CRYPTO_DES
help
- Selecting this will enable printing of various debug
- information in the CAAM driver.
+ CAAM driver for QorIQ Data Path Acceleration Architecture 2.
+ It handles DPSECI DPAA2 objects that sit on the Management Complex
+ (MC) fsl-mc bus.
-config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
- def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
- CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
+ To compile this as a module, choose M here: the module
+ will be called dpaa2_caam.
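
The Kconfig rework above folds the per-algorithm tristates (CRYPTO_API, CRYPTO_API_QI, AHASH_API, PKC_API, RNG_API) into bool options under the new CRYPTO_DEV_FSL_CAAM_JR menu, so algorithm registration is compiled into caam_jr instead of living in separate modules, and splits the shared-descriptor code out behind the new *_DESC symbols. A minimal sketch of the pattern this enables is shown below; it is illustrative only, and the helper names are assumptions rather than code taken from this patch:

/* Hypothetical sketch: gate registration at compile time inside caam_jr. */
static int caam_jr_register_algs(struct device *dev)
{
	int err = 0;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API))
		err = caam_algapi_init(dev);		/* assumed helper name */

	if (!err && IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API))
		err = caam_algapi_hash_init(dev);	/* assumed helper name */

	return err;
}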
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index cb652ee..68d5cc0 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,19 +6,28 @@
ccflags-y := -DDEBUG
endif
+ccflags-y += -DVERSION=\"\"
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
-caam-objs := ctrl.o
-caam_jr-objs := jr.o key_gen.o error.o
-caam_pkc-y := caampkc.o pkc_desc.o
+caam-y := ctrl.o
+caam_jr-y := jr.o key_gen.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
+
+caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
ccflags-y += -DCONFIG_CAAM_QI
- caam-objs += qi.o
endif
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
+
+dpaa2_caam-y := caamalg_qi2.o dpseci.o
+dpaa2_caam-$(CONFIG_DEBUG_FS) += dpseci-debugfs.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ec40f99..2912006 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1,8 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
*
* Based on talitos crypto API driver.
*
@@ -71,23 +72,17 @@
#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
CAAM_CMD_SZ * 5)
-#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
+#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
+
+#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
-#ifdef DEBUG
-/* for print_hex_dumps with line references */
-#define debug(format, arg...) printk(format, arg)
-#else
-#define debug(format, arg...)
-#endif
-
-static struct list_head alg_list;
-
struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
bool rfc3686;
bool geniv;
+ bool nodkp;
};
struct caam_aead_alg {
@@ -96,17 +91,21 @@
bool registered;
};
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
/*
* per-session context
*/
struct caam_ctx {
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
- dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
enum dma_data_direction dir;
struct device *jrdev;
@@ -206,6 +205,18 @@
ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
}
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm>
+ * would result in invalid opcodes (last bytes of user key) in
+ * the resulting descriptor. Use DKP<ptr,imm> instead => both
+ * virtual and dma key addresses are needed.
+ */
+ ctx->adata.key_virt = ctx->key;
+ ctx->adata.key_dma = ctx->key_dma;
+
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
data_len[0] = ctx->adata.keylen_pad;
data_len[1] = ctx->cdata.keylen;
@@ -222,16 +233,6 @@
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -254,16 +255,6 @@
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -288,16 +279,6 @@
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -377,6 +358,11 @@
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_gcm_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
gcm_set_sh_desc(authenc);
@@ -440,6 +426,11 @@
unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_rfc4106_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
rfc4106_set_sh_desc(authenc);
@@ -504,12 +495,70 @@
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ if (authsize != 16)
+ return -EINVAL;
+
ctx->authsize = authsize;
rfc4543_set_sh_desc(authenc);
return 0;
}
+static int chachapoly_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ u32 *desc;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, true, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), ctx->dir);
+
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, false, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), ctx->dir);
+
+ return 0;
+}
+
+static int chachapoly_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+
+ if (authsize != POLY1305_DIGEST_SIZE)
+ return -EINVAL;
+
+ ctx->authsize = authsize;
+ return chachapoly_set_sh_desc(aead);
+}
+
+static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
+
+ if (keylen != CHACHA_KEY_SIZE + saltlen) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->cdata.key_virt = key;
+ ctx->cdata.keylen = keylen - saltlen;
+
+ return chachapoly_set_sh_desc(aead);
+}
+
static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
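
The chachapoly_setkey() hunk above derives the Poly1305 salt length from the transform's IV size (saltlen = CHACHAPOLY_IV_SIZE - ivsize) and expects that salt to be appended to the 256-bit ChaCha20 key. Purely as an illustration, not code from this patch, a hypothetical IPsec-style rfc7539esp caller would therefore pass a 36-byte key and a 16-byte tag:

#include <crypto/aead.h>
#include <crypto/chacha.h>
#include <crypto/poly1305.h>

/* Hypothetical caller-side sketch; key_and_salt = 32-byte key + 4-byte salt. */
static int example_rfc7539esp_setup(const u8 *key_and_salt)
{
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("rfc7539esp(chacha20,poly1305)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ivsize is 8 here, so saltlen = CHACHAPOLY_IV_SIZE - 8 = 4 */
	err = crypto_aead_setkey(tfm, key_and_salt, CHACHA_KEY_SIZE + 4);
	if (!err)
		err = crypto_aead_setauthsize(tfm, POLY1305_DIGEST_SIZE);

	crypto_free_aead(tfm);
	return err;
}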
@@ -522,13 +571,11 @@
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
-#ifdef DEBUG
- printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
+ dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
/*
* If DKP is supported, use it in the shared descriptor to generate
@@ -562,11 +609,10 @@
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
keys.enckeylen, ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad + keys.enckeylen, 1);
-#endif
+
+ print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
@@ -578,16 +624,38 @@
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ return err;
+
+ err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
+ aead_setkey(aead, key, keylen);
+
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+}
+
static int gcm_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ int err;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
@@ -601,14 +669,16 @@
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ int err;
- if (keylen < 4)
- return -EINVAL;
+ err = aes_check_keylen(keylen - 4);
+ if (err) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
@@ -627,14 +697,16 @@
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ int err;
- if (keylen < 4)
- return -EINVAL;
+ err = aes_check_keylen(keylen - 4);
+ if (err) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
@@ -648,81 +720,133 @@
return rfc4543_set_sh_desc(aead);
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen, const u32 ctx1_iv_off)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
- const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+ skcipher);
struct device *jrdev = ctx->jrdev;
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc;
- u32 ctx1_iv_off = 0;
- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = (ctr_mode &&
- (strstr(alg_name, "rfc3686") != NULL));
+ const bool is_rfc3686 = alg->caam.rfc3686;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
- /*
- * AES-CTR needs to load IV in CONTEXT1 reg
- * at an offset of 128bits (16bytes)
- * CONTEXT1[255:128] = IV
- */
- if (ctr_mode)
- ctx1_iv_off = 16;
+ print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* skcipher_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), ctx->dir);
+
+ /* skcipher_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), ctx->dir);
+
+ return 0;
+}
+
+static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ int err;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
/*
* RFC3686 specific:
* | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
* | *key = {KEY, NONCE}
*/
- if (is_rfc3686) {
- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
- keylen -= CTR_RFC3686_NONCE_SIZE;
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
}
- ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = key;
- ctx->cdata.key_inline = true;
-
- /* ablkcipher_encrypt shared descriptor */
- desc = ctx->sh_desc_enc;
- cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), ctx->dir);
-
- /* ablkcipher_decrypt shared descriptor */
- desc = ctx->sh_desc_dec;
- cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), ctx->dir);
-
- /* ablkcipher_givencrypt shared descriptor */
- desc = ctx->sh_desc_givenc;
- cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
- desc_bytes(desc), ctx->dir);
-
- return 0;
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ ctx1_iv_off = 16;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return verify_skcipher_des_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return verify_skcipher_des3_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(ablkcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
return -EINVAL;
}
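
The hunks above are part of the conversion from the legacy ablkcipher interface to the skcipher API: per-mode setkey helpers now validate key lengths (aes_check_keylen(), verify_skcipher_des*_key()) before building the shared descriptors. For orientation only, a minimal hypothetical in-kernel user of the resulting "cbc(aes)" transform looks roughly like the following sketch (not code from this patch):

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Hypothetical caller-side sketch; buf must be DMA-able (not on the stack). */
static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Ends up in skcipher_encrypt() -> caam_jr_enqueue() in this driver */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}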
@@ -731,15 +855,15 @@
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* xts_ablkcipher_encrypt shared descriptor */
+ /* xts_skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
- /* xts_ablkcipher_decrypt shared descriptor */
+ /* xts_skcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
@@ -750,6 +874,8 @@
* aead_edesc - s/w-extended aead descriptor
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
@@ -758,6 +884,8 @@
struct aead_edesc {
int src_nents;
int dst_nents;
+ int mapped_src_nents;
+ int mapped_dst_nents;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -765,22 +893,24 @@
};
/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
* @iv_dma: dma address of iv for checking continuity and link table
- * @iv_dir: DMA mapping direction for IV
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
* and IV
*/
-struct ablkcipher_edesc {
+struct skcipher_edesc {
int src_nents;
int dst_nents;
+ int mapped_src_nents;
+ int mapped_dst_nents;
dma_addr_t iv_dma;
- enum dma_data_direction iv_dir;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -790,20 +920,20 @@
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents,
- dma_addr_t iv_dma, int ivsize,
- enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
+ dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
int sec4_sg_bytes)
{
if (dst != src) {
if (src_nents)
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst_nents)
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
if (sec4_sg_bytes)
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
DMA_TO_DEVICE);
@@ -814,20 +944,19 @@
struct aead_request *req)
{
caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
+ edesc->src_nents, edesc->dst_nents, 0, 0,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
-static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst,
edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->iv_dir,
+ edesc->iv_dma, ivsize,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
@@ -836,21 +965,20 @@
{
struct aead_request *req = context;
struct aead_edesc *edesc;
+ int ecode = 0;
-#ifdef DEBUG
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
aead_unmap(jrdev, edesc, req);
kfree(edesc);
- aead_request_complete(req, err);
+ aead_request_complete(req, ecode);
}
static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -858,110 +986,100 @@
{
struct aead_request *req = context;
struct aead_edesc *edesc;
+ int ecode = 0;
-#ifdef DEBUG
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
aead_unmap(jrdev, edesc, req);
- /*
- * verify hw auth check passed else return -EBADMSG
- */
- if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
- err = -EBADMSG;
-
kfree(edesc);
- aead_request_complete(req, err);
+ aead_request_complete(req, ecode);
}
-static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct ablkcipher_request *req = context;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_request *req = context;
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+ int ecode = 0;
-#ifdef DEBUG
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+ edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- edesc->src_nents > 1 ? 100 : ivsize, 1);
-#endif
- caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
-
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
/*
- * The crypto API expects us to set the IV (req->info) to the last
- * ciphertext block. This is used e.g. by the CTS mode.
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
- ivsize, 0);
-
- /* In case initial IV was generated, copy it in GIVCIPHER request */
- if (edesc->iv_dir == DMA_FROM_DEVICE) {
- u8 *iv;
- struct skcipher_givcrypt_request *greq;
-
- greq = container_of(req, struct skcipher_givcrypt_request,
- creq);
- iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
- edesc->sec4_sg_bytes;
- memcpy(greq->giv, iv, ivsize);
+ if (ivsize && !ecode) {
+ memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+ ivsize);
+ print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
}
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
+
kfree(edesc);
- ablkcipher_request_complete(req, err);
+ skcipher_request_complete(req, ecode);
}
-static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct ablkcipher_request *req = context;
- struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_request *req = context;
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+ int ecode = 0;
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
- edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+ edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
-#endif
- caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ skcipher_unmap(jrdev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
+ */
+ if (ivsize && !ecode) {
+ memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+ ivsize);
+
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ ivsize, 1);
+ }
+
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- ablkcipher_unmap(jrdev, edesc, req);
kfree(edesc);
- ablkcipher_request_complete(req, err);
+ skcipher_request_complete(req, ecode);
}
/*
@@ -988,11 +1106,12 @@
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (all_contig) {
- src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
+ src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
+ 0;
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index += edesc->mapped_src_nents;
in_options = LDST_SGF;
}
@@ -1003,8 +1122,12 @@
out_options = in_options;
if (unlikely(req->src != req->dst)) {
- if (edesc->dst_nents == 1) {
+ if (!edesc->mapped_dst_nents) {
+ dst_dma = 0;
+ out_options = 0;
+ } else if (edesc->mapped_dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
+ out_options = 0;
} else {
dst_dma = edesc->sec4_sg_dma +
sec4_sg_index *
@@ -1053,6 +1176,40 @@
/* End of blank commands */
}
+static void init_chachapoly_job(struct aead_request *req,
+ struct aead_edesc *edesc, bool all_contig,
+ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ unsigned int assoclen = req->assoclen;
+ u32 *desc = edesc->hw_desc;
+ u32 ctx_iv_off = 4;
+
+ init_aead_job(req, edesc, all_contig, encrypt);
+
+ if (ivsize != CHACHAPOLY_IV_SIZE) {
+ /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
+ ctx_iv_off += 4;
+
+ /*
+ * The associated data comes already with the IV but we need
+ * to skip it when we authenticate or encrypt...
+ */
+ assoclen -= ivsize;
+ }
+
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
+
+ /*
+ * For IPsec load the IV further in the same register.
+ * For RFC7539 simply load the 12 bytes nonce in a single operation
+ */
+ append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ctx_iv_off << LDST_OFFSET_SHIFT);
+}
+
static void init_authenc_job(struct aead_request *req,
struct aead_edesc *edesc,
bool all_contig, bool encrypt)
@@ -1103,90 +1260,59 @@
}
/*
- * Fill in ablkcipher job descriptor
+ * Fill in skcipher job descriptor
*/
-static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void init_skcipher_job(struct skcipher_request *req,
+ struct skcipher_edesc *edesc,
+ const bool encrypt)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *jrdev = ctx->jrdev;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc = edesc->hw_desc;
- u32 out_options = 0;
- dma_addr_t dst_dma;
- int len;
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
- pr_err("asked=%d, nbytes%d\n",
- (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
-#endif
- caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
-
- len = desc_len(sh_desc);
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-
- append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
- LDST_SGF);
-
- if (likely(req->src == req->dst)) {
- dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- } else {
- if (edesc->dst_nents == 1) {
- dst_dma = sg_dma_address(req->dst);
- } else {
- dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
- sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
- }
- append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
-}
-
-/*
- * Fill in ablkcipher givencrypt job descriptor
- */
-static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- u32 *desc = edesc->hw_desc;
- u32 in_options;
- dma_addr_t dst_dma, src_dma;
+ u32 *sh_desc;
+ u32 in_options = 0, out_options = 0;
+ dma_addr_t src_dma, dst_dma, ptr;
int len, sec4_sg_index = 0;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
-#endif
- caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+ print_hex_dump_debug("presciv@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+ dev_dbg(jrdev, "asked=%d, cryptlen%d\n",
+ (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
+
+ caam_dump_sg("src @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+
+ sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+ ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- if (edesc->src_nents == 1) {
- src_dma = sg_dma_address(req->src);
- in_options = 0;
- } else {
+ if (ivsize || edesc->mapped_src_nents > 1) {
src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
+ sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
in_options = LDST_SGF;
+ } else {
+ src_dma = sg_dma_address(req->src);
}
- append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
- dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
- sizeof(struct sec4_sg_entry);
- append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
+ append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
+
+ if (likely(req->src == req->dst)) {
+ dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
+ out_options = in_options;
+ } else if (!ivsize && edesc->mapped_dst_nents == 1) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
+ }
+
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
}
/*
@@ -1202,37 +1328,36 @@
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ int src_len, dst_len = 0;
struct aead_edesc *edesc;
int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
unsigned int authsize = ctx->authsize;
if (unlikely(req->dst != req->src)) {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen);
+ src_len = req->assoclen + req->cryptlen;
+ dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen);
+ src_len);
return ERR_PTR(src_nents);
}
- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize :
- (-authsize)));
+ dst_nents = sg_nents_for_len(req->dst, dst_len);
if (unlikely(dst_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : (-authsize)));
+ dst_len);
return ERR_PTR(dst_nents);
}
} else {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len = req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0);
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len);
return ERR_PTR(src_nents);
}
}
@@ -1257,17 +1382,32 @@
mapped_src_nents = 0;
}
- mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(jrdev, "unable to map destination\n");
- dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
+ /* Cover also the case of null (zero length) output data */
+ if (dst_nents) {
+ mapped_dst_nents = dma_map_sg(jrdev, req->dst,
+ dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_dst_nents = 0;
}
}
+ /*
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries.
+ */
sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
- sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ if (mapped_dst_nents > 1)
+ sec4_sg_len += pad_sg_nents(mapped_dst_nents);
+ else
+ sec4_sg_len = pad_sg_nents(sec4_sg_len);
+
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
@@ -1275,24 +1415,26 @@
GFP_DMA | flags);
if (!edesc) {
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
+ edesc->mapped_src_nents = mapped_src_nents;
+ edesc->mapped_dst_nents = mapped_dst_nents;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
*all_contig_ptr = !(mapped_src_nents > 1);
sec4_sg_index = 0;
if (mapped_src_nents > 1) {
- sg_to_sec4_sg_last(req->src, mapped_src_nents,
+ sg_to_sec4_sg_last(req->src, src_len,
edesc->sec4_sg + sec4_sg_index, 0);
sec4_sg_index += mapped_src_nents;
}
if (mapped_dst_nents > 1) {
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+ sg_to_sec4_sg_last(req->dst, dst_len,
edesc->sec4_sg + sec4_sg_index, 0);
}
@@ -1330,11 +1472,10 @@
/* Create and submit job descriptor */
init_gcm_job(req, edesc, all_contig, true);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
@@ -1348,12 +1489,75 @@
return ret;
}
+static int chachapoly_encrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool all_contig;
+ u32 *desc;
+ int ret;
+
+ edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
+ true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ desc = edesc->hw_desc;
+
+ init_chachapoly_job(req, edesc, all_contig, true);
+ print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int chachapoly_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool all_contig;
+ u32 *desc;
+ int ret;
+
+ edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
+ false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ desc = edesc->hw_desc;
+
+ init_chachapoly_job(req, edesc, all_contig, false);
+ print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
static int ipsec_gcm_encrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return gcm_encrypt(req);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
}
static int aead_encrypt(struct aead_request *req)
@@ -1374,11 +1578,10 @@
/* Create and submit job descriptor */
init_authenc_job(req, edesc, all_contig, true);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
@@ -1409,11 +1612,10 @@
/* Create and submit job descriptor*/
init_gcm_job(req, edesc, all_contig, false);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
@@ -1429,10 +1631,7 @@
static int ipsec_gcm_decrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return gcm_decrypt(req);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
}
static int aead_decrypt(struct aead_request *req)
@@ -1445,7 +1644,7 @@
u32 *desc;
int ret = 0;
- caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
+ caam_dump_sg("dec src@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
req->assoclen + req->cryptlen, 1);
@@ -1457,11 +1656,10 @@
/* Create and submit job descriptor*/
init_authenc_job(req, edesc, all_contig, false);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+
+ print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
desc = edesc->hw_desc;
ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
@@ -1476,35 +1674,35 @@
}
/*
- * allocate and map the ablkcipher extended descriptor for ablkcipher
+ * allocate and map the skcipher extended descriptor for skcipher
*/
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
- *req, int desc_bytes)
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+ int desc_bytes)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma;
+ struct skcipher_edesc *edesc;
+ dma_addr_t iv_dma = 0;
u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
- src_nents = sg_nents_for_len(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(src_nents);
}
if (req->dst != req->src) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (unlikely(dst_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(dst_nents);
}
}
@@ -1523,7 +1721,6 @@
dev_err(jrdev, "unable to map source\n");
return ERR_PTR(-ENOMEM);
}
-
mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
DMA_FROM_DEVICE);
if (unlikely(!mapped_dst_nents)) {
@@ -1533,9 +1730,34 @@
}
}
- sec4_sg_ents = 1 + mapped_src_nents;
+ if (!ivsize && mapped_src_nents == 1)
+ sec4_sg_ents = 0; // no need for an input hw s/g table
+ else
+ sec4_sg_ents = mapped_src_nents + !!ivsize;
dst_sg_idx = sec4_sg_ents;
- sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+
+ /*
+ * Input, output HW S/G tables: [IV, src][dst, IV]
+ * IV entries point to the same buffer
+ * If src == dst, S/G entries are reused (S/G tables overlap)
+ *
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries. Logic:
+ * if (output S/G)
+ * pad output S/G, if needed
+ * else if (input S/G) ...
+ * pad input S/G, if needed
+ */
+ if (ivsize || mapped_dst_nents > 1) {
+ if (req->src == req->dst)
+ sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
+ else
+ sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
+ !!ivsize);
+ } else {
+ sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
+ }
+
sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
/*
@@ -1546,415 +1768,310 @@
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
+ edesc->mapped_src_nents = mapped_src_nents;
+ edesc->mapped_dst_nents = mapped_dst_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
desc_bytes);
- edesc->iv_dir = DMA_TO_DEVICE;
/* Make sure IV is located in a DMAable area */
- iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
- memcpy(iv, req->info, ivsize);
+ if (ivsize) {
+ iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
+ memcpy(iv, req->iv, ivsize);
- iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
+ iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
}
+ if (dst_sg_idx)
+ sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
+ !!ivsize, 0);
- dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
+ if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
+ sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
+ dst_sg_idx, 0);
- if (mapped_dst_nents > 1) {
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
- edesc->sec4_sg + dst_sg_idx, 0);
- }
+ if (ivsize)
+ dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
+ mapped_dst_nents, iv_dma, ivsize, 0);
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
+ if (ivsize || mapped_dst_nents > 1)
+ sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
+ mapped_dst_nents);
+
+ if (sec4_sg_bytes) {
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents,
+ dst_nents, iv_dma, ivsize, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
}
edesc->iv_dma = iv_dma;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
- sec4_sg_bytes, 1);
-#endif
+ print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+ sec4_sg_bytes, 1);
return edesc;
}
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
+ if (!req->cryptlen)
+ return 0;
+
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
+ init_skcipher_job(req, edesc, true);
+
+ print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+
desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
}
return ret;
}
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
+ if (!req->cryptlen)
+ return 0;
+
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- /*
- * The crypto API expects us to set the IV (req->info) to the last
- * ciphertext block.
- */
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
- ivsize, 0);
-
/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
+ init_skcipher_job(req, edesc, false);
desc = edesc->hw_desc;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+ print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
}
return ret;
}
-/*
- * allocate and map the ablkcipher extended descriptor
- * for ablkcipher givencrypt
- */
-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
- struct skcipher_givcrypt_request *greq,
- int desc_bytes)
-{
- struct ablkcipher_request *req = &greq->creq;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
- struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma;
- u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
-
- src_nents = sg_nents_for_len(req->src, req->nbytes);
- if (unlikely(src_nents < 0)) {
- dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
- return ERR_PTR(src_nents);
- }
-
- if (likely(req->src == req->dst)) {
- mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
- DMA_BIDIRECTIONAL);
- if (unlikely(!mapped_src_nents)) {
- dev_err(jrdev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = src_nents;
- mapped_dst_nents = src_nents;
- } else {
- mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
- DMA_TO_DEVICE);
- if (unlikely(!mapped_src_nents)) {
- dev_err(jrdev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
- if (unlikely(dst_nents < 0)) {
- dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
- return ERR_PTR(dst_nents);
- }
-
- mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(jrdev, "unable to map destination\n");
- dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
- }
- }
-
- sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
- dst_sg_idx = sec4_sg_ents;
- sec4_sg_ents += 1 + mapped_dst_nents;
-
- /*
- * allocate space for base edesc and hw desc commands, link tables, IV
- */
- sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
- GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
- desc_bytes);
- edesc->iv_dir = DMA_FROM_DEVICE;
-
- /* Make sure IV is located in a DMAable area */
- iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
- iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
- }
-
- if (mapped_src_nents > 1)
- sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
- 0);
-
- dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
- dst_sg_idx + 1, 0);
-
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
- }
- edesc->iv_dma = iv_dma;
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
- sec4_sg_bytes, 1);
-#endif
-
- return edesc;
-}
-
-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *jrdev = ctx->jrdev;
- u32 *desc;
- int ret = 0;
-
- /* allocate extended descriptor */
- edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- /* Create and submit job descriptor*/
- init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
- edesc, req);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
-
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ablkcipher_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
-
- return ret;
-}
-
-#define template_aead template_u.aead
-#define template_ablkcipher template_u.ablkcipher
-struct caam_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- u32 type;
- union {
- struct ablkcipher_alg ablkcipher;
- } template_u;
- u32 class1_alg_type;
- u32 class2_alg_type;
-};
-
-static struct caam_alg_template driver_algs[] = {
- /* ablkcipher descriptor */
+static struct caam_skcipher_alg driver_algs[] = {
{
- .name = "cbc(aes)",
- .driver_name = "cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aes_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des3_ede)",
- .driver_name = "cbc-3des-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des)",
- .driver_name = "cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = des_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
},
{
- .name = "ctr(aes)",
- .driver_name = "ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "chainiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = ctr_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "rfc3686(ctr(aes))",
- .driver_name = "rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc3686_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
+ },
},
{
- .name = "xts(aes)",
- .driver_name = "xts-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = xts_ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ .setkey = des_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aes_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ecb(arc4)",
+ .cra_driver_name = "ecb-arc4-caam",
+ .cra_blocksize = ARC4_BLOCK_SIZE,
+ },
+ .setkey = arc4_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = ARC4_MIN_KEY_SIZE,
+ .max_keysize = ARC4_MAX_KEY_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB,
},
};
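
The table above replaces the old ablkcipher template array: each entry is now a ready-made struct skcipher_alg plus the CAAM-specific class1 descriptor type, and the transforms are reached through the generic crypto API lookup. For orientation only, a minimal consumer-side sketch follows; the function name, buffers and error handling are assumptions and are not part of the patch.

/* Illustrative only: encrypt one CBC-AES block through whichever provider
 * (presumably cbc-aes-caam once this driver is loaded) wins the
 * priority-based "cbc(aes)" lookup. */
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int demo_cbc_aes(const u8 *key, unsigned int keylen, u8 *iv, u8 *buf)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_tfm;
	}

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto free_req;

	/* buf and iv must be DMA-able (e.g. kmalloc'd), since CAAM does DMA */
	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* these algs are CRYPTO_ALG_ASYNC, so wait for job ring completion */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

free_req:
	skcipher_request_free(req);
free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}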
@@ -1975,6 +2092,7 @@
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
{
@@ -1993,6 +2111,7 @@
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* Galois Counter Mode */
@@ -2012,6 +2131,7 @@
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* single-pass ipsec_esp descriptor */
@@ -2413,7 +2533,7 @@
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2435,7 +2555,7 @@
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2458,7 +2578,7 @@
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2481,7 +2601,7 @@
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2504,7 +2624,7 @@
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2527,7 +2647,7 @@
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2550,7 +2670,7 @@
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2573,7 +2693,7 @@
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2596,7 +2716,7 @@
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2619,7 +2739,7 @@
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2642,7 +2762,7 @@
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2665,7 +2785,7 @@
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -3237,12 +3357,52 @@
.geniv = true,
},
},
-};
-
-struct caam_crypto_alg {
- struct crypto_alg crypto_alg;
- struct list_head entry;
- struct caam_alg_entry caam;
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc7539(chacha20,poly1305)",
+ .cra_driver_name = "rfc7539-chacha20-poly1305-"
+ "caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = chachapoly_setkey,
+ .setauthsize = chachapoly_setauthsize,
+ .encrypt = chachapoly_encrypt,
+ .decrypt = chachapoly_decrypt,
+ .ivsize = CHACHAPOLY_IV_SIZE,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+ OP_ALG_AAI_AEAD,
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+ OP_ALG_AAI_AEAD,
+ .nodkp = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc7539esp(chacha20,poly1305)",
+ .cra_driver_name = "rfc7539esp-chacha20-"
+ "poly1305-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = chachapoly_setkey,
+ .setauthsize = chachapoly_setauthsize,
+ .encrypt = chachapoly_encrypt,
+ .decrypt = chachapoly_decrypt,
+ .ivsize = 8,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+ OP_ALG_AAI_AEAD,
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+ OP_ALG_AAI_AEAD,
+ .nodkp = true,
+ },
+ },
};
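
The two new entries above expose ChaCha20-Poly1305 both as the generic rfc7539 AEAD and as the IPsec-ESP (rfc7539esp) variant. A hedged consumer sketch, assuming the standard AEAD API conventions (32-byte key and 16-byte Poly1305 tag per RFC 7539; helper name and buffers are illustrative, not part of the patch):

/* Illustrative only: allocate the rfc7539 AEAD and prepare a request for it. */
#include <crypto/aead.h>

static struct aead_request *demo_chachapoly_prepare(const u8 key[32])
{
	struct crypto_aead *tfm;
	struct aead_request *req;

	tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (crypto_aead_setkey(tfm, key, 32) ||
	    crypto_aead_setauthsize(tfm, 16)) {	/* full Poly1305 tag */
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_aead(tfm);
		return ERR_PTR(-ENOMEM);
	}

	/* the caller then sets src/dst via aead_request_set_crypt() and the
	 * associated-data length via aead_request_set_ad() */
	return req;
}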
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
@@ -3276,8 +3436,6 @@
ctx->sh_desc_enc_dma = dma_addr;
ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
sh_desc_dec);
- ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
- sh_desc_givenc);
ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
/* copy descriptor header template value */
@@ -3287,14 +3445,14 @@
return 0;
}
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct caam_crypto_alg *caam_alg =
- container_of(alg, struct caam_crypto_alg, crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
- return caam_init_common(ctx, &caam_alg->caam, false);
+ return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+ false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -3304,8 +3462,7 @@
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam,
- alg->setkey == aead_setkey);
+ return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
@@ -3316,9 +3473,9 @@
caam_jr_free(ctx->jrdev);
}
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_tfm_ctx(tfm));
+ caam_exit_common(crypto_skcipher_ctx(tfm));
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -3326,10 +3483,8 @@
caam_exit_common(crypto_aead_ctx(tfm));
}
-static void __exit caam_algapi_exit(void)
+void caam_algapi_exit(void)
{
-
- struct caam_crypto_alg *t_alg, *n;
int i;
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -3339,57 +3494,25 @@
crypto_unregister_aead(&t_alg->aead);
}
- if (!alg_list.next)
- return;
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
- list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
- crypto_unregister_alg(&t_alg->crypto_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
- *template)
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
- struct caam_crypto_alg *t_alg;
- struct crypto_alg *alg;
+ struct skcipher_alg *alg = &t_alg->skcipher;
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg) {
- pr_err("failed to allocate t_alg\n");
- return ERR_PTR(-ENOMEM);
- }
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- alg = &t_alg->crypto_alg;
-
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
- alg->cra_module = THIS_MODULE;
- alg->cra_init = caam_cra_init;
- alg->cra_exit = caam_cra_exit;
- alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_blocksize = template->blocksize;
- alg->cra_alignmask = 0;
- alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- template->type;
- switch (template->type) {
- case CRYPTO_ALG_TYPE_GIVCIPHER:
- alg->cra_type = &crypto_givcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- }
-
- t_alg->caam.class1_alg_type = template->class1_alg_type;
- t_alg->caam.class2_alg_type = template->class2_alg_type;
-
- return t_alg;
+ alg->init = caam_cra_init;
+ alg->exit = caam_cra_exit;
}
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -3405,62 +3528,65 @@
alg->exit = caam_aead_exit;
}
-static int __init caam_algapi_init(void)
+int caam_algapi_init(struct device *ctrldev)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
int i = 0, err = 0;
- u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
+ u32 arc4_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
- bool registered = false;
-
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev) {
- of_node_put(dev_node);
- return -ENODEV;
- }
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv)
- return -ENODEV;
-
-
- INIT_LIST_HEAD(&alg_list);
+ bool registered = false, gcm_support;
/*
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
*/
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
- des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
- aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
- md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ if (priv->era < 10) {
+ u32 cha_vid, cha_inst, aes_rn;
+
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
+ md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
+ CHA_ID_LS_DES_SHIFT;
+ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >>
+ CHA_ID_LS_ARC4_SHIFT;
+ ccha_inst = 0;
+ ptha_inst = 0;
+
+ aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
+ CHA_ID_LS_AES_MASK;
+ gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
+ } else {
+ u32 aesa, mdha;
+
+ aesa = rd_reg32(&priv->ctrl->vreg.aesa);
+ mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+ aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+
+ des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+ aes_inst = aesa & CHA_VER_NUM_MASK;
+ md_inst = mdha & CHA_VER_NUM_MASK;
+ ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
+ ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
+ arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
+
+ gcm_support = aesa & CHA_VER_MISC_AES_GCM;
+ }
/* If MD is present, limit digest size based on LP256 */
- if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+ if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- struct caam_crypto_alg *t_alg;
- struct caam_alg_template *alg = driver_algs + i;
- u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
/* Skip DES algorithms if not supported by device */
if (!des_inst &&
@@ -3472,31 +3598,29 @@
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
+ /* Skip ARC4 algorithms if not supported by device */
+ if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4)
+ continue;
+
/*
* Check support for AES modes not available
* on LP devices.
*/
- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
- if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_XTS)
- continue;
-
- t_alg = caam_alg_alloc(alg);
- if (IS_ERR(t_alg)) {
- err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n", alg->driver_name);
+ if (aes_vid == CHA_VER_VID_AES_LP &&
+ (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_XTS)
continue;
- }
- err = crypto_register_alg(&t_alg->crypto_alg);
+ caam_skcipher_alg_init(t_alg);
+
+ err = crypto_register_skcipher(&t_alg->skcipher);
if (err) {
pr_warn("%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
- kfree(t_alg);
+ t_alg->skcipher.base.cra_driver_name);
continue;
}
- list_add_tail(&t_alg->entry, &alg_list);
+ t_alg->registered = true;
registered = true;
}
@@ -3518,21 +3642,26 @@
if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
continue;
- /*
- * Check support for AES algorithms not available
- * on LP devices.
- */
- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
- if (alg_aai == OP_ALG_AAI_GCM)
- continue;
+ /* Skip CHACHA20 algorithms if not supported by device */
+ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
+ continue;
+
+ /* Skip POLY1305 algorithms if not supported by device */
+ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
+ continue;
+
+ /* Skip GCM algorithms if not supported by device */
+ if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
+ alg_aai == OP_ALG_AAI_GCM && !gcm_support)
+ continue;
/*
* Skip algorithms requiring message digests
* if MD or MD size is not supported by device.
*/
- if (c2_alg_sel &&
- (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
- continue;
+ if (is_mdha(c2_alg_sel) &&
+ (!md_inst || t_alg->aead.maxauthsize > md_limit))
+ continue;
caam_aead_alg_init(t_alg);
@@ -3552,10 +3681,3 @@
return err;
}
-
-module_init(caam_algapi_init);
-module_exit(caam_algapi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("FSL CAAM support for crypto API");
-MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
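
With the module_init/module_exit boilerplate removed, caamalg now builds as a library object and the exported caam_algapi_init()/caam_algapi_exit() pair is expected to be driven from the platform side of the driver elsewhere in this series. A hedged sketch of the intended call shape (the helper name is assumed; only the exported signatures come from this patch):

/* Hypothetical caller sketch: register the algorithms once the controller's
 * private data (caam_drv_private) is available, tear down on remove. */
static int register_caam_algs(struct device *ctrldev)
{
	int err;

	err = caam_algapi_init(ctrldev);	/* ctrldev: CAAM controller device */
	if (err)
		return err;

	/* ... and call caam_algapi_exit() on the corresponding remove path */
	return 0;
}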
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index a408edd..aa9ccca 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Shared descriptors for aead, ablkcipher algorithms
+ * Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
*/
#include "compat.h"
@@ -32,12 +33,11 @@
}
jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
+ append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT);
uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
set_jump_tgt_here(desc, jump_cmd);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT |
+ OP_ALG_AAI_DK);
set_jump_tgt_here(desc, uncond_jump_cmd);
}
@@ -114,11 +114,9 @@
append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead null enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
@@ -203,11 +201,9 @@
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead null dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
@@ -357,10 +353,9 @@
append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
@@ -474,10 +469,9 @@
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
@@ -509,6 +503,7 @@
const bool is_qi, int era)
{
u32 geniv, moveiv;
+ u32 *wait_cmd;
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
@@ -604,6 +599,14 @@
/* Will read cryptlen */
append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * Wait for IV transfer (ofifo -> class2) to finish before starting
+ * ciphertext transfer (ofifo -> external memory).
+ */
+ wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_cmd);
+
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
@@ -612,11 +615,9 @@
append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead givenc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("aead givenc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
@@ -741,10 +742,9 @@
append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("gcm enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
@@ -837,10 +837,9 @@
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("gcm dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
@@ -853,13 +852,16 @@
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_qi: true when called from caam/qi
+ *
+ * Input sequence: AAD | PTXT
+ * Output sequence: AAD | CTXT | ICV
+ * AAD length (assoclen), which includes the IV length, is available in Math3.
*/
void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, unsigned int icvsize,
const bool is_qi)
{
- u32 *key_jump_cmd;
-
+ u32 *key_jump_cmd, *zero_cryptlen_jump_cmd, *skip_instructions;
init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Skip key loading if it is loaded due to sharing */
@@ -902,24 +904,26 @@
append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- /* Read assoc data */
+ /* Skip AAD */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* Read cryptlen and set this value into VARSEQOUTLEN */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* If cryptlen is ZERO jump to AAD command */
+ zero_cryptlen_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* Read AAD data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA);
+
/* Skip IV */
append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, VARSEQOUTLEN, REG0, CAAM_CMD_SZ);
/* Write encrypted data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
@@ -928,15 +932,25 @@
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+ /* Jump instructions to avoid double reading of AAD */
+ skip_instructions = append_jump(desc, JUMP_TEST_ALL);
+
+ /* There is no input data, cryptlen = 0 */
+ set_jump_tgt_here(desc, zero_cryptlen_jump_cmd);
+
+ /* Read AAD */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+
+ set_jump_tgt_here(desc, skip_instructions);
+
/* Write ICV */
append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("rfc4106 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
@@ -1029,11 +1043,9 @@
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("rfc4106 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
@@ -1114,11 +1126,9 @@
append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("rfc4543 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
@@ -1204,19 +1214,147 @@
append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("rfc4543 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
+/**
+ * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and
+ * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared
+ * descriptor (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with
+ * OP_ALG_AAI_AEAD.
+ * @adata: pointer to authentication transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with
+ * OP_ALG_AAI_AEAD.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @encap: true if encapsulation, false if decapsulation
+ * @is_qi: true when called from caam/qi
*/
-static inline void ablkcipher_append_src_dst(u32 *desc)
+void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool encap,
+ const bool is_qi)
+{
+ u32 *key_jump_cmd, *wait_cmd;
+ u32 nfifo;
+ const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE);
+
+ /* Note: Context registers are saved. */
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+ /* skip key loading if they are loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* For IPsec load the salt from keymat in the context register */
+ if (is_ipsec)
+ append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4,
+ LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
+ 4 << LDST_OFFSET_SHIFT);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 and 1 operations: Poly & ChaCha */
+ if (encap) {
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+ } else {
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ }
+
+ if (is_qi) {
+ u32 *wait_load_cmd;
+ u32 ctx1_iv_off = is_ipsec ? 8 : 4;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ 4 << LDST_OFFSET_SHIFT);
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ctx1_iv_off << LDST_OFFSET_SHIFT);
+ }
+
+ /*
+ * MAGIC with NFIFO
+ * Read the associated data from the input and send it to the class1 and
+ * class2 alignment blocks. From class1, send the data to the output FIFO
+ * and then write it to memory, since the AAD does not need to be encrypted.
+ */
+ nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 |
+ NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND;
+ append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3);
+
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO |
+ FIFOLD_CLASS_CLASS1 | LDST_VLF);
+ append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK |
+ MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
+
+ /* IPsec - copy IV at the output */
+ if (is_ipsec)
+ append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA |
+ 0x2 << 25);
+
+ wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
+ JUMP_COND_NOP | JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, wait_cmd);
+
+ if (encap) {
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0,
+ CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+ } else {
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0,
+ CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0,
+ CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV for verification */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ }
+
+ print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+}
+EXPORT_SYMBOL(cnstr_shdsc_chachapoly);
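
cnstr_shdsc_chachapoly() is consumed from the setkey paths in caamalg.c/caamalg_qi.c. The sketch below shows the assumed call shape for the non-QI encapsulation descriptor; the struct alginfo field usage mirrors the callers visible in this patch, while the buffer names are illustrative only.

/* Illustrative only: build the rfc7539 encap shared descriptor into a
 * per-session buffer. For the ESP variant the 4-byte salt is appended to
 * the key material and ivsize is 8 instead of CHACHAPOLY_IV_SIZE. */
struct alginfo cdata = {
	.algtype  = OP_ALG_ALGSEL_CHACHA20 | OP_ALG_AAI_AEAD,
	.key_virt = ctx_key,		/* assumed DMA-able key buffer */
	.keylen   = 32,			/* ChaCha20 key */
};
struct alginfo adata = {
	.algtype  = OP_ALG_ALGSEL_POLY1305 | OP_ALG_AAI_AEAD,
};

cnstr_shdsc_chachapoly(sh_desc_enc, &cdata, &adata, CHACHAPOLY_IV_SIZE,
		       POLY1305_DIGEST_SIZE, true /* encap */,
		       false /* not caam/qi */);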
+
+/* For skcipher encrypt and decrypt, read from req->src and write to req->dst */
+static inline void skcipher_append_src_dst(u32 *desc)
{
append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1226,18 +1364,19 @@
}
/**
- * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
+ * - OP_ALG_ALGSEL_CHACHA20
* @ivsize: initialization vector size
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
*/
-void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
+void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
@@ -1264,9 +1403,11 @@
set_jump_tgt_here(desc, key_jump_cmd);
- /* Load iv */
- append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ /* Load IV, if there is one */
+ if (ivsize)
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
/* Load counter into CONTEXT1 reg */
if (is_rfc3686)
@@ -1276,33 +1417,38 @@
LDST_OFFSET_SHIFT));
/* Load operation */
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
OP_ALG_ENCRYPT);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ /* Store IV */
+ if (ivsize)
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
+
+ print_hex_dump_debug("skcipher enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
+EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);
/**
- * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * cnstr_shdsc_skcipher_decap - skcipher decapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
+ * - OP_ALG_ALGSEL_CHACHA20
* @ivsize: initialization vector size
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
*/
-void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
+void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
@@ -1329,9 +1475,11 @@
set_jump_tgt_here(desc, key_jump_cmd);
- /* load IV */
- append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ /* Load IV, if there is one */
+ if (ivsize)
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
/* Load counter into CONTEXT1 reg */
if (is_rfc3686)
@@ -1342,111 +1490,33 @@
/* Choose operation */
if (ctx1_iv_off)
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
OP_ALG_DECRYPT);
else
append_dec_op1(desc, cdata->algtype);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ /* Store IV */
+ if (ivsize)
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
+
+ print_hex_dump_debug("skcipher dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
+EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
/**
- * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
- * with HW-generated initialization vector.
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC.
- * @ivsize: initialization vector size
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @ctx1_iv_off: IV offset in CONTEXT1 register
- */
-void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
-{
- u32 *key_jump_cmd, geniv;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
- (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
- MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Copy generated IV to memory */
- append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- if (ctx1_iv_off)
- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
- (1 << JUMP_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
-}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
-
-/**
- * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
- * descriptor
+ * cnstr_shdsc_xts_skcipher_encap - xts skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
*/
-void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
{
__be64 sector_size = cpu_to_be64(512);
u32 *key_jump_cmd;
@@ -1481,24 +1551,25 @@
OP_ALG_ENCRYPT);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ /* Store upper 8B of IV */
+ append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+
+ print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__)
+ ": ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
}
-EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
/**
- * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
- * descriptor
+ * cnstr_shdsc_xts_skcipher_decap - xts skcipher decapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
*/
-void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
{
__be64 sector_size = cpu_to_be64(512);
u32 *key_jump_cmd;
@@ -1532,15 +1603,17 @@
append_dec_op1(desc, cdata->algtype);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ /* Store upper 8B of IV */
+ append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+
+ print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__)
+ ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
}
-EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
+EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM descriptor support");
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index a917af5..f289339 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Shared descriptors for aead, ablkcipher algorithms
+ * Shared descriptors for aead, skcipher algorithms
*
* Copyright 2016 NXP
*/
@@ -12,7 +12,7 @@
#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ)
#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
@@ -31,7 +31,7 @@
#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 16 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
@@ -42,11 +42,11 @@
#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
- 20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
- 15 * CAAM_CMD_SZ)
+#define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \
+ 21 * CAAM_CMD_SZ)
+#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
+ 16 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize, int era);
@@ -96,20 +96,21 @@
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);
-void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool encap,
+ const bool is_qi);
-void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
-void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
-void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
+void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata);
-void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata);
#endif /* _CAAMALG_DESC_H_ */
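
For the renamed skcipher descriptor constructors the calling convention is unchanged apart from the name. A hedged sketch for CBC-AES follows; ctx1_iv_off is 0 for CBC, 16 for plain CTR and 16 + CTR_RFC3686_NONCE_SIZE for rfc3686, per the offsets referenced elsewhere in this patch, and sh_desc_enc/key/keylen are assumed per-session fields.

/* Illustrative only: build the CBC-AES encrypt shared descriptor. */
struct alginfo cdata = {
	.algtype    = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	.keylen     = keylen,
	.key_virt   = key,
	.key_inline = true,
};

cnstr_shdsc_skcipher_encap(sh_desc_enc, &cdata, AES_BLOCK_SIZE,
			   false /* !rfc3686 */, 0 /* ctx1_iv_off */);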
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index d7aa7d7..8e34496 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -1,9 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale FSL CAAM support for crypto API over QI backend.
* Based on caamalg.c
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2019 NXP
*/
#include "compat.h"
@@ -35,6 +36,7 @@
int class2_alg_type;
bool rfc3686;
bool geniv;
+ bool nodkp;
};
struct caam_aead_alg {
@@ -43,6 +45,12 @@
bool registered;
};
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
/*
* per-session context
*/
@@ -50,7 +58,6 @@
struct device *jrdev;
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
enum dma_data_direction dir;
@@ -98,6 +105,18 @@
ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
}
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm> would result
+ * in invalid opcodes (last bytes of user key) in the resulting
+ * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
+ * addresses are needed.
+ */
+ ctx->adata.key_virt = ctx->key;
+ ctx->adata.key_dma = ctx->key_dma;
+
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
data_len[0] = ctx->adata.keylen_pad;
data_len[1] = ctx->cdata.keylen;
@@ -111,16 +130,6 @@
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -136,16 +145,6 @@
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -164,16 +163,6 @@
ARRAY_SIZE(data_len)) < 0)
return -EINVAL;
- if (inl_mask & 1)
- ctx->adata.key_virt = ctx->key;
- else
- ctx->adata.key_dma = ctx->key_dma;
-
- if (inl_mask & 2)
- ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
- else
- ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
ctx->adata.key_inline = !!(inl_mask & 1);
ctx->cdata.key_inline = !!(inl_mask & 2);
@@ -207,13 +196,11 @@
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
-#ifdef DEBUG
- dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+ dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
/*
* If DKP is supported, use it in the shared descriptor to generate
@@ -230,7 +217,7 @@
memcpy(ctx->key, keys.authkey, keys.authkeylen);
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
keys.enckeylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma,
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
ctx->adata.keylen_pad +
keys.enckeylen, ctx->dir);
goto skip_split_key;
@@ -244,13 +231,13 @@
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad + keys.enckeylen, 1);
-#endif
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
+ ctx->adata.keylen_pad + keys.enckeylen,
+ ctx->dir);
+
+ print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
@@ -286,6 +273,23 @@
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ return err;
+
+ err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
+ aead_setkey(aead, key, keylen);
+
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+}
+
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -332,6 +336,11 @@
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_gcm_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
gcm_set_sh_desc(authenc);
@@ -346,13 +355,18 @@
struct device *jrdev = ctx->jrdev;
int ret;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ ret = aes_check_keylen(keylen);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
+ ctx->dir);
ctx->cdata.keylen = keylen;
ret = gcm_set_sh_desc(aead);
@@ -428,6 +442,11 @@
unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_rfc4106_check_authsize(authsize);
+ if (err)
+ return err;
ctx->authsize = authsize;
rfc4106_set_sh_desc(authenc);
@@ -442,13 +461,14 @@
struct device *jrdev = ctx->jrdev;
int ret;
- if (keylen < 4)
- return -EINVAL;
+ ret = aes_check_keylen(keylen - 4);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
/*
@@ -456,8 +476,8 @@
* in the nonce. Update the AES key length.
*/
ctx->cdata.keylen = keylen - 4;
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- ctx->dir);
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
+ ctx->cdata.keylen, ctx->dir);
ret = rfc4106_set_sh_desc(aead);
if (ret)
@@ -533,6 +553,9 @@
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ if (authsize != 16)
+ return -EINVAL;
+
ctx->authsize = authsize;
rfc4543_set_sh_desc(authenc);
@@ -546,13 +569,14 @@
struct device *jrdev = ctx->jrdev;
int ret;
- if (keylen < 4)
- return -EINVAL;
+ ret = aes_check_keylen(keylen - 4);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
memcpy(ctx->key, key, keylen);
/*
@@ -560,8 +584,8 @@
* in the nonce. Update the AES key length.
*/
ctx->cdata.keylen = keylen - 4;
- dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- ctx->dir);
+ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
+ ctx->cdata.keylen, ctx->dir);
ret = rfc4543_set_sh_desc(aead);
if (ret)
@@ -589,53 +613,30 @@
return 0;
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen, const u32 ctx1_iv_off)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
- const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+ skcipher);
struct device *jrdev = ctx->jrdev;
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- u32 ctx1_iv_off = 0;
- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+ const bool is_rfc3686 = alg->caam.rfc3686;
int ret = 0;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
- /*
- * AES-CTR needs to load IV in CONTEXT1 reg
- * at an offset of 128bits (16bytes)
- * CONTEXT1[255:128] = IV
- */
- if (ctr_mode)
- ctx1_iv_off = 16;
-
- /*
- * RFC3686 specific:
- * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
- * | *key = {KEY, NONCE}
- */
- if (is_rfc3686) {
- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
- keylen -= CTR_RFC3686_NONCE_SIZE;
- }
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
ctx->cdata.keylen = keylen;
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
- cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
- is_rfc3686, ctx1_iv_off);
- cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
- is_rfc3686, ctx1_iv_off);
- cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
- ivsize, is_rfc3686, ctx1_iv_off);
+ /* skcipher encrypt, decrypt shared descriptors */
+ cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
+ cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
/* Now update the driver contexts with the new shared descriptor */
if (ctx->drv_ctx[ENCRYPT]) {
@@ -656,25 +657,92 @@
}
}
- if (ctx->drv_ctx[GIVENCRYPT]) {
- ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
- ctx->sh_desc_givenc);
- if (ret) {
- dev_err(jrdev, "driver givenc context update failed\n");
- goto badkey;
- }
- }
-
return ret;
badkey:
- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ int err;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ ctx1_iv_off = 16;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return verify_skcipher_des3_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return verify_skcipher_des_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
int ret = 0;
@@ -687,9 +755,9 @@
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* xts ablkcipher encrypt, decrypt shared descriptors */
- cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
- cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
+ /* xts skcipher encrypt, decrypt shared descriptors */
+ cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
/* Now update the driver contexts with the new shared descriptor */
if (ctx->drv_ctx[ENCRYPT]) {
@@ -712,7 +780,7 @@
return ret;
badkey:
- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -741,7 +809,7 @@
};
/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
@@ -750,7 +818,7 @@
* @drv_req: driver-specific request structure
* @sgt: the h/w link table, followed by IV
*/
-struct ablkcipher_edesc {
+struct skcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
@@ -781,14 +849,12 @@
if (type == ENCRYPT)
desc = ctx->sh_desc_enc;
- else if (type == DECRYPT)
+ else /* (type == DECRYPT) */
desc = ctx->sh_desc_dec;
- else /* (type == GIVENCRYPT) */
- desc = ctx->sh_desc_givenc;
cpu = smp_processor_id();
drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
- if (likely(!IS_ERR_OR_NULL(drv_ctx)))
+ if (!IS_ERR_OR_NULL(drv_ctx))
drv_ctx->op_type = type;
ctx->drv_ctx[type] = drv_ctx;
@@ -803,21 +869,20 @@
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents, dma_addr_t iv_dma, int ivsize,
- enum optype op_type, dma_addr_t qm_sg_dma,
+ enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
int qm_sg_bytes)
{
if (dst != src) {
if (src_nents)
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst_nents)
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize,
- op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
- DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
if (qm_sg_bytes)
dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
@@ -830,21 +895,20 @@
int ivsize = crypto_aead_ivsize(aead);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
- edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
-static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
- edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
}
static void aead_done(struct caam_drv_req *drv_req, u32 status)
@@ -858,20 +922,8 @@
qidev = caam_ctx->qidev;
- if (unlikely(status)) {
- u32 ssrc = status & JRSTA_SSRC_MASK;
- u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
-
- caam_jr_strstatus(qidev, status);
- /*
- * verify hw auth check passed else return -EBADMSG
- */
- if (ssrc == JRSTA_SSRC_CCB_ERROR &&
- err_id == JRSTA_CCBERR_ERRID_ICVCHK)
- ecode = -EBADMSG;
- else
- ecode = -EIO;
- }
+ if (unlikely(status))
+ ecode = caam_jr_strstatus(qidev, status);
edesc = container_of(drv_req, typeof(*edesc), drv_req);
aead_unmap(qidev, edesc, aead_req);
@@ -894,6 +946,7 @@
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ int src_len, dst_len = 0;
struct aead_edesc *edesc;
dma_addr_t qm_sg_dma, iv_dma = 0;
int ivsize = 0;
@@ -902,10 +955,9 @@
int in_len, out_len;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
- drv_ctx = get_drv_ctx(ctx, op_type);
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+ drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
+ if (IS_ERR_OR_NULL(drv_ctx))
return (struct aead_edesc *)drv_ctx;
/* allocate space for base edesc and hw desc commands, link tables */
@@ -916,13 +968,13 @@
}
if (likely(req->src == req->dst)) {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len = req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0);
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : 0));
+ src_len);
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
@@ -935,23 +987,21 @@
return ERR_PTR(-ENOMEM);
}
} else {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen);
+ src_len = req->assoclen + req->cryptlen;
+ dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (unlikely(src_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen);
+ src_len);
qi_cache_free(edesc);
return ERR_PTR(src_nents);
}
- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
- req->cryptlen +
- (encrypt ? authsize :
- (-authsize)));
+ dst_nents = sg_nents_for_len(req->dst, dst_len);
if (unlikely(dst_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : (-authsize)));
+ dst_len);
qi_cache_free(edesc);
return ERR_PTR(dst_nents);
}
@@ -968,13 +1018,19 @@
mapped_src_nents = 0;
}
- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(qidev, "unable to map destination\n");
- dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
+ if (dst_nents) {
+ mapped_dst_nents = dma_map_sg(qidev, req->dst,
+ dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ dma_unmap_sg(qidev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_dst_nents = 0;
}
}
@@ -984,9 +1040,24 @@
/*
* Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
* Input is not contiguous.
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries. Logic:
+ * if (src != dst && output S/G)
+ * pad output S/G, if needed
+ * else if (src == dst && S/G)
+ * overlapping S/Gs; pad one of them
+ * else if (input S/G) ...
+ * pad input S/G, if needed
*/
- qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
- (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
+ if (mapped_dst_nents > 1)
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents);
+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
+ qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
+ 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
+ else
+ qm_sg_ents = pad_sg_nents(qm_sg_ents);
+
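+ /*
+ * E.g. (a sketch, assuming pad_sg_nents() rounds up to the next multiple
+ * of 4, matching the 4-entry HW prefetch mentioned above): for src != dst,
+ * ivsize != 0, mapped_src_nents = 3 and mapped_dst_nents = 2:
+ *   input entries:  1 (assoclen) + 1 (IV) + 3 (src) = 5
+ *   output entries: pad_sg_nents(2) = 4
+ *   qm_sg_ents = 5 + 4 = 9
+ */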
sg_table = &edesc->sgt[0];
qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
@@ -994,7 +1065,7 @@
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1009,7 +1080,7 @@
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents,
- dst_nents, 0, 0, 0, 0, 0);
+ dst_nents, 0, 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1028,7 +1099,7 @@
if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
dev_err(qidev, "unable to map assoclen\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1039,19 +1110,18 @@
dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
qm_sg_index++;
}
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
qm_sg_index += mapped_src_nents;
if (mapped_dst_nents > 1)
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- qm_sg_index, 0);
+ sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(qidev, qm_sg_dma)) {
dev_err(qidev, "unable to map S/G table\n");
dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1074,7 +1144,7 @@
dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
(1 + !!ivsize) * sizeof(*sg_table),
out_len, 0);
- } else if (mapped_dst_nents == 1) {
+ } else if (mapped_dst_nents <= 1) {
dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
0);
} else {
@@ -1124,106 +1194,88 @@
static int ipsec_gcm_encrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return aead_crypt(req, true);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
+ true);
}
static int ipsec_gcm_decrypt(struct aead_request *req)
{
- if (req->assoclen < 8)
- return -EINVAL;
-
- return aead_crypt(req, false);
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
+ false);
}
-static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
- struct ablkcipher_edesc *edesc;
- struct ablkcipher_request *req = drv_req->app_ctx;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct skcipher_request *req = drv_req->app_ctx;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
struct device *qidev = caam_ctx->qidev;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+ int ecode = 0;
-#ifdef DEBUG
- dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
-#endif
+ dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
edesc = container_of(drv_req, typeof(*edesc), drv_req);
if (status)
- caam_jr_strstatus(qidev, status);
+ ecode = caam_jr_strstatus(qidev, status);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- edesc->src_nents > 1 ? 100 : ivsize, 1);
- caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
-#endif
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- ablkcipher_unmap(qidev, edesc, req);
-
- /* In case initial IV was generated, copy it in GIVCIPHER request */
- if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
- u8 *iv;
- struct skcipher_givcrypt_request *greq;
-
- greq = container_of(req, struct skcipher_givcrypt_request,
- creq);
- iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
- memcpy(greq->giv, iv, ivsize);
- }
+ skcipher_unmap(qidev, edesc, req);
/*
- * The crypto API expects us to set the IV (req->info) to the last
- * ciphertext block. This is used e.g. by the CTS mode.
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
*/
- if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
- ivsize, ivsize, 0);
+ if (!ecode)
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+ ivsize);
qi_cache_free(edesc);
- ablkcipher_request_complete(req, status);
+ skcipher_request_complete(req, ecode);
}
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
- *req, bool encrypt)
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+ bool encrypt)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *qidev = ctx->qidev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- struct ablkcipher_edesc *edesc;
+ struct skcipher_edesc *edesc;
dma_addr_t iv_dma;
u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
- drv_ctx = get_drv_ctx(ctx, op_type);
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
- return (struct ablkcipher_edesc *)drv_ctx;
+ drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
+ if (IS_ERR_OR_NULL(drv_ctx))
+ return (struct skcipher_edesc *)drv_ctx;
- src_nents = sg_nents_for_len(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(src_nents);
}
if (unlikely(req->src != req->dst)) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (unlikely(dst_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(dst_nents);
}
@@ -1253,14 +1305,26 @@
qm_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = qm_sg_ents;
- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ /*
+ * Input, output HW S/G tables: [IV, src][dst, IV]
+ * IV entries point to the same buffer
+ * If src == dst, S/G entries are reused (S/G tables overlap)
+ *
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries.
+ */
+ if (req->src != req->dst)
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
+ else
+ qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
+
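+ /*
+ * E.g. (a sketch, assuming pad_sg_nents() rounds up to a multiple of 4):
+ * for src != dst, mapped_src_nents = 2 and mapped_dst_nents = 3:
+ *   input table:  1 (IV) + 2 (src) = 3 entries
+ *   output table: pad_sg_nents(3 (dst) + 1 (IV)) = 4 entries
+ *   qm_sg_ents = 3 + 4 = 7
+ */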
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+ if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1269,20 +1333,20 @@
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
/* Make sure IV is located in a DMAable area */
sg_table = &edesc->sgt[0];
iv = (u8 *)(sg_table + qm_sg_ents);
- memcpy(iv, req->info, ivsize);
+ memcpy(iv, req->iv, ivsize);
- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1292,22 +1356,24 @@
edesc->iv_dma = iv_dma;
edesc->qm_sg_bytes = qm_sg_bytes;
edesc->drv_req.app_ctx = req;
- edesc->drv_req.cbk = ablkcipher_done;
+ edesc->drv_req.cbk = skcipher_done;
edesc->drv_req.drv_ctx = drv_ctx;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+ sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
- if (mapped_dst_nents > 1)
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- dst_sg_idx, 0);
+ if (req->src != req->dst)
+ sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
+
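+ /*
+ * The trailing IV entry in the output table lets the engine write the
+ * updated IV (last ciphertext block for CBC, last counter for CTR) into
+ * the buffer placed right after the S/G table; skcipher_done() copies it
+ * back to req->iv on completion.
+ */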
+ dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
+ ivsize, 0);
edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
dev_err(qidev, "unable to map S/G table\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1315,218 +1381,35 @@
fd_sgt = &edesc->drv_req.fd_sgt[0];
dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
- ivsize + req->nbytes, 0);
+ ivsize + req->cryptlen, 0);
- if (req->src == req->dst) {
+ if (req->src == req->dst)
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
- sizeof(*sg_table), req->nbytes, 0);
- } else if (mapped_dst_nents > 1) {
- dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), req->nbytes, 0);
- } else {
- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
- req->nbytes, 0);
- }
-
- return edesc;
-}
-
-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
- struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *qidev = ctx->qidev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
- struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma;
- u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- struct qm_sg_entry *sg_table, *fd_sgt;
- int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
- struct caam_drv_ctx *drv_ctx;
-
- drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
- return (struct ablkcipher_edesc *)drv_ctx;
-
- src_nents = sg_nents_for_len(req->src, req->nbytes);
- if (unlikely(src_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
- return ERR_PTR(src_nents);
- }
-
- if (unlikely(req->src != req->dst)) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
- if (unlikely(dst_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
- return ERR_PTR(dst_nents);
- }
-
- mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
- DMA_TO_DEVICE);
- if (unlikely(!mapped_src_nents)) {
- dev_err(qidev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(qidev, "unable to map destination\n");
- dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
- }
- } else {
- mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
- DMA_BIDIRECTIONAL);
- if (unlikely(!mapped_src_nents)) {
- dev_err(qidev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = src_nents;
- mapped_dst_nents = src_nents;
- }
-
- qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
- dst_sg_idx = qm_sg_ents;
-
- qm_sg_ents += 1 + mapped_dst_nents;
- qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
- ivsize > CAAM_QI_MEMCACHE_SIZE)) {
- dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
- qm_sg_ents, ivsize);
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- /* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_alloc(GFP_DMA | flags);
- if (!edesc) {
- dev_err(qidev, "could not allocate extended descriptor\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- /* Make sure IV is located in a DMAable area */
- sg_table = &edesc->sgt[0];
- iv = (u8 *)(sg_table + qm_sg_ents);
- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
- if (dma_mapping_error(qidev, iv_dma)) {
- dev_err(qidev, "unable to map IV\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
- edesc->iv_dma = iv_dma;
- edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->drv_req.app_ctx = req;
- edesc->drv_req.cbk = ablkcipher_done;
- edesc->drv_req.drv_ctx = drv_ctx;
-
- if (mapped_src_nents > 1)
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
-
- dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
- 0);
-
- edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
- DMA_TO_DEVICE);
- if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
- dev_err(qidev, "unable to map S/G table\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, GIVENCRYPT, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
-
- fd_sgt = &edesc->drv_req.fd_sgt[0];
-
- if (mapped_src_nents > 1)
- dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
+ sizeof(*sg_table), req->cryptlen + ivsize,
0);
else
- dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
- req->nbytes, 0);
-
- dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), ivsize + req->nbytes, 0);
+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table), req->cryptlen + ivsize,
+ 0);
return edesc;
}
-static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
+static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
int ret;
+ if (!req->cryptlen)
+ return 0;
+
if (unlikely(caam_congested))
return -EAGAIN;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, encrypt);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- /*
- * The crypto API expects us to set the IV (req->info) to the last
- * ciphertext block.
- */
- if (!encrypt)
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
- ivsize, ivsize, 0);
-
- ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ablkcipher_unmap(ctx->qidev, edesc, req);
- qi_cache_free(edesc);
- }
-
- return ret;
-}
-
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
-{
- return ablkcipher_crypt(req, true);
-}
-
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
-{
- return ablkcipher_crypt(req, false);
-}
-
-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ret;
-
- if (unlikely(caam_congested))
- return -EAGAIN;
-
- /* allocate extended descriptor */
- edesc = ablkcipher_giv_edesc_alloc(creq);
+ edesc = skcipher_edesc_alloc(req, encrypt);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1534,129 +1417,128 @@
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(ctx->qidev, edesc, req);
+ skcipher_unmap(ctx->qidev, edesc, req);
qi_cache_free(edesc);
}
return ret;
}
-#define template_ablkcipher template_u.ablkcipher
-struct caam_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- u32 type;
- union {
- struct ablkcipher_alg ablkcipher;
- } template_u;
- u32 class1_alg_type;
- u32 class2_alg_type;
-};
+static int skcipher_encrypt(struct skcipher_request *req)
+{
+ return skcipher_crypt(req, true);
+}
-static struct caam_alg_template driver_algs[] = {
- /* ablkcipher descriptor */
+static int skcipher_decrypt(struct skcipher_request *req)
+{
+ return skcipher_crypt(req, false);
+}
+
+static struct caam_skcipher_alg driver_algs[] = {
{
- .name = "cbc(aes)",
- .driver_name = "cbc-aes-caam-qi",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aes_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des3_ede)",
- .driver_name = "cbc-3des-caam-qi",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des)",
- .driver_name = "cbc-des-caam-qi",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = des_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
},
{
- .name = "ctr(aes)",
- .driver_name = "ctr-aes-caam-qi",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "chainiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = ctr_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "rfc3686(ctr(aes))",
- .driver_name = "rfc3686-ctr-aes-caam-qi",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc3686_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
+ },
},
{
- .name = "xts(aes)",
- .driver_name = "xts-aes-caam-qi",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = xts_ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
};
@@ -1677,6 +1559,7 @@
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
{
@@ -1695,6 +1578,7 @@
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* Galois Counter Mode */
@@ -1714,6 +1598,7 @@
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
}
},
/* single-pass ipsec_esp descriptor */
@@ -1992,7 +1877,7 @@
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2014,7 +1899,7 @@
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2037,7 +1922,7 @@
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2060,7 +1945,7 @@
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2083,7 +1968,7 @@
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2106,7 +1991,7 @@
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2129,7 +2014,7 @@
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2152,7 +2037,7 @@
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2175,7 +2060,7 @@
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2198,7 +2083,7 @@
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2221,7 +2106,7 @@
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2244,7 +2129,7 @@
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2528,16 +2413,11 @@
},
};
-struct caam_crypto_alg {
- struct list_head entry;
- struct crypto_alg crypto_alg;
- struct caam_alg_entry caam;
-};
-
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{
struct caam_drv_private *priv;
+ struct device *dev;
/*
* distribute tfms across job rings to ensure in-order
@@ -2549,16 +2429,17 @@
return PTR_ERR(ctx->jrdev);
}
- priv = dev_get_drvdata(ctx->jrdev->parent);
+ dev = ctx->jrdev->parent;
+ priv = dev_get_drvdata(dev);
if (priv->era >= 6 && uses_dkp)
ctx->dir = DMA_BIDIRECTIONAL;
else
ctx->dir = DMA_TO_DEVICE;
- ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
+ ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
ctx->dir);
- if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
- dev_err(ctx->jrdev, "unable to map key\n");
+ if (dma_mapping_error(dev, ctx->key_dma)) {
+ dev_err(dev, "unable to map key\n");
caam_jr_free(ctx->jrdev);
return -ENOMEM;
}
@@ -2567,24 +2448,23 @@
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- ctx->qidev = priv->qidev;
+ ctx->qidev = dev;
spin_lock_init(&ctx->lock);
ctx->drv_ctx[ENCRYPT] = NULL;
ctx->drv_ctx[DECRYPT] = NULL;
- ctx->drv_ctx[GIVENCRYPT] = NULL;
return 0;
}
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
- crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
- return caam_init_common(ctx, &caam_alg->caam, false);
+ return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+ false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2594,24 +2474,23 @@
aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam,
- alg->setkey == aead_setkey);
+ return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
{
caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
- caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
- dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
+ dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key),
+ ctx->dir);
caam_jr_free(ctx->jrdev);
}
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_tfm_ctx(tfm));
+ caam_exit_common(crypto_skcipher_ctx(tfm));
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -2619,10 +2498,8 @@
caam_exit_common(crypto_aead_ctx(tfm));
}
-static struct list_head alg_list;
-static void __exit caam_qi_algapi_exit(void)
+void caam_qi_algapi_exit(void)
{
- struct caam_crypto_alg *t_alg, *n;
int i;
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -2632,55 +2509,25 @@
crypto_unregister_aead(&t_alg->aead);
}
- if (!alg_list.next)
- return;
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
- list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
- crypto_unregister_alg(&t_alg->crypto_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
- *template)
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
- struct caam_crypto_alg *t_alg;
- struct crypto_alg *alg;
+ struct skcipher_alg *alg = &t_alg->skcipher;
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg)
- return ERR_PTR(-ENOMEM);
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- alg = &t_alg->crypto_alg;
-
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
- alg->cra_module = THIS_MODULE;
- alg->cra_init = caam_cra_init;
- alg->cra_exit = caam_cra_exit;
- alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_blocksize = template->blocksize;
- alg->cra_alignmask = 0;
- alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- template->type;
- switch (template->type) {
- case CRYPTO_ALG_TYPE_GIVCIPHER:
- alg->cra_type = &crypto_givcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- }
-
- t_alg->caam.class1_alg_type = template->class1_alg_type;
- t_alg->caam.class2_alg_type = template->class2_alg_type;
-
- return t_alg;
+ alg->init = caam_cra_init;
+ alg->exit = caam_cra_exit;
}
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -2696,64 +2543,55 @@
alg->exit = caam_aead_exit;
}
-static int __init caam_qi_algapi_init(void)
+int caam_qi_algapi_init(struct device *ctrldev)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
int i = 0, err = 0;
- u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- of_node_put(dev_node);
- if (!pdev)
- return -ENODEV;
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv || !priv->qi_present)
- return -ENODEV;
-
- if (caam_dpaa2) {
- dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
- return -ENODEV;
- }
-
- INIT_LIST_HEAD(&alg_list);
+ /* Make sure this runs only on (DPAA 1.x) QI */
+ if (!priv->qi_present || caam_dpaa2)
+ return 0;
/*
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
*/
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
- des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
- aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
- md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ if (priv->era < 10) {
+ u32 cha_vid, cha_inst;
+
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
+ md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
+ CHA_ID_LS_DES_SHIFT;
+ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ } else {
+ u32 aesa, mdha;
+
+ aesa = rd_reg32(&priv->ctrl->vreg.aesa);
+ mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+ aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+
+ des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+ aes_inst = aesa & CHA_VER_NUM_MASK;
+ md_inst = mdha & CHA_VER_NUM_MASK;
+ }
/* If MD is present, limit digest size based on LP256 */
- if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+ if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- struct caam_crypto_alg *t_alg;
- struct caam_alg_template *alg = driver_algs + i;
- u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
/* Skip DES algorithms if not supported by device */
if (!des_inst &&
@@ -2765,23 +2603,16 @@
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
- t_alg = caam_alg_alloc(alg);
- if (IS_ERR(t_alg)) {
- err = PTR_ERR(t_alg);
- dev_warn(priv->qidev, "%s alg allocation failed\n",
- alg->driver_name);
- continue;
- }
+ caam_skcipher_alg_init(t_alg);
- err = crypto_register_alg(&t_alg->crypto_alg);
+ err = crypto_register_skcipher(&t_alg->skcipher);
if (err) {
- dev_warn(priv->qidev, "%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
- kfree(t_alg);
+ dev_warn(ctrldev, "%s alg registration failed\n",
+ t_alg->skcipher.base.cra_driver_name);
continue;
}
- list_add_tail(&t_alg->entry, &alg_list);
+ t_alg->registered = true;
registered = true;
}
@@ -2807,8 +2638,7 @@
* Check support for AES algorithms not available
* on LP devices.
*/
- if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
- (alg_aai == OP_ALG_AAI_GCM))
+ if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
continue;
/*
@@ -2833,14 +2663,7 @@
}
if (registered)
- dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
+ dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
return err;
}
-
-module_init(caam_qi_algapi_init);
-module_exit(caam_qi_algapi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
-MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
new file mode 100644
index 0000000..3443f6d
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -0,0 +1,5470 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2019 NXP
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "caamalg_qi2.h"
+#include "dpseci_cmd.h"
+#include "desc_constr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "sg_sw_qm2.h"
+#include "key_gen.h"
+#include "caamalg_desc.h"
+#include "caamhash_desc.h"
+#include "dpseci-debugfs.h"
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
+
+#define CAAM_CRA_PRIORITY 2000
+
+/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
+#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
+ SHA512_DIGEST_SIZE * 2)
+
+/*
+ * This is a cache of buffers, from which users of the CAAM QI driver can
+ * allocate short buffers. It's speedier than doing kmalloc on the hotpath.
+ * NOTE: A more elegant solution would be to have some headroom in the frames
+ * being processed. This could be added by the dpaa2-eth driver, but it would
+ * pose a problem for userspace applications, which cannot know of this
+ * limitation. So for now, this will work.
+ * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
+ */
+static struct kmem_cache *qi_cache;
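+
+/*
+ * A minimal sketch of how qi_cache is expected to be created at probe time
+ * (the cache name and SLAB flags below are assumptions, not taken from this
+ * patch):
+ *
+ *	qi_cache = kmem_cache_create("dpaa2_caamqicache",
+ *				     CAAM_QI_MEMCACHE_SIZE, 0,
+ *				     SLAB_CACHE_DMA, NULL);
+ *	if (unlikely(!qi_cache))
+ *		return -ENOMEM;
+ */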
+
+struct caam_alg_entry {
+ struct device *dev;
+ int class1_alg_type;
+ int class2_alg_type;
+ bool rfc3686;
+ bool geniv;
+ bool nodkp;
+};
+
+struct caam_aead_alg {
+ struct aead_alg aead;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+/**
+ * caam_ctx - per-session context
+ * @flc: Flow Contexts array
+ * @key: [authentication key], encryption key
+ * @flc_dma: I/O virtual addresses of the Flow Contexts
+ * @key_dma: I/O virtual address of the key
+ * @dir: DMA direction for mapping key and Flow Contexts
+ * @dev: dpseci device
+ * @adata: authentication algorithm details
+ * @cdata: encryption algorithm details
+ * @authsize: authentication tag (a.k.a. ICV / MAC) size
+ */
+struct caam_ctx {
+ struct caam_flc flc[NUM_OP];
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t flc_dma[NUM_OP];
+ dma_addr_t key_dma;
+ enum dma_data_direction dir;
+ struct device *dev;
+ struct alginfo adata;
+ struct alginfo cdata;
+ unsigned int authsize;
+};
+
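+/*
+ * When the DPSECI device sits behind an IOMMU, the addresses carried in
+ * frame descriptors are I/O virtual addresses; they must be translated back
+ * to a CPU virtual address before the driver touches the data. Without an
+ * IOMMU domain, the IOVA is already the physical address.
+ */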
+static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
+ iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
+
+/*
+ * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
+ *
+ * Allocate data on the hotpath. Instead of using kzalloc, one can use the
+ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
+ * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
+ * hosting 16 SG entries.
+ *
+ * @flags - flags that would be used for the equivalent kzalloc(..) call
+ *
+ * Returns a pointer to a retrieved buffer on success or NULL on failure.
+ */
+static inline void *qi_cache_zalloc(gfp_t flags)
+{
+ return kmem_cache_zalloc(qi_cache, flags);
+}
+
+/*
+ * qi_cache_free - Frees buffers allocated from CAAM-QI cache
+ *
+ * @obj - buffer previously allocated by qi_cache_zalloc
+ *
+ * No checking is done; the call is a pass-through to kmem_cache_free(...)
+ */
+static inline void qi_cache_free(void *obj)
+{
+ kmem_cache_free(qi_cache, obj);
+}
+
+static struct caam_request *to_caam_req(struct crypto_async_request *areq)
+{
+ switch (crypto_tfm_alg_type(areq->tfm)) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ return skcipher_request_ctx(skcipher_request_cast(areq));
+ case CRYPTO_ALG_TYPE_AEAD:
+ return aead_request_ctx(container_of(areq, struct aead_request,
+ base));
+ case CRYPTO_ALG_TYPE_AHASH:
+ return ahash_request_ctx(ahash_request_cast(areq));
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+ struct scatterlist *dst, int src_nents,
+ int dst_nents, dma_addr_t iv_dma, int ivsize,
+ enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
+ int qm_sg_bytes)
+{
+ if (dst != src) {
+ if (src_nents)
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ if (dst_nents)
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ }
+
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
+
+ if (qm_sg_bytes)
+ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct device *dev = ctx->dev;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ struct caam_flc *flc;
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ u32 *nonce = NULL;
+ unsigned int data_len[2];
+ u32 inl_mask;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
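+ /*
+ * The nonce is not passed in separately: aead_setkey() lays out ctx->key
+ * as {auth key padded to keylen_pad, cipher key}, and for RFC3686 the last
+ * CTR_RFC3686_NONCE_SIZE bytes of the cipher key are the nonce, which is
+ * what the pointer computed below refers to.
+ */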
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm> would result
+ * in invalid opcodes (last bytes of user key) in the resulting
+ * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
+ * addresses are needed.
+ */
+ ctx->adata.key_virt = ctx->key;
+ ctx->adata.key_dma = ctx->key_dma;
+
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
+
+ /* aead_encrypt shared descriptor */
+ if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
+ DESC_QI_AEAD_ENC_LEN) +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
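+ /*
+ * desc_inline_query() is expected to set bit 0 (resp. bit 1) of inl_mask
+ * when the authentication (resp. cipher) key fits inline in the shared
+ * descriptor; a key that does not fit is instead referenced by its DMA
+ * address.
+ */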
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+
+ if (alg->caam.geniv)
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686,
+ nonce, ctx1_iv_off, true,
+ priv->sec_attr.era);
+ else
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off, true, priv->sec_attr.era);
+
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* aead_decrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, alg->caam.geniv,
+ is_rfc3686, nonce, ctx1_iv_off, true,
+ priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
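
Below is a minimal, user-space sketch (not driver code) of the ctx->key layout that the nonce pointer arithmetic above relies on: the split authentication key padded to adata.keylen_pad, immediately followed by the encryption key, whose last CTR_RFC3686_NONCE_SIZE bytes are the RFC3686 nonce. The concrete sizes used (64-byte padded split key, AES-128 key) are assumptions chosen only for illustration.

#include <stdio.h>

#define CTR_RFC3686_NONCE_SIZE 4	/* as in include/crypto/ctr.h */

int main(void)
{
	/* illustrative values: HMAC split key padded to 64 B, AES-128 + nonce */
	unsigned int keylen_pad = 64;	/* stands in for ctx->adata.keylen_pad */
	unsigned int enckeylen = 16 + CTR_RFC3686_NONCE_SIZE; /* ctx->cdata.keylen */

	/*
	 * ctx->key: [ auth key, padded ][ enc key | nonce ]
	 *           0                    keylen_pad         keylen_pad + enckeylen
	 */
	unsigned int nonce_off = keylen_pad + enckeylen - CTR_RFC3686_NONCE_SIZE;

	printf("RFC3686 nonce starts at byte %u of ctx->key\n", nonce_off);
	return 0;
}
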
+
+static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ aead_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+ dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+
+ ctx->cdata.keylen = keys.enckeylen;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return aead_set_sh_desc(aead);
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
+ return -EINVAL;
+}
+
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
+ aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_request *req_ctx = aead_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ int src_len, dst_len = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
+ unsigned int authsize = ctx->authsize;
+ int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct dpaa2_sg_entry *sg_table;
+
+ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (unlikely(req->dst != req->src)) {
+ src_len = req->assoclen + req->cryptlen;
+ dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+ src_nents = sg_nents_for_len(req->src, src_len);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ src_len);
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ dst_nents = sg_nents_for_len(req->dst, dst_len);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ dst_len);
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+
+ if (src_nents) {
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = 0;
+ }
+
+ if (dst_nents) {
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_dst_nents = 0;
+ }
+ } else {
+ src_len = req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0);
+
+ src_nents = sg_nents_for_len(req->src, src_len);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ src_len);
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
+ ivsize = crypto_aead_ivsize(aead);
+
+ /*
+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+ * Input is not contiguous.
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries. Logic:
+ * if (src != dst && output S/G)
+ * pad output S/G, if needed
+ * else if (src == dst && S/G)
+ * overlapping S/Gs; pad one of them
+ * else if (input S/G) ...
+ * pad input S/G, if needed
+ */
+ qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
+ if (mapped_dst_nents > 1)
+ qm_sg_nents += pad_sg_nents(mapped_dst_nents);
+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
+ qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
+ 1 + !!ivsize +
+ pad_sg_nents(mapped_src_nents));
+ else
+ qm_sg_nents = pad_sg_nents(qm_sg_nents);
+
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
+ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+ CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_nents, ivsize);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (ivsize) {
+ u8 *iv = (u8 *)(sg_table + qm_sg_nents);
+
+ /* Make sure IV is located in a DMAable area */
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, DMA_NONE, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+
+ if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
+ /*
+ * The associated data already comes with the IV, but we need
+ * to skip it when we authenticate or encrypt...
+ */
+ edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
+ else
+ edesc->assoclen = cpu_to_caam32(req->assoclen);
+ edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->assoclen_dma)) {
+ dev_err(dev, "unable to map assoclen\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
+ qm_sg_index++;
+ if (ivsize) {
+ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
+ qm_sg_index++;
+ }
+ sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ out_len = req->assoclen + req->cryptlen +
+ (encrypt ? ctx->authsize : (-ctx->authsize));
+ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, in_len);
+
+ if (req->dst == req->src) {
+ if (mapped_src_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma +
+ (1 + !!ivsize) * sizeof(*sg_table));
+ }
+ } else if (!mapped_dst_nents) {
+ /*
+ * The crypto engine requires the output entry to be present when
+ * a "frame list" FD is used.
+ * Since the engine does not support FMT=2'b11 (unused entry type),
+ * leaving out_fle zeroized is the best option.
+ */
+ goto skip_out_fle;
+ } else if (mapped_dst_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
+ sizeof(*sg_table));
+ }
+
+ dpaa2_fl_set_len(out_fle, out_len);
+
+skip_out_fle:
+ return edesc;
+}
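
The entry-count logic above can be summarized with the small stand-alone sketch below. It assumes pad_sg_nents(), defined earlier in this file, rounds a count up to a multiple of 4, which matches the "HW reads 4 S/G entries at a time" comment; the helper names here are invented for the sketch and are not part of the driver.

#define PAD4(n)		(((n) + 3) & ~3)	/* assumed behaviour of pad_sg_nents() */
#define MAX(a, b)	((a) > (b) ? (a) : (b))

/* Table layout: [assoclen word][optional IV][src entries][dst entries] */
static int aead_qm_sg_nents_sketch(int mapped_src, int mapped_dst,
				   int has_iv, int src_is_dst)
{
	int nents = 1 + !!has_iv + mapped_src;

	if (mapped_dst > 1)
		nents += PAD4(mapped_dst);	/* pad the trailing dst block */
	else if (src_is_dst && mapped_src > 1)
		nents = MAX(PAD4(nents), 1 + !!has_iv + PAD4(mapped_src));
	else
		nents = PAD4(nents);

	return nents;
}
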
+
+static int chachapoly_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ u32 *desc;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, true, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, false, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int chachapoly_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+
+ if (authsize != POLY1305_DIGEST_SIZE)
+ return -EINVAL;
+
+ ctx->authsize = authsize;
+ return chachapoly_set_sh_desc(aead);
+}
+
+static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
+
+ if (keylen != CHACHA_KEY_SIZE + saltlen) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->cdata.key_virt = key;
+ ctx->cdata.keylen = keylen - saltlen;
+
+ return chachapoly_set_sh_desc(aead);
+}
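
For reference, the keylen check above splits the input into a ChaCha20 key plus an optional salt, where the salt length is the gap between the fixed 96-bit ChaCha20-Poly1305 nonce and the transform's IV size. The sketch below assumes CHACHAPOLY_IV_SIZE is 12 and CHACHA_KEY_SIZE is 32, and uses an 8-byte IV (the rfc7539esp case) purely as an example.

#include <stdio.h>

int main(void)
{
	unsigned int chachapoly_iv_size = 12;	/* assumed CHACHAPOLY_IV_SIZE */
	unsigned int chacha_key_size = 32;	/* assumed CHACHA_KEY_SIZE */
	unsigned int ivsize = 8;		/* example: rfc7539esp */
	unsigned int saltlen = chachapoly_iv_size - ivsize;

	printf("expected keylen = %u key bytes + %u salt bytes\n",
	       chacha_key_size, saltlen);
	return 0;
}
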
+
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES GCM encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_gcm_check_authsize(authsize);
+ if (err)
+ return err;
+
+ ctx->authsize = authsize;
+ gcm_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ int ret;
+
+ ret = aes_check_keylen(keylen);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
+ ctx->cdata.keylen = keylen;
+
+ return gcm_set_sh_desc(aead);
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * RFC4106 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+ int err;
+
+ err = crypto_rfc4106_check_authsize(authsize);
+ if (err)
+ return err;
+
+ ctx->authsize = authsize;
+ rfc4106_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ int ret;
+
+ ret = aes_check_keylen(keylen - 4);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ return rfc4106_set_sh_desc(aead);
+}
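
A small stand-alone sketch of the key handling above: the caller passes the AES key with a 4-byte salt appended, so only keylen - 4 bytes are validated as the AES key and synced to the device, while the trailing salt stays in ctx->key for the descriptor to pick up. The struct and helper below are invented for illustration only.

#include <string.h>

struct gcm_esp_key {
	unsigned char aes_key[32];	/* up to AES-256 */
	unsigned char salt[4];		/* last 4 bytes of the input material */
	unsigned int aes_keylen;
};

static int split_rfc4106_key(struct gcm_esp_key *out,
			     const unsigned char *key, unsigned int keylen)
{
	if (keylen < 20 || keylen - 4 > sizeof(out->aes_key))
		return -1;		/* shortest valid input: AES-128 + salt */

	out->aes_keylen = keylen - 4;
	memcpy(out->aes_key, key, out->aes_keylen);
	memcpy(out->salt, key + out->aes_keylen, sizeof(out->salt));
	return 0;
}
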
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * RFC4543 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ if (authsize != 16)
+ return -EINVAL;
+
+ ctx->authsize = authsize;
+ rfc4543_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ int ret;
+
+ ret = aes_check_keylen(keylen - 4);
+ if (ret) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return ret;
+ }
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ return rfc4543_set_sh_desc(aead);
+}
+
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen, const u32 ctx1_iv_off)
+{
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher),
+ struct caam_skcipher_alg, skcipher);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+ u32 *desc;
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* skcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* skcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ int err;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 ctx1_iv_off;
+ int err;
+
+ /*
+ * AES-CTR needs to load the IV in the CONTEXT1 register
+ * at an offset of 128 bits (16 bytes):
+ * CONTEXT1[255:128] = IV
+ */
+ ctx1_iv_off = 16;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != CHACHA_KEY_SIZE) {
+ crypto_skcipher_set_flags(skcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return verify_skcipher_des_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return verify_skcipher_des3_key(skcipher, key) ?:
+ skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ u32 *desc;
+
+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ dev_err(dev, "key size mismatch\n");
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* xts_skcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* xts_skcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
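
The key-size check above reflects the XTS convention of two equal-length AES keys concatenated back to back, so the only accepted totals here are 32 and 64 bytes. A one-line sketch of that rule (AES_MIN_KEY_SIZE and AES_MAX_KEY_SIZE assumed to be 16 and 32):

/* Sketch of the check in xts_skcipher_setkey(); sizes are assumptions. */
static int xts_keylen_ok(unsigned int keylen)
{
	return keylen == 2 * 16 || keylen == 2 * 32;
}
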
+
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_request *req_ctx = skcipher_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct skcipher_edesc *edesc;
+ dma_addr_t iv_dma;
+ u8 *iv;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+ int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->cryptlen);
+ return ERR_PTR(src_nents);
+ }
+
+ if (unlikely(req->dst != req->src)) {
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ req->cryptlen);
+ return ERR_PTR(dst_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ qm_sg_ents = 1 + mapped_src_nents;
+ dst_sg_idx = qm_sg_ents;
+
+ /*
+ * Input, output HW S/G tables: [IV, src][dst, IV]
+ * IV entries point to the same buffer
+ * If src == dst, S/G entries are reused (S/G tables overlap)
+ *
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries.
+ */
+ if (req->src != req->dst)
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
+ else
+ qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
+
+ qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
+ if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
+ ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_ents, ivsize);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Make sure IV is located in a DMAable area */
+ sg_table = &edesc->sgt[0];
+ iv = (u8 *)(sg_table + qm_sg_ents);
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+ sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
+
+ if (req->src != req->dst)
+ sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
+
+ dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
+ ivsize, 0);
+
+ edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
+ dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
+
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+
+ if (req->src == req->dst)
+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
+ sizeof(*sg_table));
+ else
+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table));
+
+ return edesc;
+}
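
Everything built above lives in one qi_cache allocation: the shared input/output S/G table ([IV, src] then [dst, IV]) followed immediately by the IV buffer. The completion callbacks later read the updated IV back from that same spot, i.e. sgt[0] + qm_sg_bytes. The struct below is a simplified stand-in, not the real skcipher_edesc/dpaa2_sg_entry layout.

#include <stddef.h>

struct fake_sg_entry { unsigned long long addr; unsigned int len, flags; };

struct fake_skcipher_edesc {
	int qm_sg_bytes;		/* other bookkeeping fields elided */
	struct fake_sg_entry sgt[];	/* S/G table; IV stored right after it */
};

/* Mirrors the IV copy-back offset used in skcipher_{en,de}crypt_done(). */
static unsigned char *iv_location(struct fake_skcipher_edesc *edesc)
{
	return (unsigned char *)&edesc->sgt[0] + edesc->qm_sg_bytes;
}
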
+
+static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+}
+
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
+}
+
+static void aead_encrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct aead_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
+
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
+static void aead_decrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct aead_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
+
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
+ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->cbk = aead_encrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
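
The return-value handling above follows the standard asynchronous crypto API contract: -EINPROGRESS means the completion callback will run later, -EBUSY with CRYPTO_TFM_REQ_MAY_BACKLOG means the request was queued on the backlog, and any other value is an immediate failure that must be unmapped and freed here. As a reminder of the caller side of that contract, here is a generic (not patch-specific) example of submitting a request to such a driver and waiting synchronously via crypto_wait_req():

#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_aead_encrypt(struct crypto_aead *tfm,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int cryptlen, unsigned int assoclen,
				u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct aead_request *req;
	int ret;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* crypto_req_done() + crypto_wait_req() absorb -EINPROGRESS/-EBUSY */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				       CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);
	aead_request_free(req);
	return ret;
}
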
+
+static int aead_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
+ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->cbk = aead_decrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+ return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
+}
+
+static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct skcipher_edesc *edesc = req_ctx->edesc;
+ int ecode = 0;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
+
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
+
+ skcipher_unmap(ctx->dev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
+ */
+ if (!ecode)
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+ ivsize);
+
+ qi_cache_free(edesc);
+ skcipher_request_complete(req, ecode);
+}
+
+static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct skcipher_edesc *edesc = req_ctx->edesc;
+ int ecode = 0;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
+
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg("dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
+
+ skcipher_unmap(ctx->dev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block (CBC mode) or last counter (CTR mode).
+ * This is used e.g. by the CTS mode.
+ */
+ if (!ecode)
+ memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+ ivsize);
+
+ qi_cache_free(edesc);
+ skcipher_request_complete(req, ecode);
+}
+
+static int skcipher_encrypt(struct skcipher_request *req)
+{
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx(req);
+ int ret;
+
+ if (!req->cryptlen)
+ return 0;
+
+ /* allocate extended descriptor */
+ edesc = skcipher_edesc_alloc(req);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
+ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->cbk = skcipher_encrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int skcipher_decrypt(struct skcipher_request *req)
+{
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx(req);
+ int ret;
+
+ if (!req->cryptlen)
+ return 0;
+ /* allocate extended descriptor */
+ edesc = skcipher_edesc_alloc(req);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
+ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->cbk = skcipher_decrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
+{
+ dma_addr_t dma_addr;
+ int i;
+
+ /* copy descriptor header template value */
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+
+ ctx->dev = caam->dev;
+ ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
+ offsetof(struct caam_ctx, flc_dma),
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, dma_addr)) {
+ dev_err(ctx->dev, "unable to map key, shared descriptors\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < NUM_OP; i++)
+ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
+ ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
+
+ return 0;
+}
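
The offset arithmetic above assumes a specific layout of struct caam_ctx (declared elsewhere in this patch): the flow-context array comes first, the key buffer follows it, and flc_dma[] comes after those, so a single dma_map_single_attrs() of length offsetof(struct caam_ctx, flc_dma) covers all shared descriptors plus the key. The sketch below reproduces that layout with made-up field sizes; NUM_OP and the array sizes are assumptions for illustration.

#include <stddef.h>
#include <stdio.h>

#define NUM_OP			2	/* assumed: ENCRYPT and DECRYPT slots */
#define CAAM_MAX_KEY_SIZE	128	/* assumed size for the sketch */

struct fake_caam_flc { unsigned int flc[16]; unsigned int sh_desc[64]; };

struct fake_caam_ctx {
	struct fake_caam_flc flc[NUM_OP];
	unsigned char key[CAAM_MAX_KEY_SIZE];
	unsigned long long flc_dma[NUM_OP];	/* DMA mapping ends before here */
	unsigned long long key_dma;
};

int main(void)
{
	/* One mapping of this length covers every flc[i] and the key. */
	printf("mapped bytes = %zu, key offset = %zu\n",
	       offsetof(struct fake_caam_ctx, flc_dma),
	       (size_t)NUM_OP * sizeof(struct fake_caam_flc));
	return 0;
}
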
+
+static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
+{
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
+}
+
+static int caam_cra_init_aead(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+ aead);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
+ !caam_alg->caam.nodkp);
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
+ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
+ offsetof(struct caam_ctx, flc_dma), ctx->dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void caam_cra_exit(struct crypto_skcipher *tfm)
+{
+ caam_exit_common(crypto_skcipher_ctx(tfm));
+}
+
+static void caam_cra_exit_aead(struct crypto_aead *tfm)
+{
+ caam_exit_common(crypto_aead_ctx(tfm));
+}
+
+static struct caam_skcipher_alg driver_algs[] = {
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aes_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = des_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = ctr_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc3686_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "chacha20",
+ .cra_driver_name = "chacha20-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = chacha20_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
+ },
+};
+
+static struct caam_aead_alg driver_aeads[] = {
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
+ },
+ },
+ /* Galois Counter Mode */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
+ }
+ },
+ /* single-pass ipsec_esp descriptor */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = des3_aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(md5),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha1),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha224),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha256),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha384),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc7539(chacha20,poly1305)",
+ .cra_driver_name = "rfc7539-chacha20-poly1305-"
+ "caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = chachapoly_setkey,
+ .setauthsize = chachapoly_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CHACHAPOLY_IV_SIZE,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+ OP_ALG_AAI_AEAD,
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+ OP_ALG_AAI_AEAD,
+ .nodkp = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc7539esp(chacha20,poly1305)",
+ .cra_driver_name = "rfc7539esp-chacha20-"
+ "poly1305-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = chachapoly_setkey,
+ .setauthsize = chachapoly_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = 8,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+ OP_ALG_AAI_AEAD,
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+ OP_ALG_AAI_AEAD,
+ .nodkp = true,
+ },
+ },
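+ /*
+ * The two ChaCha20-Poly1305 entries above take a single combined key, so
+ * no split (derived) authentication key is set up for them - presumably
+ * what the .nodkp flag indicates.
+ */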
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha512),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+};
+
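+/*
+ * The template arrays above carry only the algorithm-specific fields; the
+ * common crypto_alg properties (module, priority, context size, flags) and
+ * the init/exit hooks are filled in by the *_alg_init() helpers below,
+ * before the algorithms are registered with the crypto API elsewhere in
+ * this file.
+ */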
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
+{
+ struct skcipher_alg *alg = &t_alg->skcipher;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_cra_init_skcipher;
+ alg->exit = caam_cra_exit;
+}
+
+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
+{
+ struct aead_alg *alg = &t_alg->aead;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_cra_init_aead;
+ alg->exit = caam_cra_exit_aead;
+}
+
+/* max hash key is max split key size */
+#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
+
+#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+
+/* caam context sizes for hashes: running digest + 8 */
+#define HASH_MSG_LEN 8
+#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
+
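+/*
+ * One preconstructed shared descriptor (flow context) is kept per hash
+ * operation type; ahash_set_sh_desc() builds all of them up front and the
+ * request paths below select the matching ctx->flc[]/ctx->flc_dma[] entry.
+ */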
+enum hash_optype {
+ UPDATE = 0,
+ UPDATE_FIRST,
+ FINALIZE,
+ DIGEST,
+ HASH_NUM_OP
+};
+
+/**
+ * struct caam_hash_ctx - ahash per-session context
+ * @flc: Flow Contexts array
+ * @key: authentication key
+ * @flc_dma: I/O virtual addresses of the Flow Contexts
+ * @dev: dpseci device
+ * @ctx_len: size of Context Register
+ * @adata: hashing algorithm details
+ */
+struct caam_hash_ctx {
+ struct caam_flc flc[HASH_NUM_OP];
+ u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ dma_addr_t flc_dma[HASH_NUM_OP];
+ struct device *dev;
+ int ctx_len;
+ struct alginfo adata;
+};
+
+/* ahash state */
+struct caam_hash_state {
+ struct caam_request caam_req;
+ dma_addr_t buf_dma;
+ dma_addr_t ctx_dma;
+ int ctx_dma_len;
+ u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_0;
+ u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_1;
+ u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+ int current_buf;
+};
+
+struct caam_export_state {
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
+ u8 caam_ctx[MAX_CTX_LEN];
+ int buflen;
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+};
+
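+/*
+ * Partial-block data is staged in two ping-pong buffers (buf_0/buf_1):
+ * while one buffer is DMA-mapped for the job in flight, the remainder for
+ * the next update is copied into the other; switch_buf() flips the roles,
+ * typically from the completion callbacks.
+ */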
+static inline void switch_buf(struct caam_hash_state *state)
+{
+ state->current_buf ^= 1;
+}
+
+static inline u8 *current_buf(struct caam_hash_state *state)
+{
+ return state->current_buf ? state->buf_1 : state->buf_0;
+}
+
+static inline u8 *alt_buf(struct caam_hash_state *state)
+{
+ return state->current_buf ? state->buf_0 : state->buf_1;
+}
+
+static inline int *current_buflen(struct caam_hash_state *state)
+{
+ return state->current_buf ? &state->buflen_1 : &state->buflen_0;
+}
+
+static inline int *alt_buflen(struct caam_hash_state *state)
+{
+ return state->current_buf ? &state->buflen_0 : &state->buflen_1;
+}
+
+/* Map current buffer in state (if length > 0) and put it in link table */
+static inline int buf_map_to_qm_sg(struct device *dev,
+ struct dpaa2_sg_entry *qm_sg,
+ struct caam_hash_state *state)
+{
+ int buflen = *current_buflen(state);
+
+ if (!buflen)
+ return 0;
+
+ state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, state->buf_dma)) {
+ dev_err(dev, "unable to map buf\n");
+ state->buf_dma = 0;
+ return -ENOMEM;
+ }
+
+ dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
+
+ return 0;
+}
+
+/* Map state->caam_ctx, and add it to link table */
+static inline int ctx_map_to_qm_sg(struct device *dev,
+ struct caam_hash_state *state, int ctx_len,
+ struct dpaa2_sg_entry *qm_sg, u32 flag)
+{
+ state->ctx_dma_len = ctx_len;
+ state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
+ if (dma_mapping_error(dev, state->ctx_dma)) {
+ dev_err(dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ return -ENOMEM;
+ }
+
+ dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
+
+ return 0;
+}
+
+static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
+ struct caam_flc *flc;
+ u32 *desc;
+
+ /* ahash_update shared descriptor */
+ flc = &ctx->flc[UPDATE];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+ ctx->ctx_len, true, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_update_first shared descriptor */
+ flc = &ctx->flc[UPDATE_FIRST];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, false, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_final shared descriptor */
+ flc = &ctx->flc[FINALIZE];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+ ctx->ctx_len, true, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_digest shared descriptor */
+ flc = &ctx->flc[DIGEST];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+ ctx->ctx_len, false, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ return 0;
+}
+
+struct split_key_sh_result {
+ struct completion completion;
+ int err;
+ struct device *dev;
+};
+
+static void split_key_sh_done(void *cbk_ctx, u32 err)
+{
+ struct split_key_sh_result *res = cbk_ctx;
+
+ dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+
+ res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
+ complete(&res->completion);
+}
+
+/* Digest the key when it is longer than the algorithm's block size */
+static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
+ u32 digestsize)
+{
+ struct caam_request *req_ctx;
+ u32 *desc;
+ struct split_key_sh_result result;
+ dma_addr_t key_dma;
+ struct caam_flc *flc;
+ dma_addr_t flc_dma;
+ int ret = -ENOMEM;
+ struct dpaa2_fl_entry *in_fle, *out_fle;
+
+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
+ if (!req_ctx)
+ return -ENOMEM;
+
+ in_fle = &req_ctx->fd_flt[1];
+ out_fle = &req_ctx->fd_flt[0];
+
+ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
+ if (!flc)
+ goto err_flc;
+
+ key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(ctx->dev, key_dma)) {
+ dev_err(ctx->dev, "unable to map key memory\n");
+ goto err_key_dma;
+ }
+
+ desc = flc->sh_desc;
+
+ init_sh_desc(desc, 0);
+
+ /* descriptor to perform unkeyed hash on key_in */
+ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
+ OP_ALG_AS_INITFINAL);
+ append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, flc_dma)) {
+ dev_err(ctx->dev, "unable to map shared descriptor\n");
+ goto err_flc_dma;
+ }
+
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, key_dma);
+ dpaa2_fl_set_len(in_fle, *keylen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, key_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
+ print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ result.err = 0;
+ init_completion(&result.completion);
+ result.dev = ctx->dev;
+
+ req_ctx->flc = flc;
+ req_ctx->flc_dma = flc_dma;
+ req_ctx->cbk = split_key_sh_done;
+ req_ctx->ctx = &result;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS) {
+ /* in progress */
+ wait_for_completion(&result.completion);
+ ret = result.err;
+ print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key,
+ digestsize, 1);
+ }
+
+ dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
+ DMA_TO_DEVICE);
+err_flc_dma:
+ dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
+err_key_dma:
+ kfree(flc);
+err_flc:
+ kfree(req_ctx);
+
+ *keylen = digestsize;
+
+ return ret;
+}
+
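+/*
+ * HMAC keys longer than the underlying block size are first shortened by
+ * hashing them (hash_digest_key() above), as usual for HMAC, before
+ * ctx->adata is populated with the (possibly shortened) key.
+ */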
+static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
+ int ret;
+ u8 *hashed_key = NULL;
+
+ dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
+
+ if (keylen > blocksize) {
+ hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ if (!hashed_key)
+ return -ENOMEM;
+ ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
+ if (ret)
+ goto bad_free_key;
+ key = hashed_key;
+ }
+
+ ctx->adata.keylen = keylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
+ goto bad_free_key;
+
+ ctx->adata.key_virt = key;
+ ctx->adata.key_inline = true;
+
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm> would result
+ * in invalid opcodes (last bytes of user key) in the resulting
+ * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
+ * addresses are needed.
+ */
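+ /*
+ * Assumption: ctx->key and ctx->adata.key_dma are the driver's
+ * preallocated key buffer and its DMA mapping, set up at tfm init
+ * time, so only a cache sync is needed below rather than a fresh
+ * mapping.
+ */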
+ if (keylen > ctx->adata.keylen_pad) {
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
+ ctx->adata.keylen_pad,
+ DMA_TO_DEVICE);
+ }
+
+ ret = ahash_set_sh_desc(ahash);
+ kfree(hashed_key);
+ return ret;
+bad_free_key:
+ kfree(hashed_key);
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
+ struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ if (edesc->src_nents)
+ dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+
+ if (edesc->qm_sg_bytes)
+ dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+
+ if (state->buf_dma) {
+ dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ DMA_TO_DEVICE);
+ state->buf_dma = 0;
+ }
+}
+
+static inline void ahash_unmap_ctx(struct device *dev,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, u32 flag)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ if (state->ctx_dma) {
+ dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
+ state->ctx_dma = 0;
+ }
+ ahash_unmap(dev, edesc, req);
+}
+
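+/*
+ * Completion callbacks: ahash_done() and ahash_done_ctx_src() copy the
+ * final digest out of the CAAM context into req->result, while
+ * ahash_done_bi() and ahash_done_ctx_dst() keep the running context and
+ * flip to the alternate buffer for the next update.
+ */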
+static void ahash_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+ memcpy(req->result, state->caam_ctx, digestsize);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_bi(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+ switch_buf(state);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ crypto_ahash_digestsize(ahash), 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+ memcpy(req->result, state->caam_ctx, digestsize);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status))
+ ecode = caam_qi2_strstatus(ctx->dev, status);
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+ switch_buf(state);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ crypto_ahash_digestsize(ahash), 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
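+/*
+ * ahash_update_ctx(): the buffered remainder plus the new data are split
+ * into a block-multiple portion (to_hash) that is sent to CAAM together
+ * with the running context, and a remainder (*next_buflen) that is staged
+ * in the alternate buffer for the next call.
+ */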
+static int ahash_update_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int *buflen = current_buflen(state);
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state), last_buflen;
+ int in_len = *buflen + req->nbytes, to_hash;
+ int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ last_buflen = *next_buflen;
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+ int src_len = req->nbytes - *next_buflen;
+
+ src_nents = sg_nents_for_len(req->src, src_len);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_src_index = 1 + (*buflen ? 1 : 0);
+ qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
+ sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ if (mapped_nents) {
+ sg_to_qm_sg_last(req->src, src_len,
+ sg_table + qm_sg_src_index, 0);
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
+ } else {
+ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
+ true);
+ }
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE];
+ req_ctx->cbk = ahash_done_bi;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+ } else if (*next_buflen) {
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
+ *buflen = *next_buflen;
+ *next_buflen = last_buflen;
+ }
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_final_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc)
+ return -ENOMEM;
+
+ qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[FINALIZE];
+ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
+ req_ctx->cbk = ahash_done_ctx_src;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, qm_sg_src_index;
+ int src_nents, mapped_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_src_index = 1 + (buflen ? 1 : 0);
+ qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
+ sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[FINALIZE];
+ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
+ req_ctx->cbk = ahash_done_ctx_src;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = -ENOMEM;
+
+ state->buf_dma = 0;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to map source for DMA\n");
+ return ret;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return ret;
+ }
+
+ edesc->src_nents = src_nents;
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+
+ if (mapped_nents > 1) {
+ int qm_sg_bytes;
+ struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
+
+ qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
+ sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ goto unmap;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ } else {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+ }
+
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ goto unmap;
+ }
+
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap:
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_final_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int buflen = *current_buflen(state);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ int ret = -ENOMEM;
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc)
+ return ret;
+
+ if (buflen) {
+ state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->buf_dma)) {
+ dev_err(ctx->dev, "unable to map src\n");
+ goto unmap;
+ }
+ }
+
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ goto unmap;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ /*
+ * crypto engine requires the input entry to be present when
+ * "frame list" FD is used.
+ * Since engine does not support FMT=2'b11 (unused entry type), leaving
+ * in_fle zeroized (except for "Final" flag) is the best option.
+ */
+ if (buflen) {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, state->buf_dma);
+ dpaa2_fl_set_len(in_fle, buflen);
+ }
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap:
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_update_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int *buflen = current_buflen(state);
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state);
+ int in_len = *buflen + req->nbytes, to_hash;
+ int qm_sg_bytes, src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+ int src_len = req->nbytes - *next_buflen;
+
+ src_nents = sg_nents_for_len(req->src, src_len);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
+ sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
+ if (ret)
+ goto unmap_ctx;
+
+ sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
+
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ state->ctx_dma_len = ctx->ctx_len;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+ ctx->ctx_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, to_hash);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
+ req_ctx->cbk = ahash_done_ctx_dst;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else if (*next_buflen) {
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
+ *buflen = *next_buflen;
+ *next_buflen = 0;
+ }
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, src_nents, mapped_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
+ if (ret)
+ goto unmap;
+
+ sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap;
+
+ return ret;
+unmap:
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_update_first(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state);
+ int to_hash;
+ int src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
+ 1);
+ to_hash = req->nbytes - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+ int src_len = req->nbytes - *next_buflen;
+
+ src_nents = sg_nents_for_len(req->src, src_len);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to map source for DMA\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ sg_table = &edesc->sgt[0];
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, to_hash);
+
+ if (mapped_nents > 1) {
+ int qm_sg_bytes;
+
+ sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
+ qm_sg_bytes = pad_sg_nents(mapped_nents) *
+ sizeof(*sg_table);
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ } else {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+ }
+
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+ *next_buflen, 0);
+
+ state->ctx_dma_len = ctx->ctx_len;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+ ctx->ctx_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
+ req_ctx->cbk = ahash_done_ctx_dst;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags &
+ CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else if (*next_buflen) {
+ state->update = ahash_update_no_ctx;
+ state->finup = ahash_finup_no_ctx;
+ state->final = ahash_final_no_ctx;
+ scatterwalk_map_and_copy(next_buf, req->src, 0,
+ req->nbytes, 0);
+ switch_buf(state);
+ }
+
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_first(struct ahash_request *req)
+{
+ return ahash_digest(req);
+}
+
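+/*
+ * ahash_init() starts every request stream on the *_first/_no_ctx
+ * handlers; once a chunk has actually been submitted to CAAM, the handlers
+ * are switched to the *_ctx variants so that the running context kept in
+ * state->caam_ctx is fed back into subsequent jobs.
+ */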
+static int ahash_init(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ state->update = ahash_update_first;
+ state->finup = ahash_finup_first;
+ state->final = ahash_final_no_ctx;
+
+ state->ctx_dma = 0;
+ state->ctx_dma_len = 0;
+ state->current_buf = 0;
+ state->buf_dma = 0;
+ state->buflen_0 = 0;
+ state->buflen_1 = 0;
+
+ return 0;
+}
+
+static int ahash_update(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->update(req);
+}
+
+static int ahash_finup(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->finup(req);
+}
+
+static int ahash_final(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->final(req);
+}
+
+static int ahash_export(struct ahash_request *req, void *out)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_export_state *export = out;
+ int len;
+ u8 *buf;
+
+ if (state->current_buf) {
+ buf = state->buf_1;
+ len = state->buflen_1;
+ } else {
+ buf = state->buf_0;
+ len = state->buflen_0;
+ }
+
+ memcpy(export->buf, buf, len);
+ memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
+ export->buflen = len;
+ export->update = state->update;
+ export->final = state->final;
+ export->finup = state->finup;
+
+ return 0;
+}
+
+static int ahash_import(struct ahash_request *req, const void *in)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ const struct caam_export_state *export = in;
+
+ memset(state, 0, sizeof(*state));
+ memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
+ state->buflen_0 = export->buflen;
+ state->update = export->update;
+ state->final = export->final;
+ state->finup = export->finup;
+
+ return 0;
+}
+
+struct caam_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ struct ahash_alg template_ahash;
+ u32 alg_type;
+};
+
+/* ahash descriptors */
+static struct caam_hash_template driver_hash[] = {
+ {
+ .name = "sha1",
+ .driver_name = "sha1-caam-qi2",
+ .hmac_name = "hmac(sha1)",
+ .hmac_driver_name = "hmac-sha1-caam-qi2",
+ .blocksize = SHA1_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA1,
+ }, {
+ .name = "sha224",
+ .driver_name = "sha224-caam-qi2",
+ .hmac_name = "hmac(sha224)",
+ .hmac_driver_name = "hmac-sha224-caam-qi2",
+ .blocksize = SHA224_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA224,
+ }, {
+ .name = "sha256",
+ .driver_name = "sha256-caam-qi2",
+ .hmac_name = "hmac(sha256)",
+ .hmac_driver_name = "hmac-sha256-caam-qi2",
+ .blocksize = SHA256_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA256,
+ }, {
+ .name = "sha384",
+ .driver_name = "sha384-caam-qi2",
+ .hmac_name = "hmac(sha384)",
+ .hmac_driver_name = "hmac-sha384-caam-qi2",
+ .blocksize = SHA384_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA384,
+ }, {
+ .name = "sha512",
+ .driver_name = "sha512-caam-qi2",
+ .hmac_name = "hmac(sha512)",
+ .hmac_driver_name = "hmac-sha512-caam-qi2",
+ .blocksize = SHA512_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA512,
+ }, {
+ .name = "md5",
+ .driver_name = "md5-caam-qi2",
+ .hmac_name = "hmac(md5)",
+ .hmac_driver_name = "hmac-md5-caam-qi2",
+ .blocksize = MD5_BLOCK_WORDS * 4,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_MD5,
+ }
+};
+
+struct caam_hash_alg {
+ struct list_head entry;
+ struct device *dev;
+ int alg_type;
+ struct ahash_alg ahash_alg;
+};
+
+static int caam_hash_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct crypto_alg *base = tfm->__crt_alg;
+ struct hash_alg_common *halg =
+ container_of(base, struct hash_alg_common, base);
+ struct ahash_alg *alg =
+ container_of(halg, struct ahash_alg, halg);
+ struct caam_hash_alg *caam_hash =
+ container_of(alg, struct caam_hash_alg, ahash_alg);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
+ HASH_MSG_LEN + SHA1_DIGEST_SIZE,
+ HASH_MSG_LEN + 32,
+ HASH_MSG_LEN + SHA256_DIGEST_SIZE,
+ HASH_MSG_LEN + 64,
+ HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+ dma_addr_t dma_addr;
+ int i;
+
+ ctx->dev = caam_hash->dev;
+
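+	/* Map the key buffer up front for algorithms that implement setkey */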
+ if (alg->setkey) {
+ ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
+ ARRAY_SIZE(ctx->key),
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
+ dev_err(ctx->dev, "unable to map key\n");
+ return -ENOMEM;
+ }
+ }
+
+ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, dma_addr)) {
+ dev_err(ctx->dev, "unable to map shared descriptors\n");
+ if (ctx->adata.key_dma)
+ dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
+ ARRAY_SIZE(ctx->key),
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < HASH_NUM_OP; i++)
+ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
+
+ /* copy descriptor header template value */
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+
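+	/* The running-digest (context) length is selected by the ALGSEL subfield */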
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT];
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct caam_hash_state));
+
+ return ahash_set_sh_desc(ahash);
+}
+
+static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
+ DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
+ if (ctx->adata.key_dma)
+ dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
+ ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
+ struct caam_hash_template *template, bool keyed)
+{
+ struct caam_hash_alg *t_alg;
+ struct ahash_alg *halg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ t_alg->ahash_alg = template->template_ahash;
+ halg = &t_alg->ahash_alg;
+ alg = &halg->halg.base;
+
+ if (keyed) {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_driver_name);
+ } else {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ t_alg->ahash_alg.setkey = NULL;
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = caam_hash_cra_init;
+ alg->cra_exit = caam_hash_cra_exit;
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
+
+ t_alg->alg_type = template->alg_type;
+ t_alg->dev = dev;
+
+ return t_alg;
+}
+
+static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+
+ ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
+ napi_schedule_irqoff(&ppriv->napi);
+}
+
+static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct dpaa2_io_notification_ctx *nctx;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err, i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ ppriv->priv = priv;
+ nctx = &ppriv->nctx;
+ nctx->is_cdan = 0;
+ nctx->id = ppriv->rsp_fqid;
+ nctx->desired_cpu = cpu;
+ nctx->cb = dpaa2_caam_fqdan_cb;
+
+ /* Register notification callbacks */
+ ppriv->dpio = dpaa2_io_service_select(cpu);
+ err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
+ if (unlikely(err)) {
+ dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
+ nctx->cb = NULL;
+ /*
+ * If no affine DPIO for this core, there's probably
+ * none available for next cores either. Signal we want
+ * to retry later, in case the DPIO devices weren't
+ * probed yet.
+ */
+ err = -EPROBE_DEFER;
+ goto err;
+ }
+
+ ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
+ dev);
+ if (unlikely(!ppriv->store)) {
+ dev_err(dev, "dpaa2_io_store_create() failed\n");
+ err = -ENOMEM;
+ goto err;
+ }
+
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return 0;
+
+err:
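+	/* Roll back only the per-CPU contexts set up before the failure */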
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->nctx.cb)
+ break;
+ dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
+ }
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->store)
+ break;
+ dpaa2_io_store_destroy(ppriv->store);
+ }
+
+ return err;
+}
+
+static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
+ priv->dev);
+ dpaa2_io_store_destroy(ppriv->store);
+
+ if (++i == priv->num_pairs)
+ return;
+ }
+}
+
+static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
+{
+ struct dpseci_rx_queue_cfg rx_queue_cfg;
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err = 0, i = 0, cpu;
+
+ /* Configure Rx queues */
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+
+ rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
+ DPSECI_QUEUE_OPT_USER_CTX;
+ rx_queue_cfg.order_preservation_en = 0;
+ rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
+ /*
+ * Rx priority (WQ) doesn't really matter, since we use
+ * pull mode, i.e. volatile dequeues from specific FQs
+ */
+ rx_queue_cfg.dest_cfg.priority = 0;
+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
+
+ err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &rx_queue_cfg);
+ if (err) {
+ dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
+ err);
+ return err;
+ }
+
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return err;
+}
+
+static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+
+ if (!priv->cscn_mem)
+ return;
+
+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+ kfree(priv->cscn_mem);
+}
+
+static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+
+ dpaa2_dpseci_congestion_free(priv);
+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
+static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
+ const struct dpaa2_fd *fd)
+{
+ struct caam_request *req;
+ u32 fd_err;
+
+ if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
+ dev_err(priv->dev, "Only Frame List FD format is supported!\n");
+ return;
+ }
+
+ fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
+ if (unlikely(fd_err))
+ dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
+
+ /*
+ * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
+ * in FD[ERR] or FD[FRC].
+ */
+ req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
+ dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
+}
+
+static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+ int err;
+
+ /* Retry while portal is busy */
+ do {
+ err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
+ ppriv->store);
+ } while (err == -EBUSY);
+
+ if (unlikely(err))
+ dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
+
+ return err;
+}
+
+static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+ struct dpaa2_dq *dq;
+ int cleaned = 0, is_last;
+
+ do {
+ dq = dpaa2_io_store_next(ppriv->store, &is_last);
+ if (unlikely(!dq)) {
+ if (unlikely(!is_last)) {
+ dev_dbg(ppriv->priv->dev,
+ "FQ %d returned no valid frames\n",
+ ppriv->rsp_fqid);
+ /*
+ * MUST retry until we get some sort of
+ * valid response token (be it "empty dequeue"
+ * or a valid frame).
+ */
+ continue;
+ }
+ break;
+ }
+
+ /* Process FD */
+ dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
+ cleaned++;
+ } while (!is_last);
+
+ return cleaned;
+}
+
+static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ struct dpaa2_caam_priv *priv;
+ int err, cleaned = 0, store_cleaned;
+
+ ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
+ priv = ppriv->priv;
+
+ if (unlikely(dpaa2_caam_pull_fq(ppriv)))
+ return 0;
+
+ do {
+ store_cleaned = dpaa2_caam_store_consume(ppriv);
+ cleaned += store_cleaned;
+
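+		/*
+		 * Stop polling if the store came back empty or if another
+		 * full store could push us past the NAPI budget.
+		 */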
+ if (store_cleaned == 0 ||
+ cleaned > budget - DPAA2_CAAM_STORE_SIZE)
+ break;
+
+ /* Try to dequeue some more */
+ err = dpaa2_caam_pull_fq(ppriv);
+ if (unlikely(err))
+ break;
+ } while (1);
+
+ if (cleaned < budget) {
+ napi_complete_done(napi, cleaned);
+ err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
+ if (unlikely(err))
+ dev_err(priv->dev, "Notification rearm failed: %d\n",
+ err);
+ }
+
+ return cleaned;
+}
+
+static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
+ u16 token)
+{
+ struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
+ struct device *dev = priv->dev;
+ int err;
+
+ /*
+	 * The congestion group feature is supported starting with DPSECI API
+	 * v5.1 and only when the object has been created with this capability.
+ */
+ if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
+ !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
+ return 0;
+
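+	/* Over-allocate so the CSCN area can be aligned to DPAA2_CSCN_ALIGN */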
+ priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
+ GFP_KERNEL | GFP_DMA);
+ if (!priv->cscn_mem)
+ return -ENOMEM;
+
+ priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, priv->cscn_dma)) {
+ dev_err(dev, "Error mapping CSCN memory area\n");
+ err = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
+ cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
+ cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
+ cong_notif_cfg.message_ctx = (uintptr_t)priv;
+ cong_notif_cfg.message_iova = priv->cscn_dma;
+ cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
+ DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
+ DPSECI_CGN_MODE_COHERENT_WRITE;
+
+ err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
+ &cong_notif_cfg);
+ if (err) {
+ dev_err(dev, "dpseci_set_congestion_notification failed\n");
+ goto err_set_cong;
+ }
+
+ return 0;
+
+err_set_cong:
+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+err_dma_map:
+ kfree(priv->cscn_mem);
+
+ return err;
+}
+
+static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_caam_priv *priv;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err, cpu;
+ u8 i;
+
+ priv = dev_get_drvdata(dev);
+
+ priv->dev = dev;
+ priv->dpsec_id = ls_dev->obj_desc.id;
+
+	/* Get a handle for the DPSECI this interface is associated with */
+ err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_open() failed: %d\n", err);
+ goto err_open;
+ }
+
+ err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
+ &priv->minor_ver);
+ if (err) {
+ dev_err(dev, "dpseci_get_api_version() failed\n");
+ goto err_get_vers;
+ }
+
+ dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
+
+ err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->dpseci_attr);
+ if (err) {
+ dev_err(dev, "dpseci_get_attributes() failed\n");
+ goto err_get_vers;
+ }
+
+ err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->sec_attr);
+ if (err) {
+ dev_err(dev, "dpseci_get_sec_attr() failed\n");
+ goto err_get_vers;
+ }
+
+ err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "setup_congestion() failed\n");
+ goto err_get_vers;
+ }
+
+ priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
+ priv->dpseci_attr.num_tx_queues);
+ if (priv->num_pairs > num_online_cpus()) {
+ dev_warn(dev, "%d queues won't be used\n",
+ priv->num_pairs - num_online_cpus());
+ priv->num_pairs = num_online_cpus();
+ }
+
+ for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
+ err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &priv->rx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpseci_get_rx_queue() failed\n");
+ goto err_get_rx_queue;
+ }
+ }
+
+ for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
+ err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &priv->tx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpseci_get_tx_queue() failed\n");
+ goto err_get_rx_queue;
+ }
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu) {
+ u8 j;
+
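+		/* Distribute the available queue pairs round-robin across cores */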
+ j = i % priv->num_pairs;
+
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
+
+ /*
+ * Allow all cores to enqueue, while only some of them
+ * will take part in dequeuing.
+ */
+ if (++i > priv->num_pairs)
+ continue;
+
+ ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
+ ppriv->prio = j;
+
+ dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
+ priv->rx_queue_attr[j].fqid,
+ priv->tx_queue_attr[j].fqid);
+
+ ppriv->net_dev.dev = *dev;
+ INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
+ netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
+ DPAA2_CAAM_NAPI_WEIGHT);
+ }
+
+ return 0;
+
+err_get_rx_queue:
+ dpaa2_dpseci_congestion_free(priv);
+err_get_vers:
+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+err_open:
+ return err;
+}
+
+static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ napi_enable(&ppriv->napi);
+ }
+
+ return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
+static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ int i, err = 0, enabled;
+
+ err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_disable() failed\n");
+ return err;
+ }
+
+ err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
+ if (err) {
+ dev_err(dev, "dpseci_is_enabled() failed\n");
+ return err;
+ }
+
+ dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ napi_disable(&ppriv->napi);
+ netif_napi_del(&ppriv->napi);
+ }
+
+ return 0;
+}
+
+static struct list_head hash_list;
+
+static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
+{
+ struct device *dev;
+ struct dpaa2_caam_priv *priv;
+ int i, err = 0;
+ bool registered = false;
+
+ /*
+ * There is no way to get CAAM endianness - there is no direct register
+ * space access and MC f/w does not provide this attribute.
+ * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
+ * property.
+ */
+ caam_little_end = true;
+
+ caam_imx = false;
+
+ dev = &dpseci_dev->dev;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ priv->domain = iommu_get_domain_for_dev(dev);
+
+ qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
+ 0, SLAB_CACHE_DMA, NULL);
+ if (!qi_cache) {
+ dev_err(dev, "Can't allocate SEC cache\n");
+ return -ENOMEM;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
+ if (err) {
+ dev_err(dev, "dma_set_mask_and_coherent() failed\n");
+ goto err_dma_mask;
+ }
+
+ /* Obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "MC portal allocation failed\n");
+
+ goto err_dma_mask;
+ }
+
+ priv->ppriv = alloc_percpu(*priv->ppriv);
+ if (!priv->ppriv) {
+ dev_err(dev, "alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto err_alloc_ppriv;
+ }
+
+ /* DPSECI initialization */
+ err = dpaa2_dpseci_setup(dpseci_dev);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_setup() failed\n");
+ goto err_dpseci_setup;
+ }
+
+ /* DPIO */
+ err = dpaa2_dpseci_dpio_setup(priv);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
+ goto err_dpio_setup;
+ }
+
+ /* DPSECI binding to DPIO */
+ err = dpaa2_dpseci_bind(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_bind() failed\n");
+ goto err_bind;
+ }
+
+ /* DPSECI enable */
+ err = dpaa2_dpseci_enable(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_enable() failed\n");
+ goto err_bind;
+ }
+
+ dpaa2_dpseci_debugfs_init(priv);
+
+ /* register crypto algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!priv->sec_attr.des_acc_num &&
+ (alg_sel == OP_ALG_ALGSEL_3DES ||
+ alg_sel == OP_ALG_ALGSEL_DES))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!priv->sec_attr.aes_acc_num &&
+ alg_sel == OP_ALG_ALGSEL_AES)
+ continue;
+
+ /* Skip CHACHA20 algorithms if not supported by device */
+ if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
+ !priv->sec_attr.ccha_acc_num)
+ continue;
+
+ t_alg->caam.dev = dev;
+ caam_skcipher_alg_init(t_alg);
+
+ err = crypto_register_skcipher(&t_alg->skcipher);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->skcipher.base.cra_driver_name, err);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+ OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!priv->sec_attr.des_acc_num &&
+ (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
+ c1_alg_sel == OP_ALG_ALGSEL_DES))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!priv->sec_attr.aes_acc_num &&
+ c1_alg_sel == OP_ALG_ALGSEL_AES)
+ continue;
+
+ /* Skip CHACHA20 algorithms if not supported by device */
+ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
+ !priv->sec_attr.ccha_acc_num)
+ continue;
+
+ /* Skip POLY1305 algorithms if not supported by device */
+ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
+ !priv->sec_attr.ptha_acc_num)
+ continue;
+
+ /*
+ * Skip algorithms requiring message digests
+ * if MD not supported by device.
+ */
+ if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
+ !priv->sec_attr.md_acc_num)
+ continue;
+
+ t_alg->caam.dev = dev;
+ caam_aead_alg_init(t_alg);
+
+ err = crypto_register_aead(&t_alg->aead);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->aead.base.cra_driver_name, err);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+ if (registered)
+ dev_info(dev, "algorithms registered in /proc/crypto\n");
+
+ /* register hash algorithms the device supports */
+ INIT_LIST_HEAD(&hash_list);
+
+ /*
+ * Skip registration of any hashing algorithms if MD block
+ * is not present.
+ */
+ if (!priv->sec_attr.md_acc_num)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
+ struct caam_hash_alg *t_alg;
+ struct caam_hash_template *alg = driver_hash + i;
+
+ /* register hmac version */
+ t_alg = caam_hash_alloc(dev, alg, true);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(dev, "%s hash alg allocation failed: %d\n",
+ alg->driver_name, err);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name,
+ err);
+ kfree(t_alg);
+ } else {
+ list_add_tail(&t_alg->entry, &hash_list);
+ }
+
+ /* register unkeyed version */
+ t_alg = caam_hash_alloc(dev, alg, false);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(dev, "%s alg allocation failed: %d\n",
+ alg->driver_name, err);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name,
+ err);
+ kfree(t_alg);
+ } else {
+ list_add_tail(&t_alg->entry, &hash_list);
+ }
+ }
+ if (!list_empty(&hash_list))
+ dev_info(dev, "hash algorithms registered in /proc/crypto\n");
+
+ return err;
+
+err_bind:
+ dpaa2_dpseci_dpio_free(priv);
+err_dpio_setup:
+ dpaa2_dpseci_free(priv);
+err_dpseci_setup:
+ free_percpu(priv->ppriv);
+err_alloc_ppriv:
+ fsl_mc_portal_free(priv->mc_io);
+err_dma_mask:
+ kmem_cache_destroy(qi_cache);
+
+ return err;
+}
+
+static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev;
+ struct dpaa2_caam_priv *priv;
+ int i;
+
+ dev = &ls_dev->dev;
+ priv = dev_get_drvdata(dev);
+
+ dpaa2_dpseci_debugfs_exit(priv);
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+
+ if (t_alg->registered)
+ crypto_unregister_aead(&t_alg->aead);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
+ }
+
+ if (hash_list.next) {
+ struct caam_hash_alg *t_hash_alg, *p;
+
+ list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
+ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+ list_del(&t_hash_alg->entry);
+ kfree(t_hash_alg);
+ }
+ }
+
+ dpaa2_dpseci_disable(priv);
+ dpaa2_dpseci_dpio_free(priv);
+ dpaa2_dpseci_free(priv);
+ free_percpu(priv->ppriv);
+ fsl_mc_portal_free(priv->mc_io);
+ kmem_cache_destroy(qi_cache);
+
+ return 0;
+}
+
+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
+{
+ struct dpaa2_fd fd;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err = 0, i;
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (priv->cscn_mem) {
+ dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
+ DPAA2_CSCN_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
+ dev_dbg_ratelimited(dev, "Dropping request\n");
+ return -EBUSY;
+ }
+ }
+
+ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
+
+ req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, req->fd_flt_dma)) {
+ dev_err(dev, "DMA mapping error for QI enqueue request\n");
+ goto err_out;
+ }
+
+ memset(&fd, 0, sizeof(fd));
+ dpaa2_fd_set_format(&fd, dpaa2_fd_list);
+ dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
+ dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
+ dpaa2_fd_set_flc(&fd, req->flc_dma);
+
+ ppriv = this_cpu_ptr(priv->ppriv);
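+	/*
+	 * Retry while the software portal is busy, bounded at twice the
+	 * number of Tx queues so a congested portal cannot spin forever.
+	 */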
+ for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
+ err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
+ &fd);
+ if (err != -EBUSY)
+ break;
+
+ cpu_relax();
+ }
+
+ if (unlikely(err)) {
+ dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
+ goto err_out;
+ }
+
+ return -EINPROGRESS;
+
+err_out:
+ dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ return -EIO;
+}
+EXPORT_SYMBOL(dpaa2_caam_enqueue);
+
+static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpseci",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_caam_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_caam_probe,
+ .remove = dpaa2_caam_remove,
+ .match_id_table = dpaa2_caam_match_id_table
+};
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
+
+module_fsl_mc_driver(dpaa2_caam_driver);
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
new file mode 100644
index 0000000..7067367
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef _CAAMALG_QI2_H_
+#define _CAAMALG_QI2_H_
+
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
+#include <linux/threads.h>
+#include <linux/netdevice.h>
+#include "dpseci.h"
+#include "desc_constr.h"
+
+#define DPAA2_CAAM_STORE_SIZE 16
+/* NAPI weight *must* be a multiple of the store size. */
+#define DPAA2_CAAM_NAPI_WEIGHT 512
+
+/* The congestion entrance threshold was chosen so that the LS2088A can
+ * sustain maximum throughput with the available memory
+ */
+#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
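+/* Exit threshold is 90% of the entry threshold, providing hysteresis */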
+#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
+
+/**
+ * dpaa2_caam_priv - driver private data
+ * @dpsec_id: DPSECI object unique ID
+ * @major_ver: DPSECI major version
+ * @minor_ver: DPSECI minor version
+ * @dpseci_attr: DPSECI attributes
+ * @sec_attr: SEC engine attributes
+ * @rx_queue_attr: array of Rx queue attributes
+ * @tx_queue_attr: array of Tx queue attributes
+ * @cscn_mem: pointer to memory region containing the congestion SCN;
+ *	its size is larger than strictly needed, to accommodate alignment
+ * @cscn_mem_aligned: pointer to congestion SCN; it is computed as
+ * PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
+ * @cscn_dma: dma address used by the QMAN to write CSCN messages
+ * @dev: device associated with the DPSECI object
+ * @mc_io: pointer to MC portal's I/O object
+ * @domain: IOMMU domain
+ * @ppriv: per-CPU pointers to private data
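+ * @num_pairs: number of (Rx, Tx) queue pairs in use, capped at the
+ *	number of online CPUs
+ * @dfs_root: debugfs root directory for the DPSECI object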
+ */
+struct dpaa2_caam_priv {
+ int dpsec_id;
+
+ u16 major_ver;
+ u16 minor_ver;
+
+ struct dpseci_attr dpseci_attr;
+ struct dpseci_sec_attr sec_attr;
+ struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
+ struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
+ int num_pairs;
+
+ /* congestion */
+ void *cscn_mem;
+ void *cscn_mem_aligned;
+ dma_addr_t cscn_dma;
+
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ struct iommu_domain *domain;
+
+ struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
+ struct dentry *dfs_root;
+};
+
+/**
+ * dpaa2_caam_priv_per_cpu - per CPU private data
+ * @napi: napi structure
+ * @net_dev: netdev used by napi
+ * @req_fqid: (virtual) request (Tx / enqueue) FQID
+ * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
+ * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
+ * @nctx: notification context of response FQ
+ * @store: where dequeued frames are stored
+ * @priv: backpointer to dpaa2_caam_priv
+ * @dpio: portal used for data path operations
+ */
+struct dpaa2_caam_priv_per_cpu {
+ struct napi_struct napi;
+ struct net_device net_dev;
+ int req_fqid;
+ int rsp_fqid;
+ int prio;
+ struct dpaa2_io_notification_ctx nctx;
+ struct dpaa2_io_store *store;
+ struct dpaa2_caam_priv *priv;
+ struct dpaa2_io *dpio;
+};
+
+/* Length of a single buffer in the QI driver memory cache */
+#define CAAM_QI_MEMCACHE_SIZE 512
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @assoclen: associated data length, in CAAM endianness
+ * @assoclen_dma: bus physical mapped address of req->assoclen
+ * @sgt: the h/w link table, followed by IV
+ */
+struct aead_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ unsigned int assoclen;
+ dma_addr_t assoclen_dma;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/*
+ * skcipher_edesc - s/w-extended skcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @sgt: the h/w link table, followed by IV
+ */
+struct skcipher_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/*
+ * ahash_edesc - s/w-extended ahash descriptor
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @src_nents: number of segments in input scatterlist
+ * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @sgt: pointer to h/w link table
+ */
+struct ahash_edesc {
+ dma_addr_t qm_sg_dma;
+ int src_nents;
+ int qm_sg_bytes;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/**
+ * caam_flc - Flow Context (FLC)
+ * @flc: Flow Context options
+ * @sh_desc: Shared Descriptor
+ */
+struct caam_flc {
+ u32 flc[16];
+ u32 sh_desc[MAX_SDLEN];
+} ____cacheline_aligned;
+
+enum optype {
+ ENCRYPT = 0,
+ DECRYPT,
+ NUM_OP
+};
+
+/**
+ * caam_request - the request structure a user of the driver should fill in
+ *		  when submitting a job to the driver
+ * @fd_flt: Frame list table defining input and output
+ * fd_flt[0] - FLE pointing to output buffer
+ * fd_flt[1] - FLE pointing to input buffer
+ * @fd_flt_dma: DMA address for the frame list table
+ * @flc: Flow Context
+ * @flc_dma: I/O virtual address of Flow Context
+ * @cbk: Callback function to invoke when job is completed
+ * @ctx: arbitrary context attached to the request by the caller
+ * @edesc: extended descriptor; points to one of {skcipher,aead,ahash}_edesc
+ */
+struct caam_request {
+ struct dpaa2_fl_entry fd_flt[2];
+ dma_addr_t fd_flt_dma;
+ struct caam_flc *flc;
+ dma_addr_t flc_dma;
+ void (*cbk)(void *ctx, u32 err);
+ void *ctx;
+ void *edesc;
+};
+
+/**
+ * dpaa2_caam_enqueue() - enqueue a crypto request
+ * @dev: device associated with the DPSECI object
+ * @req: pointer to caam_request
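+ *
+ * Return: -EINPROGRESS if the request was successfully enqueued (completion
+ * is then reported through the request's @cbk callback), -EBUSY if the
+ * DPSECI object is congested, or a negative error code on failure.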
+ */
+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
+
+#endif /* _CAAMALG_QI2_H_ */
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 43975ab..65399cb 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1,7 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
* Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2018-2019 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -62,6 +64,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include "caamhash_desc.h"
#define CAAM_CRA_PRIORITY 3000
@@ -71,14 +74,6 @@
#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
-/* length of descriptors text */
-#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-
#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -87,14 +82,6 @@
#define HASH_MSG_LEN 8
#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
-#ifdef DEBUG
-/* for print_hex_dumps with line references */
-#define debug(format, arg...) printk(format, arg)
-#else
-#define debug(format, arg...)
-#endif
-
-
static struct list_head hash_list;
/* ahash per-session context */
@@ -103,13 +90,14 @@
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
dma_addr_t sh_desc_update_dma ____cacheline_aligned;
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
enum dma_data_direction dir;
+ enum dma_data_direction key_dir;
struct device *jrdev;
- u8 key[CAAM_MAX_HASH_KEY_SIZE];
int ctx_len;
struct alginfo adata;
};
@@ -118,6 +106,7 @@
struct caam_hash_state {
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
+ int ctx_dma_len;
u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
int buflen_0;
u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
@@ -163,6 +152,11 @@
return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}
+static inline bool is_cmac_aes(u32 algtype)
+{
+ return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
+ (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
+}
/* Common job descriptor seq in/out ptr routines */
/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
@@ -170,6 +164,7 @@
struct caam_hash_state *state,
int ctx_len)
{
+ state->ctx_dma_len = ctx_len;
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
ctx_len, DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, state->ctx_dma)) {
@@ -183,18 +178,6 @@
return 0;
}
-/* Map req->result, and append seq_out_ptr command that points to it */
-static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
- u8 *result, int digestsize)
-{
- dma_addr_t dst_dma;
-
- dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
- append_seq_out_ptr(desc, dst_dma, digestsize, 0);
-
- return dst_dma;
-}
-
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
struct sec4_sg_entry *sec4_sg,
@@ -223,6 +206,7 @@
struct caam_hash_state *state, int ctx_len,
struct sec4_sg_entry *sec4_sg, u32 flag)
{
+ state->ctx_dma_len = ctx_len;
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
if (dma_mapping_error(jrdev, state->ctx_dma)) {
dev_err(jrdev, "unable to map ctx\n");
@@ -235,60 +219,6 @@
return 0;
}
-/*
- * For ahash update, final and finup (import_ctx = true)
- * import context, read and write to seqout
- * For ahash firsts and digest (import_ctx = false)
- * read and write to seqout
- */
-static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
- struct caam_hash_ctx *ctx, bool import_ctx,
- int era)
-{
- u32 op = ctx->adata.algtype;
- u32 *skip_key_load;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Append key if it has been set; ahash update excluded */
- if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
- /* Skip key loading if already shared */
- skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- if (era < 6)
- append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
- ctx->adata.keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_proto_dkp(desc, &ctx->adata);
-
- set_jump_tgt_here(desc, skip_key_load);
-
- op |= OP_ALG_AAI_HMAC_PRECOMP;
- }
-
- /* If needed, import context from software */
- if (import_ctx)
- append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
-
- /*
- * Load from buf and/or src and write to req->result or state->context
- * Calculate remaining bytes to read
- */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- /* Read remaining bytes */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- FIFOLD_TYPE_MSG | KEY_VLF);
- /* Store class2 context bytes */
- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-}
-
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -301,64 +231,160 @@
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
- ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+ ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash update shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash update first shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
+ ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
- ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+ ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+ ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
desc_bytes(desc), ctx->dir);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ahash digest shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+
+ return 0;
+}
+
+static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc;
+
+ /* shared descriptor for ahash_update */
+ desc = ctx->sh_desc_update;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
+ ctx->ctx_len, ctx->ctx_len);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* shared descriptor for ahash_{final,finup} */
+ desc = ctx->sh_desc_fin;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
+ digestsize, ctx->ctx_len);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* key is immediate data for INIT and INITFINAL states */
+ ctx->adata.key_virt = ctx->key;
+
+ /* shared descriptor for first invocation of ahash_update */
+ desc = ctx->sh_desc_update_first;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
+ " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+
+ /* shared descriptor for ahash_digest */
+ desc = ctx->sh_desc_digest;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
+ digestsize, ctx->ctx_len);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+ return 0;
+}
+
+static int acmac_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc;
+
+ /* shared descriptor for ahash_update */
+ desc = ctx->sh_desc_update;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
+ ctx->ctx_len, ctx->ctx_len);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+
+ /* shared descriptor for ahash_{final,finup} */
+ desc = ctx->sh_desc_fin;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
+ digestsize, ctx->ctx_len);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+
+ /* shared descriptor for first invocation of ahash_update */
+ desc = ctx->sh_desc_update_first;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
+ " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+
+ /* shared descriptor for ahash_digest */
+ desc = ctx->sh_desc_digest;
+ cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
+ digestsize, ctx->ctx_len);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(desc), ctx->dir);
+ print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
return 0;
}
/* Digest hash size if it is too large */
-static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
- u32 *keylen, u8 *key_out, u32 digestsize)
+static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
+ u32 digestsize)
{
struct device *jrdev = ctx->jrdev;
u32 *desc;
struct split_key_result result;
- dma_addr_t src_dma, dst_dma;
+ dma_addr_t key_dma;
int ret;
desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
@@ -369,18 +395,9 @@
init_job_desc(desc, 0);
- src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, src_dma)) {
- dev_err(jrdev, "unable to map key input memory\n");
- kfree(desc);
- return -ENOMEM;
- }
- dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, dst_dma)) {
- dev_err(jrdev, "unable to map key output memory\n");
- dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+ key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(jrdev, key_dma)) {
+ dev_err(jrdev, "unable to map key memory\n");
kfree(desc);
return -ENOMEM;
}
@@ -388,19 +405,18 @@
/* Job descriptor to perform unkeyed hash on key_in */
append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
OP_ALG_AS_INITFINAL);
- append_seq_in_ptr(desc, src_dma, *keylen, 0);
+ append_seq_in_ptr(desc, key_dma, *keylen, 0);
append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
- append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+ append_seq_out_ptr(desc, key_dma, digestsize, 0);
append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
result.err = 0;
init_completion(&result.completion);
@@ -410,15 +426,12 @@
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "digested key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in,
- digestsize, 1);
-#endif
+
+ print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key,
+ digestsize, 1);
}
- dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
- dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+ dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
*keylen = digestsize;
@@ -431,24 +444,20 @@
const u8 *key, unsigned int keylen)
{
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
u8 *hashed_key = NULL;
-#ifdef DEBUG
- printk(KERN_ERR "keylen %d\n", keylen);
-#endif
+ dev_dbg(jrdev, "keylen %d\n", keylen);
if (keylen > blocksize) {
- hashed_key = kmalloc_array(digestsize,
- sizeof(*hashed_key),
- GFP_KERNEL | GFP_DMA);
+ hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!hashed_key)
return -ENOMEM;
- ret = hash_digest_key(ctx, key, &keylen, hashed_key,
- digestsize);
+ ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
if (ret)
goto bad_free_key;
key = hashed_key;
@@ -468,6 +477,18 @@
goto bad_free_key;
memcpy(ctx->key, key, keylen);
+
+ /*
+ * In case |user key| > |derived key|, using DKP<imm,imm>
+ * would result in invalid opcodes (last bytes of user key) in
+ * the resulting descriptor. Use DKP<ptr,imm> instead => both
+ * virtual and dma key addresses are needed.
+ */
+ if (keylen > ctx->adata.keylen_pad)
+ dma_sync_single_for_device(ctx->jrdev,
+ ctx->adata.key_dma,
+ ctx->adata.keylen_pad,
+ DMA_TO_DEVICE);
} else {
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
keylen, CAAM_MAX_HASH_KEY_SIZE);
@@ -483,9 +504,52 @@
return -EINVAL;
}
+static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *jrdev = ctx->jrdev;
+
+ if (keylen != AES_KEYSIZE_128) {
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
+ DMA_TO_DEVICE);
+ ctx->adata.keylen = keylen;
+
+ print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
+
+ return axcbc_set_sh_desc(ahash);
+}
+
+static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int err;
+
+ err = aes_check_keylen(keylen);
+ if (err) {
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return err;
+ }
+
+ /* key is immediate data for all cmac shared descriptors */
+ ctx->adata.key_virt = key;
+ ctx->adata.keylen = keylen;
+
+ print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ return acmac_set_sh_desc(ahash);
+}
+
/*
* ahash_edesc - s/w-extended ahash descriptor
- * @dst_dma: physical mapped address of req->result
* @sec4_sg_dma: physical mapped address of h/w link table
* @src_nents: number of segments in input scatterlist
* @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -493,11 +557,10 @@
* @sec4_sg: h/w link table
*/
struct ahash_edesc {
- dma_addr_t dst_dma;
dma_addr_t sec4_sg_dma;
int src_nents;
int sec4_sg_bytes;
- u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
+ u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
struct sec4_sg_entry sec4_sg[0];
};
@@ -509,8 +572,6 @@
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
- if (edesc->dst_dma)
- dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma,
@@ -527,12 +588,10 @@
struct ahash_edesc *edesc,
struct ahash_request *req, int dst_len, u32 flag)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
if (state->ctx_dma) {
- dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
state->ctx_dma = 0;
}
ahash_unmap(dev, edesc, req, dst_len);
@@ -545,31 +604,25 @@
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
-#ifdef DEBUG
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
- ahash_unmap(jrdev, edesc, req, digestsize);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
-#endif
+ print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
- req->base.complete(&req->base, err);
+ req->base.complete(&req->base, ecode);
}
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
@@ -580,31 +633,28 @@
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
switch_buf(state);
kfree(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
+ print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
if (req->result)
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
-#endif
+ print_hex_dump_debug("result@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
- req->base.complete(&req->base, err);
+ req->base.complete(&req->base, ecode);
}
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
@@ -614,31 +664,25 @@
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
int digestsize = crypto_ahash_digestsize(ahash);
-#ifdef DEBUG
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
+ memcpy(req->result, state->caam_ctx, digestsize);
kfree(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
-#endif
+ print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
- req->base.complete(&req->base, err);
+ req->base.complete(&req->base, ecode);
}
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
@@ -649,31 +693,28 @@
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
-#ifdef DEBUG
int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
- dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
- caam_jr_strstatus(jrdev, err);
+ ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
switch_buf(state);
kfree(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
- ctx->ctx_len, 1);
+ print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
if (req->result)
- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
-#endif
+ print_hex_dump_debug("result@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
- req->base.complete(&req->base, err);
+ req->base.complete(&req->base, ecode);
}
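/*
 * Illustrative sketch, not part of the patch: the ahash completion
 * callbacks above now share one shape -- translate the CAAM status word
 * into an errno exactly once via caam_jr_strstatus() (which now returns
 * an int), unmap, free the extended descriptor, and complete the request
 * with that errno; the old #ifdef DEBUG hex dumps become unconditional
 * print_hex_dump_debug() calls gated by dynamic debug. The helper name
 * and the unmap_len/dir parameters below are hypothetical, used only to
 * condense the pattern.
 */
static inline void ahash_done_common(struct device *jrdev, u32 *desc, u32 err,
				     struct ahash_request *req, int unmap_len,
				     enum dma_data_direction dir)
{
	struct ahash_edesc *edesc = container_of(desc, struct ahash_edesc,
						 hw_desc[0]);
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);	/* 0 or -errno */

	ahash_unmap_ctx(jrdev, edesc, req, unmap_len, dir);
	kfree(edesc);

	req->base.complete(&req->base, ecode);
}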
/*
@@ -711,9 +752,10 @@
if (nents > 1 || first_sg) {
struct sec4_sg_entry *sg = edesc->sec4_sg;
- unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
+ unsigned int sgsize = sizeof(*sg) *
+ pad_sg_nents(first_sg + nents);
- sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+ sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
if (dma_mapping_error(ctx->jrdev, src_dma)) {
@@ -747,6 +789,7 @@
u8 *buf = current_buf(state);
int *buflen = current_buflen(state);
u8 *next_buf = alt_buf(state);
+ int blocksize = crypto_ahash_blocksize(ahash);
int *next_buflen = alt_buflen(state), last_buflen;
int in_len = *buflen + req->nbytes, to_hash;
u32 *desc;
@@ -755,12 +798,25 @@
int ret = 0;
last_buflen = *next_buflen;
- *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ *next_buflen = in_len & (blocksize - 1);
to_hash = in_len - *next_buflen;
+ /*
+ * For XCBC and CMAC, if to_hash is a multiple of the block size,
+ * keep the last block in the internal buffer
+ */
+ if ((is_xcbc_aes(ctx->adata.algtype) ||
+ is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+ (*next_buflen == 0)) {
+ *next_buflen = blocksize;
+ to_hash -= blocksize;
+ }
+
if (to_hash) {
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - (*next_buflen));
+ int pad_nents;
+ int src_len = req->nbytes - *next_buflen;
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
@@ -778,15 +834,14 @@
}
sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
- sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
- sizeof(struct sec4_sg_entry);
+ pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
+ sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
- ctx->sh_desc_update,
+ edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
ctx->sh_desc_update_dma, flags);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
@@ -805,19 +860,18 @@
if (ret)
goto unmap_ctx;
- if (mapped_nents) {
- sg_to_sec4_sg_last(req->src, mapped_nents,
+ if (mapped_nents)
+ sg_to_sec4_sg_last(req->src, src_len,
edesc->sec4_sg + sec4_sg_src_index,
0);
- if (*next_buflen)
- scatterwalk_map_and_copy(next_buf, req->src,
- to_hash - *buflen,
- *next_buflen, 0);
- } else {
+ else
sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
1);
- }
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -834,11 +888,9 @@
append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
if (ret)
@@ -851,16 +903,15 @@
*buflen = *next_buflen;
*next_buflen = last_buflen;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
-#endif
+
+ print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
return ret;
- unmap_ctx:
+unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
kfree(edesc);
return ret;
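/*
 * Illustrative sketch, not part of the patch: the buffering rule added in
 * ahash_update() above for AES-XCBC/CMAC, pulled out on its own. These
 * MACs must process their final block with the FINALIZE state, so when
 * the accumulated input is an exact multiple of the block size one whole
 * block is held back in the driver buffer instead of being hashed now.
 * The helper name and the aes_mac flag are illustrative only; the
 * arithmetic mirrors the hunk.
 */
static inline void split_to_hash(int in_len, int blocksize, bool aes_mac,
				 int *to_hash, int *next_buflen)
{
	*next_buflen = in_len & (blocksize - 1);	/* residue < blocksize */
	*to_hash = in_len - *next_buflen;

	/* keep one full block back for the final operation */
	if (aes_mac && *to_hash >= blocksize && !*next_buflen) {
		*next_buflen = blocksize;
		*to_hash -= blocksize;
	}
}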
@@ -876,18 +927,17 @@
GFP_KERNEL : GFP_ATOMIC;
int buflen = *current_buflen(state);
u32 *desc;
- int sec4_sg_bytes, sec4_sg_src_index;
+ int sec4_sg_bytes;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret;
- sec4_sg_src_index = 1 + (buflen ? 1 : 0);
- sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
+ sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
+ sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
- ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
- flags);
+ edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
+ ctx->sh_desc_fin_dma, flags);
if (!edesc)
return -ENOMEM;
@@ -896,7 +946,7 @@
edesc->sec4_sg_bytes = sec4_sg_bytes;
ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_TO_DEVICE);
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -904,7 +954,7 @@
if (ret)
goto unmap_ctx;
- sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
+ sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
@@ -916,19 +966,11 @@
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
LDST_SGF);
+ append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
- ret = -ENOMEM;
- goto unmap_ctx;
- }
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
if (ret)
@@ -936,7 +978,7 @@
return -EINPROGRESS;
unmap_ctx:
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
kfree(edesc);
return ret;
}
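/*
 * Illustrative sketch, not part of the patch: the S/G table sizing in the
 * hunks above now goes through pad_sg_nents() (defined elsewhere in the
 * driver, outside these hunks), which pads the HW S/G entry count before
 * allocating and DMA-mapping the table. Assuming the padding is an
 * alignment to a multiple of 4 entries, the helper would look like the
 * sketch below; the name is illustrative.
 */
static inline int pad_sg_nents_example(int nents)
{
	return ALIGN(nents, 4);		/* e.g. 1..4 -> 4, 5..8 -> 8 */
}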
@@ -990,7 +1032,7 @@
edesc->src_nents = src_nents;
ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
- edesc->sec4_sg, DMA_TO_DEVICE);
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -1004,18 +1046,11 @@
if (ret)
goto unmap_ctx;
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
- ret = -ENOMEM;
- goto unmap_ctx;
- }
+ append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
if (ret)
@@ -1023,7 +1058,7 @@
return -EINPROGRESS;
unmap_ctx:
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
kfree(edesc);
return ret;
}
@@ -1082,25 +1117,22 @@
desc = edesc->hw_desc;
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
+ if (ret) {
ahash_unmap(jrdev, edesc, req, digestsize);
kfree(edesc);
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ahash_unmap(jrdev, edesc, req, digestsize);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
kfree(edesc);
}
@@ -1131,31 +1163,30 @@
desc = edesc->hw_desc;
- state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, state->buf_dma)) {
- dev_err(jrdev, "unable to map src\n");
- goto unmap;
+ if (buflen) {
+ state->buf_dma = dma_map_single(jrdev, buf, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, state->buf_dma)) {
+ dev_err(jrdev, "unable to map src\n");
+ goto unmap;
+ }
+
+ append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
}
- append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
-
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
+ if (ret)
goto unmap;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ahash_unmap(jrdev, edesc, req, digestsize);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
kfree(edesc);
}
@@ -1178,6 +1209,7 @@
GFP_KERNEL : GFP_ATOMIC;
u8 *buf = current_buf(state);
int *buflen = current_buflen(state);
+ int blocksize = crypto_ahash_blocksize(ahash);
u8 *next_buf = alt_buf(state);
int *next_buflen = alt_buflen(state);
int in_len = *buflen + req->nbytes, to_hash;
@@ -1186,12 +1218,25 @@
u32 *desc;
int ret = 0;
- *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ *next_buflen = in_len & (blocksize - 1);
to_hash = in_len - *next_buflen;
+ /*
+ * For XCBC and CMAC, if to_hash is a multiple of the block size,
+ * keep the last block in the internal buffer
+ */
+ if ((is_xcbc_aes(ctx->adata.algtype) ||
+ is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+ (*next_buflen == 0)) {
+ *next_buflen = blocksize;
+ to_hash -= blocksize;
+ }
+
if (to_hash) {
- src_nents = sg_nents_for_len(req->src,
- req->nbytes - *next_buflen);
+ int pad_nents;
+ int src_len = req->nbytes - *next_buflen;
+
+ src_nents = sg_nents_for_len(req->src, src_len);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
@@ -1208,14 +1253,14 @@
mapped_nents = 0;
}
- sec4_sg_bytes = (1 + mapped_nents) *
- sizeof(struct sec4_sg_entry);
+ pad_nents = pad_sg_nents(1 + mapped_nents);
+ sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
+ edesc = ahash_edesc_alloc(ctx, pad_nents,
ctx->sh_desc_update_first,
ctx->sh_desc_update_first_dma,
flags);
@@ -1231,8 +1276,7 @@
if (ret)
goto unmap_ctx;
- sg_to_sec4_sg_last(req->src, mapped_nents,
- edesc->sec4_sg + 1, 0);
+ sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
if (*next_buflen) {
scatterwalk_map_and_copy(next_buf, req->src,
@@ -1257,11 +1301,9 @@
if (ret)
goto unmap_ctx;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
if (ret)
@@ -1277,13 +1319,12 @@
*buflen = *next_buflen;
*next_buflen = 0;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
- print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
-#endif
+
+ print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
return ret;
unmap_ctx:
@@ -1354,23 +1395,19 @@
goto unmap;
}
- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
- digestsize);
- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
- dev_err(jrdev, "unable to map dst\n");
+ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
+ if (ret)
goto unmap;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ahash_unmap(jrdev, edesc, req, digestsize);
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
kfree(edesc);
}
@@ -1394,15 +1431,26 @@
u8 *next_buf = alt_buf(state);
int *next_buflen = alt_buflen(state);
int to_hash;
+ int blocksize = crypto_ahash_blocksize(ahash);
u32 *desc;
int src_nents, mapped_nents;
struct ahash_edesc *edesc;
int ret = 0;
- *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
- 1);
+ *next_buflen = req->nbytes & (blocksize - 1);
to_hash = req->nbytes - *next_buflen;
+ /*
+ * For XCBC and CMAC, if to_hash is a multiple of the block size,
+ * keep the last block in the internal buffer
+ */
+ if ((is_xcbc_aes(ctx->adata.algtype) ||
+ is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+ (*next_buflen == 0)) {
+ *next_buflen = blocksize;
+ to_hash -= blocksize;
+ }
+
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
req->nbytes - *next_buflen);
@@ -1453,11 +1501,9 @@
if (ret)
goto unmap_ctx;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
if (ret)
@@ -1475,11 +1521,10 @@
req->nbytes, 0);
switch_buf(state);
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
- *next_buflen, 1);
-#endif
+
+ print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
return ret;
unmap_ctx:
@@ -1502,6 +1547,7 @@
state->final = ahash_final_no_ctx;
state->ctx_dma = 0;
+ state->ctx_dma_len = 0;
state->current_buf = 0;
state->buf_dma = 0;
state->buflen_0 = 0;
@@ -1710,6 +1756,44 @@
},
},
.alg_type = OP_ALG_ALGSEL_MD5,
+ }, {
+ .hmac_name = "xcbc(aes)",
+ .hmac_driver_name = "xcbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = axcbc_setkey,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
+ }, {
+ .hmac_name = "cmac(aes)",
+ .hmac_driver_name = "cmac-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = acmac_setkey,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
},
};
@@ -1751,14 +1835,55 @@
}
priv = dev_get_drvdata(ctx->jrdev->parent);
- ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+ if (is_xcbc_aes(caam_hash->alg_type)) {
+ ctx->dir = DMA_TO_DEVICE;
+ ctx->key_dir = DMA_BIDIRECTIONAL;
+ ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
+ ctx->ctx_len = 48;
+ } else if (is_cmac_aes(caam_hash->alg_type)) {
+ ctx->dir = DMA_TO_DEVICE;
+ ctx->key_dir = DMA_NONE;
+ ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
+ ctx->ctx_len = 32;
+ } else {
+ if (priv->era >= 6) {
+ ctx->dir = DMA_BIDIRECTIONAL;
+ ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
+ } else {
+ ctx->dir = DMA_TO_DEVICE;
+ ctx->key_dir = DMA_NONE;
+ }
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT];
+ }
+
+ if (ctx->key_dir != DMA_NONE) {
+ ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
+ ARRAY_SIZE(ctx->key),
+ ctx->key_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
+ dev_err(ctx->jrdev, "unable to map key\n");
+ caam_jr_free(ctx->jrdev);
+ return -ENOMEM;
+ }
+ }
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
- offsetof(struct caam_hash_ctx,
- sh_desc_update_dma),
+ offsetof(struct caam_hash_ctx, key),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
+
+ if (ctx->key_dir != DMA_NONE)
+ dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
+ ARRAY_SIZE(ctx->key),
+ ctx->key_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+
caam_jr_free(ctx->jrdev);
return -ENOMEM;
}
@@ -1772,16 +1897,14 @@
ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
sh_desc_digest);
- /* copy descriptor header template value */
- ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
-
- ctx->ctx_len = runninglen[(ctx->adata.algtype &
- OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT];
-
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct caam_hash_state));
- return ahash_set_sh_desc(ahash);
+
+ /*
+ * For keyed hash algorithms, the shared descriptors
+ * will be created later, in the setkey() callback
+ */
+ return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@@ -1789,13 +1912,16 @@
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
- offsetof(struct caam_hash_ctx,
- sh_desc_update_dma),
+ offsetof(struct caam_hash_ctx, key),
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (ctx->key_dir != DMA_NONE)
+ dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
+ ARRAY_SIZE(ctx->key), ctx->key_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
-static void __exit caam_algapi_hash_exit(void)
+void caam_algapi_hash_exit(void)
{
struct caam_hash_alg *t_alg, *n;
@@ -1853,56 +1979,38 @@
return t_alg;
}
-static int __init caam_algapi_hash_init(void)
+int caam_algapi_hash_init(struct device *ctrldev)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
int i = 0, err = 0;
- struct caam_drv_private *priv;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
unsigned int md_limit = SHA512_DIGEST_SIZE;
- u32 cha_inst, cha_vid;
-
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev) {
- of_node_put(dev_node);
- return -ENODEV;
- }
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv)
- return -ENODEV;
+ u32 md_inst, md_vid;
/*
* Register crypto algorithms the device supports. First, identify
* presence and attributes of MD block.
*/
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ if (priv->era < 10) {
+ md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
+ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ } else {
+ u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+ md_inst = mdha & CHA_VER_NUM_MASK;
+ }
/*
* Skip registration of any hashing algorithms if MD block
* is not present.
*/
- if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
- return -ENODEV;
+ if (!md_inst)
+ return 0;
/* Limit digest size based on LP256 */
- if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
+ if (md_vid == CHA_VER_VID_MD_LP256)
md_limit = SHA256_DIGEST_SIZE;
INIT_LIST_HEAD(&hash_list);
@@ -1913,14 +2021,16 @@
struct caam_hash_template *alg = driver_hash + i;
/* If MD size is not supported by device, skip registration */
- if (alg->template_ahash.halg.digestsize > md_limit)
+ if (is_mdha(alg->alg_type) &&
+ alg->template_ahash.halg.digestsize > md_limit)
continue;
/* register hmac version */
t_alg = caam_hash_alloc(alg, true);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n", alg->driver_name);
+ pr_warn("%s alg allocation failed\n",
+ alg->hmac_driver_name);
continue;
}
@@ -1933,6 +2043,9 @@
} else
list_add_tail(&t_alg->entry, &hash_list);
+ if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
+ continue;
+
/* register unkeyed version */
t_alg = caam_hash_alloc(alg, false);
if (IS_ERR(t_alg)) {
@@ -1953,10 +2066,3 @@
return err;
}
-
-module_init(caam_algapi_hash_init);
-module_exit(caam_algapi_hash_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
-MODULE_AUTHOR("Freescale Semiconductor - NMG");
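/*
 * Illustrative sketch, not part of the patch: caam_algapi_hash_init()
 * above, and caam_pkc_init()/caam_rng_init() later in this patch, all
 * probe block presence the same way -- Era < 10 parts expose it in the
 * perfmon CHA id/instantiation registers, Era >= 10 parts in per-block
 * version registers (vreg). The pattern for the MD block, condensed;
 * caam_md_present() is an illustrative name, not a helper added by the
 * patch.
 */
static bool caam_md_present(struct caam_drv_private *priv, u32 *vid)
{
	u32 inst;

	if (priv->era < 10) {
		*vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		*vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		inst = mdha & CHA_VER_NUM_MASK;
	}

	return inst != 0;	/* skip registration when not instantiated */
}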
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c
new file mode 100644
index 0000000..78383d7
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Shared descriptors for ahash algorithms
+ *
+ * Copyright 2017-2019 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamhash_desc.h"
+
+/**
+ * cnstr_shdsc_ahash - ahash shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
+ * SHA256, SHA384, SHA512}.
+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
+ * @digestsize: algorithm's digest size
+ * @ctx_len: size of Context Register
+ * @import_ctx: true if previous Context Register needs to be restored
+ * must be true for ahash update and final
+ * must be false for ahash first and digest
+ * @era: SEC Era
+ */
+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, bool import_ctx, int era)
+{
+ u32 op = adata->algtype;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Append key if it has been set; ahash update excluded */
+ if (state != OP_ALG_AS_UPDATE && adata->keylen) {
+ u32 *skip_key_load;
+
+ /* Skip key loading if already shared */
+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ if (era < 6)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_proto_dkp(desc, adata);
+
+ set_jump_tgt_here(desc, skip_key_load);
+
+ op |= OP_ALG_AAI_HMAC_PRECOMP;
+ }
+
+ /* If needed, import context from software */
+ if (import_ctx)
+ append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ /* Class 2 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
+ */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+}
+EXPORT_SYMBOL(cnstr_shdsc_ahash);
+
+/**
+ * cnstr_shdsc_sk_hash - shared descriptor for symmetric key cipher-based
+ * hash algorithms
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
+ * @digestsize: algorithm's digest size
+ * @ctx_len: size of Context Register
+ */
+void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len)
+{
+ u32 *skip_key_load;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+ /* Skip loading of key, context if already shared */
+ skip_key_load = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+
+ if (state == OP_ALG_AS_INIT || state == OP_ALG_AS_INITFINAL) {
+ append_key_as_imm(desc, adata->key_virt, adata->keylen,
+ adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ } else { /* UPDATE, FINALIZE */
+ if (is_xcbc_aes(adata->algtype))
+ /* Load K1 */
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_1 | KEY_DEST_CLASS_REG | KEY_ENC);
+ else /* CMAC */
+ append_key_as_imm(desc, adata->key_virt, adata->keylen,
+ adata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ /* Restore context */
+ append_seq_load(desc, ctx_len, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+ }
+
+ set_jump_tgt_here(desc, skip_key_load);
+
+ /* Class 1 operation */
+ append_operation(desc, adata->algtype | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
+ */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_LAST1 |
+ FIFOLD_TYPE_MSG | FIFOLDST_VLF);
+
+ /*
+ * Save context:
+ * - xcbc: partial hash, keys K2 and K3
+ * - cmac: partial hash, constant L = E(K,0)
+ */
+ append_seq_store(desc, digestsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+ if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT)
+ /* Save K1 */
+ append_fifo_store(desc, adata->key_dma, adata->keylen,
+ LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK);
+}
+EXPORT_SYMBOL(cnstr_shdsc_sk_hash);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
+MODULE_AUTHOR("NXP Semiconductors");
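/*
 * Illustrative usage sketch, not part of the patch: how a keyed-hash
 * setkey() path might build the four shared descriptors with
 * cnstr_shdsc_sk_hash() above. The ctx->sh_desc_* fields come from the
 * caamhash.c hunks earlier in this patch; the function name is
 * hypothetical, and the DMA syncs the real driver performs after each
 * construction are omitted for brevity.
 */
static void example_sk_hash_set_sh_desc(struct caam_hash_ctx *ctx,
					int digestsize)
{
	/* ahash_update: restore context, hash a middle chunk */
	cnstr_shdsc_sk_hash(ctx->sh_desc_update, &ctx->adata,
			    OP_ALG_AS_UPDATE, ctx->ctx_len, ctx->ctx_len);
	/* first ahash_update: load the key, no previous context */
	cnstr_shdsc_sk_hash(ctx->sh_desc_update_first, &ctx->adata,
			    OP_ALG_AS_INIT, ctx->ctx_len, ctx->ctx_len);
	/* ahash_final/finup: restore context, produce the digest */
	cnstr_shdsc_sk_hash(ctx->sh_desc_fin, &ctx->adata,
			    OP_ALG_AS_FINALIZE, digestsize, ctx->ctx_len);
	/* ahash_digest: one-shot init + finalize */
	cnstr_shdsc_sk_hash(ctx->sh_desc_digest, &ctx->adata,
			    OP_ALG_AS_INITFINAL, digestsize, ctx->ctx_len);
}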
diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h
new file mode 100644
index 0000000..4f369b8
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Shared descriptors for ahash algorithms
+ *
+ * Copyright 2017 NXP
+ */
+
+#ifndef _CAAMHASH_DESC_H_
+#define _CAAMHASH_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+
+static inline bool is_xcbc_aes(u32 algtype)
+{
+ return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
+ (OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC);
+}
+
+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, bool import_ctx, int era);
+
+void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len);
+#endif /* _CAAMHASH_DESC_H_ */
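/*
 * Illustrative sketch, not part of the patch: the caamhash.c hunks also
 * test is_cmac_aes(), whose definition lies outside the hunks shown here.
 * Mirroring is_xcbc_aes() above, it presumably matches on the CMAC AAI
 * instead:
 */
static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}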
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index f26d62e..83f96d4 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -1,7 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* caam - Freescale FSL CAAM support for Public Key Cryptography
*
* Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2018-2019 NXP
*
* There is no Shared Descriptor for PKC so that the Job Descriptor must carry
* all the desired key parameters, input and output pointers.
@@ -15,19 +17,36 @@
#include "sg_sw_sec4.h"
#include "caampkc.h"
-#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
+#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
- sizeof(struct rsa_priv_f1_pdb))
+ SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
- sizeof(struct rsa_priv_f2_pdb))
+ SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
- sizeof(struct rsa_priv_f3_pdb))
+ SIZEOF_RSA_PRIV_F3_PDB)
+#define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */
+
+/* buffer filled with zeros, used for padding */
+static u8 *zero_buffer;
+
+/*
+ * variable used to avoid double free of resources in case
+ * algorithm registration was unsuccessful
+ */
+static bool init_done;
+
+struct caam_akcipher_alg {
+ struct akcipher_alg akcipher;
+ bool registered;
+};
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
{
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
- dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
@@ -99,9 +118,10 @@
{
struct akcipher_request *req = context;
struct rsa_edesc *edesc;
+ int ecode = 0;
if (err)
- caam_jr_strstatus(dev, err);
+ ecode = caam_jr_strstatus(dev, err);
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
@@ -109,7 +129,7 @@
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
- akcipher_request_complete(req, err);
+ akcipher_request_complete(req, ecode);
}
static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
@@ -117,9 +137,10 @@
{
struct akcipher_request *req = context;
struct rsa_edesc *edesc;
+ int ecode = 0;
if (err)
- caam_jr_strstatus(dev, err);
+ ecode = caam_jr_strstatus(dev, err);
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
@@ -127,7 +148,7 @@
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
- akcipher_request_complete(req, err);
+ akcipher_request_complete(req, ecode);
}
static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
@@ -135,9 +156,10 @@
{
struct akcipher_request *req = context;
struct rsa_edesc *edesc;
+ int ecode = 0;
if (err)
- caam_jr_strstatus(dev, err);
+ ecode = caam_jr_strstatus(dev, err);
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
@@ -145,7 +167,7 @@
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
- akcipher_request_complete(req, err);
+ akcipher_request_complete(req, ecode);
}
static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
@@ -153,9 +175,10 @@
{
struct akcipher_request *req = context;
struct rsa_edesc *edesc;
+ int ecode = 0;
if (err)
- caam_jr_strstatus(dev, err);
+ ecode = caam_jr_strstatus(dev, err);
edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
@@ -163,9 +186,16 @@
rsa_io_unmap(dev, edesc, req);
kfree(edesc);
- akcipher_request_complete(req, err);
+ akcipher_request_complete(req, ecode);
}
+/**
+ * Count the leading zeros to be stripped from a given scatterlist
+ *
+ * @sgl : scatterlist to count zeros from
+ * @nbytes: maximum number of zeros, in bytes, to strip
+ * @flags : operation flags
+ */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
unsigned int nbytes,
unsigned int flags)
@@ -185,7 +215,8 @@
lzeros = 0;
len = 0;
while (nbytes > 0) {
- while (len && !*buff) {
+ /* do not strip more than given bytes */
+ while (len && !*buff && lzeros < nbytes) {
lzeros++;
len--;
buff++;
@@ -216,6 +247,7 @@
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *dev = ctx->dev;
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+ struct caam_rsa_key *key = &ctx->key;
struct rsa_edesc *edesc;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
@@ -223,22 +255,45 @@
int sgc;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
int src_nents, dst_nents;
+ unsigned int diff_size = 0;
int lzeros;
- lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
- if (lzeros < 0)
- return ERR_PTR(lzeros);
+ if (req->src_len > key->n_sz) {
+ /*
+ * strip leading zeros and
+ * return the number of zeros to skip
+ */
+ lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
+ key->n_sz, sg_flags);
+ if (lzeros < 0)
+ return ERR_PTR(lzeros);
- req->src_len -= lzeros;
- req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
+ req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
+ lzeros);
+ req_ctx->fixup_src_len = req->src_len - lzeros;
+ } else {
+ /*
+ * input src is less than the n key modulus,
+ * so there will be zero padding
+ */
+ diff_size = key->n_sz - req->src_len;
+ req_ctx->fixup_src = req->src;
+ req_ctx->fixup_src_len = req->src_len;
+ }
- src_nents = sg_nents_for_len(req->src, req->src_len);
+ src_nents = sg_nents_for_len(req_ctx->fixup_src,
+ req_ctx->fixup_src_len);
dst_nents = sg_nents_for_len(req->dst, req->dst_len);
- if (src_nents > 1)
- sec4_sg_len = src_nents;
+ if (!diff_size && src_nents == 1)
+ sec4_sg_len = 0; /* no need for an input hw s/g table */
+ else
+ sec4_sg_len = src_nents + !!diff_size;
+ sec4_sg_index = sec4_sg_len;
if (dst_nents > 1)
- sec4_sg_len += dst_nents;
+ sec4_sg_len += pad_sg_nents(dst_nents);
+ else
+ sec4_sg_len = pad_sg_nents(sec4_sg_len);
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
@@ -248,7 +303,7 @@
if (!edesc)
return ERR_PTR(-ENOMEM);
- sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
if (unlikely(!sgc)) {
dev_err(dev, "unable to map source\n");
goto src_fail;
@@ -261,14 +316,16 @@
}
edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
+ if (diff_size)
+ dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
+ 0);
- sec4_sg_index = 0;
- if (src_nents > 1) {
- sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
- sec4_sg_index += src_nents;
- }
+ if (sec4_sg_index)
+ sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
+ edesc->sec4_sg + !!diff_size, 0);
+
if (dst_nents > 1)
- sg_to_sec4_sg_last(req->dst, dst_nents,
+ sg_to_sec4_sg_last(req->dst, req->dst_len,
edesc->sec4_sg + sec4_sg_index, 0);
/* Save nents for later use in Job Descriptor */
@@ -287,12 +344,16 @@
edesc->sec4_sg_bytes = sec4_sg_bytes;
+ print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+ edesc->sec4_sg_bytes, 1);
+
return edesc;
sec4_sg_fail:
dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
- dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
src_fail:
kfree(edesc);
return ERR_PTR(-ENOMEM);
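/*
 * Illustrative sketch, not part of the patch: the hunk above normalizes
 * the RSA input to exactly key->n_sz bytes -- oversized inputs have at
 * most (src_len - n_sz) leading zeros stripped, undersized inputs are
 * left-padded from the pre-mapped zero buffer via an extra S/G entry
 * (diff_size). The decision on its own, with rsa_input_fixup() as an
 * illustrative name:
 */
static int rsa_input_fixup(struct akcipher_request *req,
			   struct caam_rsa_req_ctx *req_ctx,
			   unsigned int n_sz, unsigned int sg_flags,
			   unsigned int *diff_size)
{
	*diff_size = 0;

	if (req->src_len > n_sz) {
		int lzeros = caam_rsa_count_leading_zeros(req->src,
							  req->src_len - n_sz,
							  sg_flags);
		if (lzeros < 0)
			return lzeros;

		/* skip the stripped zeros without touching req->src */
		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		*diff_size = n_sz - req->src_len;	/* zero-pad length */
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	return 0;
}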
@@ -302,6 +363,7 @@
struct rsa_edesc *edesc)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
struct device *dev = ctx->dev;
@@ -326,7 +388,7 @@
pdb->f_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->f_dma = sg_dma_address(req->src);
+ pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
}
if (edesc->dst_nents > 1) {
@@ -338,7 +400,7 @@
}
pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
- pdb->f_len = req->src_len;
+ pdb->f_len = req_ctx->fixup_src_len;
return 0;
}
@@ -371,7 +433,9 @@
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->g_dma = sg_dma_address(req->src);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
if (edesc->dst_nents > 1) {
@@ -434,7 +498,9 @@
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->g_dma = sg_dma_address(req->src);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
if (edesc->dst_nents > 1) {
@@ -521,7 +587,9 @@
pdb->g_dma = edesc->sec4_sg_dma;
sec4_sg_index += edesc->src_nents;
} else {
- pdb->g_dma = sg_dma_address(req->src);
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
}
if (edesc->dst_nents > 1) {
@@ -814,7 +882,7 @@
return ret;
/* Copy key in DMA zone */
- rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
if (!rsa_key->e)
goto err;
@@ -836,8 +904,6 @@
rsa_key->e_sz = raw_key.e_sz;
rsa_key->n_sz = raw_key.n_sz;
- memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
-
return 0;
err:
caam_rsa_free_key(rsa_key);
@@ -918,11 +984,11 @@
return ret;
/* Copy key in DMA zone */
- rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
if (!rsa_key->d)
goto err;
- rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
if (!rsa_key->e)
goto err;
@@ -945,9 +1011,6 @@
rsa_key->e_sz = raw_key.e_sz;
rsa_key->n_sz = raw_key.n_sz;
- memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
- memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
-
caam_rsa_set_priv_key_form(ctx, &raw_key);
return 0;
@@ -976,6 +1039,15 @@
return PTR_ERR(ctx->dev);
}
+ ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
+ CAAM_RSA_MAX_INPUT_SIZE - 1,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
+ dev_err(ctx->dev, "unable to map padding\n");
+ caam_jr_free(ctx->dev);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -985,90 +1057,79 @@
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct caam_rsa_key *key = &ctx->key;
+ dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
+ 1, DMA_TO_DEVICE);
caam_rsa_free_key(key);
caam_jr_free(ctx->dev);
}
-static struct akcipher_alg caam_rsa = {
- .encrypt = caam_rsa_enc,
- .decrypt = caam_rsa_dec,
- .sign = caam_rsa_dec,
- .verify = caam_rsa_enc,
- .set_pub_key = caam_rsa_set_pub_key,
- .set_priv_key = caam_rsa_set_priv_key,
- .max_size = caam_rsa_max_size,
- .init = caam_rsa_init_tfm,
- .exit = caam_rsa_exit_tfm,
- .reqsize = sizeof(struct caam_rsa_req_ctx),
- .base = {
- .cra_name = "rsa",
- .cra_driver_name = "rsa-caam",
- .cra_priority = 3000,
- .cra_module = THIS_MODULE,
- .cra_ctxsize = sizeof(struct caam_rsa_ctx),
- },
+static struct caam_akcipher_alg caam_rsa = {
+ .akcipher = {
+ .encrypt = caam_rsa_enc,
+ .decrypt = caam_rsa_dec,
+ .set_pub_key = caam_rsa_set_pub_key,
+ .set_priv_key = caam_rsa_set_priv_key,
+ .max_size = caam_rsa_max_size,
+ .init = caam_rsa_init_tfm,
+ .exit = caam_rsa_exit_tfm,
+ .reqsize = sizeof(struct caam_rsa_req_ctx),
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "rsa-caam",
+ .cra_priority = 3000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct caam_rsa_ctx),
+ },
+ }
};
/* Public Key Cryptography module initialization handler */
-static int __init caam_pkc_init(void)
+int caam_pkc_init(struct device *ctrldev)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
- u32 cha_inst, pk_inst;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+ u32 pk_inst;
int err;
-
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev) {
- of_node_put(dev_node);
- return -ENODEV;
- }
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv)
- return -ENODEV;
+ init_done = false;
/* Determine public key hardware accelerator presence. */
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
- pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+ if (priv->era < 10)
+ pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+ else
+ pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
/* Do not register algorithms if PKHA is not present. */
if (!pk_inst)
- return -ENODEV;
+ return 0;
- err = crypto_register_akcipher(&caam_rsa);
- if (err)
+ /* allocate zero buffer, used for padding input */
+ zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
+ GFP_KERNEL);
+ if (!zero_buffer)
+ return -ENOMEM;
+
+ err = crypto_register_akcipher(&caam_rsa.akcipher);
+
+ if (err) {
+ kfree(zero_buffer);
dev_warn(ctrldev, "%s alg registration failed\n",
- caam_rsa.base.cra_driver_name);
- else
+ caam_rsa.akcipher.base.cra_driver_name);
+ } else {
+ init_done = true;
+ caam_rsa.registered = true;
dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
+ }
return err;
}
-static void __exit caam_pkc_exit(void)
+void caam_pkc_exit(void)
{
- crypto_unregister_akcipher(&caam_rsa);
+ if (!init_done)
+ return;
+
+ if (caam_rsa.registered)
+ crypto_unregister_akcipher(&caam_rsa.akcipher);
+
+ kfree(zero_buffer);
}
-
-module_init(caam_pkc_init);
-module_exit(caam_pkc_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
-MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index 82645bc..2c488c9 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -89,18 +89,25 @@
* caam_rsa_ctx - per session context.
* @key : RSA key in DMA zone
* @dev : device structure
+ * @padding_dma : dma address of the zero padding, prepended to short inputs
*/
struct caam_rsa_ctx {
struct caam_rsa_key key;
struct device *dev;
+ dma_addr_t padding_dma;
+
};
/**
* caam_rsa_req_ctx - per request context.
- * @src: input scatterlist (stripped of leading zeros)
+ * @src : input scatterlist (stripped of leading zeros)
+ * @fixup_src : input scatterlist (that might be stripped of leading zeros)
+ * @fixup_src_len : length of the fixup_src input scatterlist
*/
struct caam_rsa_req_ctx {
struct scatterlist src[2];
+ struct scatterlist *fixup_src;
+ unsigned int fixup_src_len;
};
/**
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index fde07d4..e8baaca 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -1,7 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for hw_random
*
* Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2018-2019 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -51,7 +53,7 @@
L1_CACHE_BYTES)
/* length of descriptors */
-#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
+#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ_MAX * 2)
#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
/* Buffer, its dma address and lock */
@@ -78,6 +80,12 @@
static struct caam_rng_ctx *rng_ctx;
+/*
+ * Variable used to avoid double free of resources in case
+ * algorithm registration was unsuccessful
+ */
+static bool init_done;
+
static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
if (bd->addr)
@@ -111,10 +119,8 @@
/* Buffer refilled, invalidate cache */
dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
- DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
-#endif
+ print_hex_dump_debug("rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ bd->buf, RN_BUF_SIZE, 1);
}
static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
@@ -207,10 +213,10 @@
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
- desc, desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+
return 0;
}
@@ -231,10 +237,10 @@
}
append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
- desc, desc_bytes(desc), 1);
-#endif
+
+ print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+
return 0;
}
@@ -294,49 +300,33 @@
.read = caam_read,
};
-static void __exit caam_rng_exit(void)
+void caam_rng_exit(void)
{
+ if (!init_done)
+ return;
+
caam_jr_free(rng_ctx->jrdev);
hwrng_unregister(&caam_rng);
kfree(rng_ctx);
}
-static int __init caam_rng_init(void)
+int caam_rng_init(struct device *ctrldev)
{
struct device *dev;
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
+ u32 rng_inst;
+ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
int err;
-
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev) {
- of_node_put(dev_node);
- return -ENODEV;
- }
-
- ctrldev = &pdev->dev;
- priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
-
- /*
- * If priv is NULL, it's probably because the caam driver wasn't
- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
- */
- if (!priv)
- return -ENODEV;
+ init_done = false;
/* Check for an instantiated RNG before registration */
- if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
- return -ENODEV;
+ if (priv->era < 10)
+ rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+ else
+ rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
+
+ if (!rng_inst)
+ return 0;
dev = caam_jr_alloc();
if (IS_ERR(dev)) {
@@ -353,7 +343,12 @@
goto free_rng_ctx;
dev_info(dev, "registering rng-caam\n");
- return hwrng_register(&caam_rng);
+
+ err = hwrng_register(&caam_rng);
+ if (!err) {
+ init_done = true;
+ return err;
+ }
free_rng_ctx:
kfree(rng_ctx);
@@ -361,10 +356,3 @@
caam_jr_free(dev);
return err;
}
-
-module_init(caam_rng_init);
-module_exit(caam_rng_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
-MODULE_AUTHOR("Freescale Semiconductor - NMG");
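/*
 * Illustrative sketch, not part of the patch: with the move away from
 * module_init()/module_exit() to init/exit hooks driven by the core
 * driver, caamrng (like caampkc above) guards its teardown with an
 * init_done flag so that an exit call after a failed or skipped
 * registration does not unregister or free anything twice. The pattern,
 * condensed; example_register()/example_unregister() are hypothetical
 * stand-ins for hwrng_register()/hwrng_unregister() and friends.
 */
static bool example_init_done;

static int example_register(struct device *ctrldev);
static void example_unregister(void);

int example_subsys_init(struct device *ctrldev)
{
	int err;

	example_init_done = false;

	err = example_register(ctrldev);
	if (!err)
		example_init_done = true;	/* teardown is now safe */

	return err;
}

void example_subsys_exit(void)
{
	if (!example_init_done)
		return;			/* init failed or was skipped */

	example_unregister();
}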
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 1c71e0c..60e2a54 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -17,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/iommu.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
@@ -31,14 +32,18 @@
#include <crypto/null.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/des.h>
+#include <crypto/internal/des.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
+#include <crypto/chacha.h>
+#include <crypto/poly1305.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <crypto/arc4.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/rsa.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 538c01f..db22777 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -1,7 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/* * CAAM control-plane driver backend
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2018-2019 NXP
*/
#include <linux/device.h>
@@ -16,28 +18,14 @@
#include "desc_constr.h"
#include "ctrl.h"
-bool caam_little_end;
-EXPORT_SYMBOL(caam_little_end);
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);
-bool caam_imx;
-EXPORT_SYMBOL(caam_imx);
#ifdef CONFIG_CAAM_QI
#include "qi.h"
#endif
/*
- * i.MX targets tend to have clock control subsystems that can
- * enable/disable clocking to our device.
- */
-static inline struct clk *caam_drv_identify_clk(struct device *dev,
- char *clk_name)
-{
- return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
-}
-
-/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
* load the JDKEK, TDKEK and TDSK registers
*/
@@ -105,11 +93,16 @@
struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
struct caam_deco __iomem *deco = ctrlpriv->deco;
unsigned int timeout = 100000;
- u32 deco_dbg_reg, flags;
+ u32 deco_dbg_reg, deco_state, flags;
int i;
- if (ctrlpriv->virt_en == 1) {
+ if (ctrlpriv->virt_en == 1 ||
+ /*
+ * Apparently, on i.MX8MQ it does not matter whether virt_en == 1;
+ * the following steps should be performed regardless
+ */
+ of_machine_is_compatible("fsl,imx8mq")) {
clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
@@ -148,13 +141,22 @@
timeout = 10000000;
do {
deco_dbg_reg = rd_reg32(&deco->desc_dbg);
+
+ if (ctrlpriv->era < 10)
+ deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
+ DESC_DBG_DECO_STAT_SHIFT;
+ else
+ deco_state = (rd_reg32(&deco->dbg_exec) &
+ DESC_DER_DECO_STAT_MASK) >>
+ DESC_DER_DECO_STAT_SHIFT;
+
/*
* If an error occurred in the descriptor, then
* the DECO status field will be set to 0x0D
*/
- if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
- DESC_DBG_DECO_STAT_HOST_ERR)
+ if (deco_state == DECO_STAT_HOST_ERR)
break;
+
cpu_relax();
} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
@@ -316,8 +318,8 @@
of_platform_depopulate(ctrldev);
#ifdef CONFIG_CAAM_QI
- if (ctrlpriv->qidev)
- caam_qi_shutdown(ctrlpriv->qidev);
+ if (ctrlpriv->qi_init)
+ caam_qi_shutdown(ctrldev);
#endif
/*
@@ -335,13 +337,6 @@
/* Unmap controller region */
iounmap(ctrl);
- /* shut clocks off before finalizing shutdown */
- clk_disable_unprepare(ctrlpriv->caam_ipg);
- if (ctrlpriv->caam_mem)
- clk_disable_unprepare(ctrlpriv->caam_mem);
- clk_disable_unprepare(ctrlpriv->caam_aclk);
- if (ctrlpriv->caam_emi_slow)
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
return 0;
}
@@ -461,6 +456,24 @@
return caam_get_era_from_hw(ctrl);
}
+/*
+ * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6QP)
+ * have an issue wherein AXI bus transactions may not occur in the correct
+ * order. This isn't a problem running single descriptors, but can be if
+ * running multiple concurrent descriptors. Reworking the driver to throttle
+ * to single requests is impractical, thus the workaround is to limit the AXI
+ * pipeline to a depth of 1 (from its default of 4) to preclude this situation
+ * from occurring.
+ */
+static void handle_imx6_err005766(u32 *mcr)
+{
+ if (of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6dl") ||
+ of_machine_is_compatible("fsl,imx6qp"))
+ clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
+ 1 << MCFGR_AXIPIPE_SHIFT);
+}
+
static const struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
@@ -472,25 +485,104 @@
};
MODULE_DEVICE_TABLE(of, caam_match);
+struct caam_imx_data {
+ const struct clk_bulk_data *clks;
+ int num_clks;
+};
+
+static const struct clk_bulk_data caam_imx6_clks[] = {
+ { .id = "ipg" },
+ { .id = "mem" },
+ { .id = "aclk" },
+ { .id = "emi_slow" },
+};
+
+static const struct caam_imx_data caam_imx6_data = {
+ .clks = caam_imx6_clks,
+ .num_clks = ARRAY_SIZE(caam_imx6_clks),
+};
+
+static const struct clk_bulk_data caam_imx7_clks[] = {
+ { .id = "ipg" },
+ { .id = "aclk" },
+};
+
+static const struct caam_imx_data caam_imx7_data = {
+ .clks = caam_imx7_clks,
+ .num_clks = ARRAY_SIZE(caam_imx7_clks),
+};
+
+static const struct clk_bulk_data caam_imx6ul_clks[] = {
+ { .id = "ipg" },
+ { .id = "mem" },
+ { .id = "aclk" },
+};
+
+static const struct caam_imx_data caam_imx6ul_data = {
+ .clks = caam_imx6ul_clks,
+ .num_clks = ARRAY_SIZE(caam_imx6ul_clks),
+};
+
+static const struct soc_device_attribute caam_imx_soc_table[] = {
+ { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
+ { .soc_id = "i.MX6*", .data = &caam_imx6_data },
+ { .soc_id = "i.MX7*", .data = &caam_imx7_data },
+ { .soc_id = "i.MX8MQ", .data = &caam_imx7_data },
+ { .family = "Freescale i.MX" },
+ { /* sentinel */ }
+};
+
+static void disable_clocks(void *data)
+{
+ struct caam_drv_private *ctrlpriv = data;
+
+ clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks);
+}
+
+static int init_clocks(struct device *dev, const struct caam_imx_data *data)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ int ret;
+
+ ctrlpriv->num_clks = data->num_clks;
+ ctrlpriv->clks = devm_kmemdup(dev, data->clks,
+ data->num_clks * sizeof(data->clks[0]),
+ GFP_KERNEL);
+ if (!ctrlpriv->clks)
+ return -ENOMEM;
+
+ ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks);
+ if (ret) {
+ dev_err(dev,
+ "Failed to request all necessary clocks\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks);
+ if (ret) {
+ dev_err(dev,
+ "Failed to prepare/enable all necessary clocks\n");
+ return ret;
+ }
+
+ return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
+}
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
u64 caam_id;
- static const struct soc_device_attribute imx_soc[] = {
- {.family = "Freescale i.MX"},
- {},
- };
+ const struct soc_device_attribute *imx_soc_match;
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
struct caam_drv_private *ctrlpriv;
- struct clk *clk;
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
u32 scfgr, comp_params;
- u32 cha_vid_ls;
+ u8 rng_vid;
int pg_size;
int BLOCK_OFFSET = 0;
@@ -502,101 +594,68 @@
dev_set_drvdata(dev, ctrlpriv);
nprop = pdev->dev.of_node;
- caam_imx = (bool)soc_device_match(imx_soc);
+ imx_soc_match = soc_device_match(caam_imx_soc_table);
+ caam_imx = (bool)imx_soc_match;
- /* Enable clocking */
- clk = caam_drv_identify_clk(&pdev->dev, "ipg");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM ipg clk: %d\n", ret);
- return ret;
- }
- ctrlpriv->caam_ipg = clk;
+ if (imx_soc_match) {
+ if (!imx_soc_match->data) {
+ dev_err(dev, "No clock data provided for i.MX SoC");
+ return -EINVAL;
+ }
- if (!of_machine_is_compatible("fsl,imx7d") &&
- !of_machine_is_compatible("fsl,imx7s")) {
- clk = caam_drv_identify_clk(&pdev->dev, "mem");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM mem clk: %d\n", ret);
+ ret = init_clocks(dev, imx_soc_match->data);
+ if (ret)
return ret;
- }
- ctrlpriv->caam_mem = clk;
}
- clk = caam_drv_identify_clk(&pdev->dev, "aclk");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM aclk clk: %d\n", ret);
- return ret;
- }
- ctrlpriv->caam_aclk = clk;
-
- if (!of_machine_is_compatible("fsl,imx6ul") &&
- !of_machine_is_compatible("fsl,imx7d") &&
- !of_machine_is_compatible("fsl,imx7s")) {
- clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM emi_slow clk: %d\n", ret);
- return ret;
- }
- ctrlpriv->caam_emi_slow = clk;
- }
-
- ret = clk_prepare_enable(ctrlpriv->caam_ipg);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
- return ret;
- }
-
- if (ctrlpriv->caam_mem) {
- ret = clk_prepare_enable(ctrlpriv->caam_mem);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
- ret);
- goto disable_caam_ipg;
- }
- }
-
- ret = clk_prepare_enable(ctrlpriv->caam_aclk);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
- goto disable_caam_mem;
- }
-
- if (ctrlpriv->caam_emi_slow) {
- ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
- ret);
- goto disable_caam_aclk;
- }
- }
/* Get configuration properties from device tree */
/* First, get register page */
ctrl = of_iomap(nprop, 0);
- if (ctrl == NULL) {
+ if (!ctrl) {
dev_err(dev, "caam: of_iomap() failed\n");
- ret = -ENOMEM;
- goto disable_caam_emi_slow;
+ return -ENOMEM;
}
caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
(CSTA_PLEND | CSTA_ALT_PLEND));
-
- /* Finding the page size for using the CTPR_MS register */
comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
- pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
+ if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
+ caam_ptr_sz = sizeof(u64);
+ else
+ caam_ptr_sz = sizeof(u32);
+ caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
+ ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
+
+#ifdef CONFIG_CAAM_QI
+ /* If (DPAA 1.x) QI present, check whether dependencies are available */
+ if (ctrlpriv->qi_present && !caam_dpaa2) {
+ ret = qman_is_probed();
+ if (!ret) {
+ ret = -EPROBE_DEFER;
+ goto iounmap_ctrl;
+ } else if (ret < 0) {
+ dev_err(dev, "failing probe due to qman probe error\n");
+ ret = -ENODEV;
+ goto iounmap_ctrl;
+ }
+
+ ret = qman_portals_probed();
+ if (!ret) {
+ ret = -EPROBE_DEFER;
+ goto iounmap_ctrl;
+ } else if (ret < 0) {
+ dev_err(dev, "failing probe due to qman portals probe error\n");
+ ret = -ENODEV;
+ goto iounmap_ctrl;
+ }
+ }
+#endif
/* Allocating the BLOCK_OFFSET based on the supported page size on
* the platform
*/
+ pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
if (pg_size == 0)
BLOCK_OFFSET = PG_SIZE_4K;
else
@@ -621,7 +680,6 @@
* In case of SoCs with Management Complex, MC f/w performs
* the configuration.
*/
- caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
ctrlpriv->mc_en = !!np;
of_node_put(np);
@@ -633,6 +691,8 @@
(sizeof(dma_addr_t) == sizeof(u64) ?
MCFGR_LONG_PTR : 0));
+ handle_imx6_err005766(&ctrl->mcr);
+
/*
 * Read the Compile Time parameters and SCFGR to determine
* if Virtualization is enabled for this platform
@@ -659,28 +719,14 @@
JRSTART_JR1_START | JRSTART_JR2_START |
JRSTART_JR3_START);
- if (sizeof(dma_addr_t) == sizeof(u64)) {
- if (caam_dpaa2)
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
- else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
- else
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
- } else {
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- }
+ ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
goto iounmap_ctrl;
}
ctrlpriv->era = caam_get_era(ctrl);
-
- ret = of_platform_populate(nprop, caam_match, NULL, dev);
- if (ret) {
- dev_err(dev, "JR platform devices creation error\n");
- goto iounmap_ctrl;
- }
+ ctrlpriv->domain = iommu_get_domain_for_dev(dev);
#ifdef CONFIG_DEBUG_FS
/*
@@ -694,21 +740,7 @@
ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
#endif
- ring = 0;
- for_each_available_child_of_node(nprop, np)
- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
- of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
- ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
- ((__force uint8_t *)ctrl +
- (ring + JR_BLOCK_NUMBER) *
- BLOCK_OFFSET
- );
- ctrlpriv->total_jobrs++;
- ring++;
- }
-
/* Check to see if (DPAA 1.x) QI present. If so, enable */
- ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present && !caam_dpaa2) {
ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
((__force uint8_t *)ctrl +
@@ -725,6 +757,25 @@
#endif
}
+ ret = of_platform_populate(nprop, caam_match, NULL, dev);
+ if (ret) {
+ dev_err(dev, "JR platform devices creation error\n");
+ goto shutdown_qi;
+ }
+
+ ring = 0;
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+ ((__force uint8_t *)ctrl +
+ (ring + JR_BLOCK_NUMBER) *
+ BLOCK_OFFSET
+ );
+ ctrlpriv->total_jobrs++;
+ ring++;
+ }
+
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
@@ -732,15 +783,19 @@
goto caam_remove;
}
- cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
+ if (ctrlpriv->era < 10)
+ rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+ else
+ rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
+ CHA_VER_VID_SHIFT;
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
* In case of SoCs with Management Complex, RNG is managed by MC f/w.
*/
- if (!ctrlpriv->mc_en &&
- (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+ if (!ctrlpriv->mc_en && rng_vid >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
/*
@@ -848,27 +903,18 @@
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- ctrlpriv->ctl_kek = debugfs_create_blob("kek",
- S_IRUSR |
- S_IRGRP | S_IROTH,
- ctrlpriv->ctl,
- &ctrlpriv->ctl_kek_wrap);
+ debugfs_create_blob("kek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
+ &ctrlpriv->ctl_kek_wrap);
ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
- S_IRUSR |
- S_IRGRP | S_IROTH,
- ctrlpriv->ctl,
- &ctrlpriv->ctl_tkek_wrap);
+ debugfs_create_blob("tkek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
+ &ctrlpriv->ctl_tkek_wrap);
ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
- S_IRUSR |
- S_IRGRP | S_IROTH,
- ctrlpriv->ctl,
- &ctrlpriv->ctl_tdsk_wrap);
+ debugfs_create_blob("tdsk", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
+ &ctrlpriv->ctl_tdsk_wrap);
#endif
return 0;
@@ -876,18 +922,13 @@
caam_remove(pdev);
return ret;
+shutdown_qi:
+#ifdef CONFIG_CAAM_QI
+ if (ctrlpriv->qi_init)
+ caam_qi_shutdown(dev);
+#endif
iounmap_ctrl:
iounmap(ctrl);
-disable_caam_emi_slow:
- if (ctrlpriv->caam_emi_slow)
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-disable_caam_aclk:
- clk_disable_unprepare(ctrlpriv->caam_aclk);
-disable_caam_mem:
- if (ctrlpriv->caam_mem)
- clk_disable_unprepare(ctrlpriv->caam_mem);
-disable_caam_ipg:
- clk_disable_unprepare(ctrlpriv->caam_ipg);
return ret;
}
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index f76ff16..4b6854b 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -4,6 +4,7 @@
* Definitions to support CAAM descriptor instruction generation
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*/
#ifndef DESC_H
@@ -242,6 +243,7 @@
#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SM (0x71 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
/* Offset in source/destination */
@@ -284,6 +286,12 @@
#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+/* Special Length definitions when dst=sm, nfifo-{sm,m} */
+#define LDLEN_MATH0 0
+#define LDLEN_MATH1 1
+#define LDLEN_MATH2 2
+#define LDLEN_MATH3 3
+
/*
* FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
* Command Constructs
@@ -408,6 +416,7 @@
#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
/*
@@ -1133,6 +1142,12 @@
#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
+/* version register fields */
+#define OP_VER_CCHA_NUM 0x000000ff /* Number CCHAs instantiated */
+#define OP_VER_CCHA_MISC 0x0000ff00 /* CCHA Miscellaneous Information */
+#define OP_VER_CCHA_REV 0x00ff0000 /* CCHA Revision Number */
+#define OP_VER_CCHA_VID 0xff000000 /* CCHA Version ID */
+
#define OP_ALG_ALGSEL_SHIFT 16
#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
@@ -1140,6 +1155,7 @@
#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
@@ -1152,6 +1168,8 @@
#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_POLY1305 (0xE0 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_AAI_SHIFT 4
#define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
@@ -1199,6 +1217,11 @@
#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
+/* Chacha20 AAI set */
+#define OP_ALG_AAI_AEAD (0x002 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_KEYSTREAM (0x001 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_BC8 (0x008 << OP_ALG_AAI_SHIFT)
+
/* hmac/smac AAI set */
#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
@@ -1387,6 +1410,7 @@
#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_AUX_ABLK (0x0a << MOVE_SRC_SHIFT)
#define MOVE_DEST_SHIFT 16
#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
@@ -1413,6 +1437,10 @@
#define MOVELEN_MRSEL_SHIFT 0
#define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
+#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
/*
* MATH Command Constructs
@@ -1589,6 +1617,7 @@
#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_POLY (0xB << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index d4256fa..62ce642 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -3,6 +3,7 @@
* caam descriptor construction helper functions
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2019 NXP
*/
#ifndef DESC_CONSTR_H
@@ -13,9 +14,41 @@
#define IMMEDIATE (1 << 23)
#define CAAM_CMD_SZ sizeof(u32)
-#define CAAM_PTR_SZ sizeof(dma_addr_t)
+#define CAAM_PTR_SZ caam_ptr_sz
+#define CAAM_PTR_SZ_MAX sizeof(dma_addr_t)
+#define CAAM_PTR_SZ_MIN sizeof(u32)
#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
-#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+#define __DESC_JOB_IO_LEN(n) (CAAM_CMD_SZ * 5 + (n) * 3)
+#define DESC_JOB_IO_LEN __DESC_JOB_IO_LEN(CAAM_PTR_SZ)
+#define DESC_JOB_IO_LEN_MAX __DESC_JOB_IO_LEN(CAAM_PTR_SZ_MAX)
+#define DESC_JOB_IO_LEN_MIN __DESC_JOB_IO_LEN(CAAM_PTR_SZ_MIN)
+
+/*
+ * The CAAM QI hardware constructs a job descriptor which points
+ * to shared descriptor (as pointed by context_a of FQ to CAAM).
+ * When the job descriptor is executed by deco, the whole job
+ * descriptor together with shared descriptor gets loaded in
+ * deco buffer which is 64 words long (each 32-bit).
+ *
+ * The job descriptor constructed by QI hardware has layout:
+ *
+ * HEADER (1 word)
+ * Shdesc ptr (1 or 2 words)
+ * SEQ_OUT_PTR (1 word)
+ * Out ptr (1 or 2 words)
+ * Out length (1 word)
+ * SEQ_IN_PTR (1 word)
+ * In ptr (1 or 2 words)
+ * In length (1 word)
+ *
+ * The shdesc ptr is used to fetch shared descriptor contents
+ * into deco buffer.
+ *
+ * Apart from shdesc contents, the total number of words that
+ * get loaded in deco buffer is '8' or '11'. The remaining words
+ * in deco buffer can be used for storing shared descriptor.
+ */
+#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN) / CAAM_CMD_SZ)
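A short worked example of the arithmetic behind these limits (a sketch; it assumes MAX_CAAM_DESCSIZE is 64 words and CAAM_CMD_SZ is 4 bytes, as defined elsewhere in this driver):

/*
 * DESC_JOB_IO_LEN_MIN = 5 * 4 + 3 * 4 = 32 bytes =  8 words (32-bit pointers)
 * DESC_JOB_IO_LEN_MAX = 5 * 4 + 3 * 8 = 44 bytes = 11 words (64-bit pointers)
 * MAX_SDLEN           = (64 * 4 - 32) / 4 = 56 words left for the shared
 *                       descriptor, matching the "8 or 11" words noted above.
 */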
#ifdef DEBUG
#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
@@ -36,6 +69,17 @@
(LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
extern bool caam_little_end;
+extern size_t caam_ptr_sz;
+
+/*
+ * HW fetches 4 S/G table entries at a time, irrespective of how many entries
+ * are in the table. It's SW's responsibility to make sure these accesses
+ * do not have side effects.
+ */
+static inline int pad_sg_nents(int sg_nents)
+{
+ return ALIGN(sg_nents, 4);
+}
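A brief illustration of the padding, with hypothetical entry counts:

/* e.g. 3 source + 2 destination entries are padded to 8 table slots */
int padded = pad_sg_nents(3 + 2);	/* ALIGN(5, 4) == 8 */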
static inline int desc_len(u32 * const desc)
{
@@ -92,9 +136,15 @@
static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
{
- dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
+ if (caam_ptr_sz == sizeof(dma_addr_t)) {
+ dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
- *offset = cpu_to_caam_dma(ptr);
+ *offset = cpu_to_caam_dma(ptr);
+ } else {
+ u32 *offset = (u32 *)desc_end(desc);
+
+ *offset = cpu_to_caam_dma(ptr);
+ }
(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
CAAM_PTR_SZ / CAAM_CMD_SZ);
@@ -189,6 +239,7 @@
}
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)
+APPEND_CMD_RET(move_len, MOVE_LEN)
static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
{
@@ -327,7 +378,11 @@
u32 options) \
{ \
PRINT_POS; \
- append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
+ if (options & LDST_LEN_MASK) \
+ append_cmd(desc, CMD_##op | IMMEDIATE | options); \
+ else \
+ append_cmd(desc, CMD_##op | IMMEDIATE | options | \
+ sizeof(type)); \
append_cmd(desc, immediate); \
}
APPEND_CMD_RAW_IMM(load, LOAD, u32);
@@ -441,8 +496,8 @@
* functions where it is used.
* @keylen: length of the provided algorithm key, in bytes
* @keylen_pad: padded length of the provided algorithm key, in bytes
- * @key: address where algorithm key resides; virtual address if key_inline
- * is true, dma (bus) address if key_inline is false.
+ * @key_dma: dma (bus) address where algorithm key resides
+ * @key_virt: virtual address where algorithm key resides
* @key_inline: true - key can be inlined in the descriptor; false - key is
* referenced by the descriptor
*/
@@ -450,10 +505,8 @@
u32 algtype;
unsigned int keylen;
unsigned int keylen_pad;
- union {
- dma_addr_t key_dma;
- const void *key_virt;
- };
+ dma_addr_t key_dma;
+ const void *key_virt;
bool key_inline;
};
@@ -519,14 +572,26 @@
if (adata->key_inline) {
int words;
- append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
- OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
- adata->keylen);
- append_data(desc, adata->key_virt, adata->keylen);
+ if (adata->keylen > adata->keylen_pad) {
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_PTR |
+ OP_PCL_DKP_DST_IMM | adata->keylen);
+ append_ptr(desc, adata->key_dma);
+
+ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
+ CAAM_PTR_SZ) / CAAM_CMD_SZ;
+ } else {
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_IMM |
+ OP_PCL_DKP_DST_IMM | adata->keylen);
+ append_data(desc, adata->key_virt, adata->keylen);
+
+ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
+ ALIGN(adata->keylen, CAAM_CMD_SZ)) /
+ CAAM_CMD_SZ;
+ }
/* Reserve space in descriptor buffer for the derived key */
- words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
- ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
if (words)
(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
} else {
diff --git a/drivers/crypto/caam/dpseci-debugfs.c b/drivers/crypto/caam/dpseci-debugfs.c
new file mode 100644
index 0000000..c5bfc92
--- /dev/null
+++ b/drivers/crypto/caam/dpseci-debugfs.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include "dpseci-debugfs.h"
+
+static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_caam_priv *priv = (struct dpaa2_caam_priv *)file->private;
+ u32 fqid, fcnt, bcnt;
+ int i, err;
+
+ seq_printf(file, "FQ stats for %s:\n", dev_name(priv->dev));
+ seq_printf(file, "%s%16s%16s\n",
+ "Rx-VFQID",
+ "Pending frames",
+ "Pending bytes");
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ fqid = priv->rx_queue_attr[i].fqid;
+ err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt);
+ if (err)
+ continue;
+
+ seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt);
+ }
+
+ seq_printf(file, "%s%16s%16s\n",
+ "Tx-VFQID",
+ "Pending frames",
+ "Pending bytes");
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ fqid = priv->tx_queue_attr[i].fqid;
+ err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt);
+ if (err)
+ continue;
+
+ seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt);
+ }
+
+ return 0;
+}
+
+static int dpseci_dbg_fqs_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct dpaa2_caam_priv *priv;
+
+ priv = (struct dpaa2_caam_priv *)inode->i_private;
+
+ err = single_open(file, dpseci_dbg_fqs_show, priv);
+ if (err < 0)
+ dev_err(priv->dev, "single_open() failed\n");
+
+ return err;
+}
+
+static const struct file_operations dpseci_dbg_fq_ops = {
+ .open = dpseci_dbg_fqs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv)
+{
+ priv->dfs_root = debugfs_create_dir(dev_name(priv->dev), NULL);
+
+ debugfs_create_file("fq_stats", 0444, priv->dfs_root, priv,
+ &dpseci_dbg_fq_ops);
+}
+
+void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv)
+{
+ debugfs_remove_recursive(priv->dfs_root);
+}
diff --git a/drivers/crypto/caam/dpseci-debugfs.h b/drivers/crypto/caam/dpseci-debugfs.h
new file mode 100644
index 0000000..bc22af7
--- /dev/null
+++ b/drivers/crypto/caam/dpseci-debugfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#ifndef DPSECI_DEBUGFS_H
+#define DPSECI_DEBUGFS_H
+
+#include <linux/dcache.h>
+#include "caamalg_qi2.h"
+
+#ifdef CONFIG_DEBUG_FS
+void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv);
+void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv);
+#else
+static inline void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv) {}
+static inline void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv) {}
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* DPSECI_DEBUGFS_H */
diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c
new file mode 100644
index 0000000..8a68531
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#include <linux/fsl/mc.h>
+#include "dpseci.h"
+#include "dpseci_cmd.h"
+
+/**
+ * dpseci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpseci_id: DPSECI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an already created
+ * object; an object may have been declared statically in the DPL
+ * or created dynamically.
+ * This function returns a unique authentication token, associated with the
+ * specific object ID and the specific MC portal; this token must be used in all
+ * subsequent commands for this specific object.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_open *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpseci_cmd_open *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
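As a hedged illustration of the intended open/query/close flow (mc_io, dpseci_id and dev are placeholders for what the fsl-mc bus supplies to the driver; error handling is trimmed):

u16 token;
struct dpseci_attr attr;
int err;

err = dpseci_open(mc_io, 0, dpseci_id, &token);
if (err)
	return err;

err = dpseci_get_attributes(mc_io, 0, token, &attr);
if (!err)
	dev_info(dev, "dpseci.%d: %u Tx / %u Rx queues\n",
		 attr.id, attr.num_tx_queues, attr.num_rx_queues);

dpseci_close(mc_io, 0, token);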
+
+/**
+ * dpseci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * After this function is called, no further operations are allowed on the
+ * object without opening a new control session.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_is_enabled *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
+ *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_attributes() - Retrieve DPSECI attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_attributes *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->num_tx_queues = rsp_params->num_tx_queues;
+ attr->num_rx_queues = rsp_params->num_rx_queues;
+ attr->options = le32_to_cpu(rsp_params->options);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
+ * Rx queues identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ cmd_params->queue = queue;
+ dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
+ cfg->order_preservation_en);
+
+ return mc_send_command(mc_io, &cmd);
+}
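A sketch of a typical Rx queue configuration; dpio_id and priv are placeholder values, while the option and destination constants are the ones declared in dpseci.h in this patch:

struct dpseci_rx_queue_cfg rx_cfg = {
	.options  = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX,
	.user_ctx = (u64)(uintptr_t)priv,	/* echoed back in each dequeued FD */
	.dest_cfg = {
		.dest_type = DPSECI_DEST_DPIO,
		.dest_id   = dpio_id,		/* placeholder DPIO object id */
		.priority  = 1,
	},
};
int err;

err = dpseci_set_rx_queue(mc_io, 0, token, 0, &rx_cfg);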
+
+/**
+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->queue = queue;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
+ attr->dest_cfg.priority = cmd_params->priority;
+ attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
+ DEST_TYPE);
+ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
+ attr->fqid = le32_to_cpu(cmd_params->fqid);
+ attr->order_preservation_en =
+ dpseci_get_field(cmd_params->order_preservation_en,
+ ORDER_PRESERVATION);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ struct dpseci_rsp_get_tx_queue *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->queue = queue;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->priority = rsp_params->priority;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned SEC attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_sec_attr *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
+ attr->ip_id = le16_to_cpu(rsp_params->ip_id);
+ attr->major_rev = rsp_params->major_rev;
+ attr->minor_rev = rsp_params->minor_rev;
+ attr->era = rsp_params->era;
+ attr->deco_num = rsp_params->deco_num;
+ attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
+ attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
+ attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
+ attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
+ attr->crc_acc_num = rsp_params->crc_acc_num;
+ attr->pk_acc_num = rsp_params->pk_acc_num;
+ attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
+ attr->rng_acc_num = rsp_params->rng_acc_num;
+ attr->md_acc_num = rsp_params->md_acc_num;
+ attr->arc4_acc_num = rsp_params->arc4_acc_num;
+ attr->des_acc_num = rsp_params->des_acc_num;
+ attr->aes_acc_num = rsp_params->aes_acc_num;
+ attr->ccha_acc_num = rsp_params->ccha_acc_num;
+ attr->ptha_acc_num = rsp_params->ptha_acc_num;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_api_version() - Get Data Path SEC Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path sec API
+ * @minor_ver: Minor version of data path sec API
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_congestion_notification() - Set congestion group
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_congestion_notification() - Get congestion group notification
+ * configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+ cfg->dest_cfg.priority = rsp_params->priority;
+ cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
+ CGN_DEST_TYPE);
+ cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+
+ return 0;
+}
diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h
new file mode 100644
index 0000000..4550e13
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+#ifndef _DPSECI_H_
+#define _DPSECI_H_
+
+/*
+ * Data Path SEC Interface API
+ * Contains initialization APIs and runtime control APIs for DPSECI
+ */
+
+struct fsl_mc_io;
+
+/**
+ * General DPSECI macros
+ */
+
+/**
+ * Maximum number of Tx/Rx queues per DPSECI object
+ */
+#define DPSECI_MAX_QUEUE_NUM 16
+
+/**
+ * All queues considered; see dpseci_set_rx_queue()
+ */
+#define DPSECI_ALL_QUEUES (u8)(-1)
+
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token);
+
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/**
+ * Enable the Congestion Group support
+ */
+#define DPSECI_OPT_HAS_CG 0x000020
+
+/**
+ * struct dpseci_cfg - Structure representing DPSECI configuration
+ * @options: Any combination of the following flags:
+ * DPSECI_OPT_HAS_CG
+ * @num_tx_queues: num of queues towards the SEC
+ * @num_rx_queues: num of queues back from the SEC
+ * @priorities: Priorities for the SEC hardware processing;
+ * each place in the array is the priority of the tx queue
+ * towards the SEC;
+ * valid priorities are configured with values 1-8;
+ */
+struct dpseci_cfg {
+ u32 options;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u8 priorities[DPSECI_MAX_QUEUE_NUM];
+};
+
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en);
+
+/**
+ * struct dpseci_attr - Structure representing DPSECI attributes
+ * @id: DPSECI object ID
+ * @num_tx_queues: number of queues towards the SEC
+ * @num_rx_queues: number of queues back from the SEC
+ * @options: any combination of the following flags:
+ * DPSECI_OPT_HAS_CG
+ */
+struct dpseci_attr {
+ int id;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u32 options;
+};
+
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr);
+
+/**
+ * enum dpseci_dest - DPSECI destination types
+ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to dequeue
+ * from the queue based on polling or other user-defined method
+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue from
+ * the queue only after notification is received
+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpseci_dest {
+ DPSECI_DEST_NONE = 0,
+ DPSECI_DEST_DPIO,
+ DPSECI_DEST_DPCON
+};
+
+/**
+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that channel;
+ * not relevant for 'DPSECI_DEST_NONE' option
+ */
+struct dpseci_dest_cfg {
+ enum dpseci_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+/**
+ * DPSECI queue modification options
+ */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPSECI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Select to modify the queue's order preservation
+ */
+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
+
+/**
+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
+ * @order_preservation_en: order preservation configuration for the rx queue
+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
+ * in 'options'
+ * @dest_cfg: Queue destination parameters; valid only if
+ * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpseci_rx_queue_cfg {
+ u32 options;
+ int order_preservation_en;
+ u64 user_ctx;
+ struct dpseci_dest_cfg dest_cfg;
+};
+
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg);
+
+/**
+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @order_preservation_en: Status of the order preservation configuration on the
+ * queue
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpseci_rx_queue_attr {
+ u64 user_ctx;
+ int order_preservation_en;
+ struct dpseci_dest_cfg dest_cfg;
+ u32 fqid;
+};
+
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr);
+
+/**
+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
+ * @priority: SEC hardware processing priority for the queue
+ */
+struct dpseci_tx_queue_attr {
+ u32 fqid;
+ u8 priority;
+};
+
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr);
+
+/**
+ * struct dpseci_sec_attr - Structure representing attributes of the SEC
+ * hardware accelerator
+ * @ip_id: ID for SEC
+ * @major_rev: Major revision number for SEC
+ * @minor_rev: Minor revision number for SEC
+ * @era: SEC Era
+ * @deco_num: The number of copies of the DECO that are implemented in this
+ * version of SEC
+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
+ * version of SEC
+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
+ * version of SEC
+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
+ * implemented in this version of SEC
+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
+ * implemented in this version of SEC
+ * @crc_acc_num: The number of copies of the CRC module that are implemented in
+ * this version of SEC
+ * @pk_acc_num: The number of copies of the Public Key module that are
+ * implemented in this version of SEC
+ * @kasumi_acc_num: The number of copies of the Kasumi module that are
+ * implemented in this version of SEC
+ * @rng_acc_num: The number of copies of the Random Number Generator that are
+ * implemented in this version of SEC
+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
+ * implemented in this version of SEC
+ * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
+ * in this version of SEC
+ * @des_acc_num: The number of copies of the DES module that are implemented in
+ * this version of SEC
+ * @aes_acc_num: The number of copies of the AES module that are implemented in
+ * this version of SEC
+ * @ccha_acc_num: The number of copies of the ChaCha20 module that are
+ * implemented in this version of SEC.
+ * @ptha_acc_num: The number of copies of the Poly1305 module that are
+ * implemented in this version of SEC.
+ **/
+struct dpseci_sec_attr {
+ u16 ip_id;
+ u8 major_rev;
+ u8 minor_rev;
+ u8 era;
+ u8 deco_num;
+ u8 zuc_auth_acc_num;
+ u8 zuc_enc_acc_num;
+ u8 snow_f8_acc_num;
+ u8 snow_f9_acc_num;
+ u8 crc_acc_num;
+ u8 pk_acc_num;
+ u8 kasumi_acc_num;
+ u8 rng_acc_num;
+ u8 md_acc_num;
+ u8 arc4_acc_num;
+ u8 des_acc_num;
+ u8 aes_acc_num;
+ u8 ccha_acc_num;
+ u8 ptha_acc_num;
+};
+
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr);
+
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+/**
+ * enum dpseci_congestion_unit - DPSECI congestion units
+ * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpseci_congestion_unit {
+ DPSECI_CONGESTION_UNIT_BYTES = 0,
+ DPSECI_CONGESTION_UNIT_FRAMES
+};
+
+/**
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
+
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
+
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
+ */
+#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
+ * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
+ * (if enabled)
+ */
+#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
+
+/**
+ * struct dpseci_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: units type
+ * @threshold_entry: above this threshold we enter a congestion state.
+ * set it to '0' to disable it
+ * @threshold_exit: below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned;
+ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
+ * values
+ */
+struct dpseci_congestion_notification_cfg {
+ enum dpseci_congestion_unit units;
+ u32 threshold_entry;
+ u32 threshold_exit;
+ u64 message_ctx;
+ u64 message_iova;
+ struct dpseci_dest_cfg dest_cfg;
+ u16 notification_mode;
+};
+
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg);
+
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg);
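A hedged example of arming congestion notifications in frame units; cscn_iova is a placeholder for a 16-byte-aligned, DMA-able address owned by the caller:

struct dpseci_congestion_notification_cfg cong = {
	.units             = DPSECI_CONGESTION_UNIT_FRAMES,
	.threshold_entry   = 64,	/* enter congestion above 64 pending frames */
	.threshold_exit    = 32,	/* leave congestion below 32 pending frames */
	.message_iova      = cscn_iova,	/* placeholder, 16B aligned */
	.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
			     DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT,
	.dest_cfg          = { .dest_type = DPSECI_DEST_NONE },
};
int err;

err = dpseci_set_congestion_notification(mc_io, 0, token, &cong);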
+
+#endif /* _DPSECI_H_ */
diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h
new file mode 100644
index 0000000..6ab77ea
--- /dev/null
+++ b/drivers/crypto/caam/dpseci_cmd.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef _DPSECI_CMD_H_
+#define _DPSECI_CMD_H_
+
+/* DPSECI Version */
+#define DPSECI_VER_MAJOR 5
+#define DPSECI_VER_MINOR 3
+
+#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
+#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
+
+/* Command versioning */
+#define DPSECI_CMD_BASE_VERSION 1
+#define DPSECI_CMD_BASE_VERSION_V2 2
+#define DPSECI_CMD_ID_OFFSET 4
+
+#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
+ DPSECI_CMD_BASE_VERSION)
+
+#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
+ DPSECI_CMD_BASE_VERSION_V2)
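For reference, a worked encoding of one command ID under this scheme:

/* DPSECI_CMDID_CLOSE == DPSECI_CMD_V1(0x800) == (0x800 << 4) | 1 == 0x8001 */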
+
+/* Command IDs */
+#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
+#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
+#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
+
+#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
+#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
+#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
+#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
+
+#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
+#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
+#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
+#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
+#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
+#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPSECI_MASK(field) \
+ GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
+ DPSECI_##field##_SHIFT)
+
+#define dpseci_set_field(var, field, val) \
+ ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
+
+#define dpseci_get_field(var, field) \
+ (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
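A worked expansion for the DEST_TYPE field (shift 0, size 4, per the definitions further down in this header):

/*
 * DPSECI_MASK(DEST_TYPE)               == GENMASK(3, 0) == 0x0f
 * dpseci_set_field(v, DEST_TYPE, type) -> v |= ((type) << 0) & 0x0f
 * dpseci_get_field(v, DEST_TYPE)       -> ((v) & 0x0f) >> 0
 */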
+
+struct dpseci_cmd_open {
+ __le32 dpseci_id;
+};
+
+#define DPSECI_ENABLE_SHIFT 0
+#define DPSECI_ENABLE_SIZE 1
+
+struct dpseci_rsp_is_enabled {
+ u8 is_enabled;
+};
+
+struct dpseci_rsp_get_attributes {
+ __le32 id;
+ __le32 pad0;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u8 pad1[6];
+ __le32 options;
+};
+
+#define DPSECI_DEST_TYPE_SHIFT 0
+#define DPSECI_DEST_TYPE_SIZE 4
+
+#define DPSECI_ORDER_PRESERVATION_SHIFT 0
+#define DPSECI_ORDER_PRESERVATION_SIZE 1
+
+struct dpseci_cmd_queue {
+ __le32 dest_id;
+ u8 priority;
+ u8 queue;
+ u8 dest_type;
+ u8 pad;
+ __le64 user_ctx;
+ union {
+ __le32 options;
+ __le32 fqid;
+ };
+ u8 order_preservation_en;
+};
+
+struct dpseci_rsp_get_tx_queue {
+ __le32 pad;
+ __le32 fqid;
+ u8 priority;
+};
+
+struct dpseci_rsp_get_sec_attr {
+ __le16 ip_id;
+ u8 major_rev;
+ u8 minor_rev;
+ u8 era;
+ u8 pad0[3];
+ u8 deco_num;
+ u8 zuc_auth_acc_num;
+ u8 zuc_enc_acc_num;
+ u8 pad1;
+ u8 snow_f8_acc_num;
+ u8 snow_f9_acc_num;
+ u8 crc_acc_num;
+ u8 pad2;
+ u8 pk_acc_num;
+ u8 kasumi_acc_num;
+ u8 rng_acc_num;
+ u8 pad3;
+ u8 md_acc_num;
+ u8 arc4_acc_num;
+ u8 des_acc_num;
+ u8 aes_acc_num;
+ u8 ccha_acc_num;
+ u8 ptha_acc_num;
+};
+
+struct dpseci_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+#define DPSECI_CGN_DEST_TYPE_SHIFT 0
+#define DPSECI_CGN_DEST_TYPE_SIZE 4
+#define DPSECI_CGN_UNITS_SHIFT 4
+#define DPSECI_CGN_UNITS_SIZE 2
+
+struct dpseci_cmd_congestion_notification {
+ __le32 dest_id;
+ __le16 notification_mode;
+ u8 priority;
+ u8 options;
+ __le64 message_iova;
+ __le64 message_ctx;
+ __le32 threshold_entry;
+ __le32 threshold_exit;
+};
+
+#endif /* _DPSECI_CMD_H_ */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 8da88be..17c6108 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -13,7 +13,7 @@
#ifdef DEBUG
#include <linux/highmem.h>
-void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+void caam_dump_sg(const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii)
{
@@ -22,7 +22,7 @@
size_t len;
void *buf;
- for (it = sg; it && tlen > 0 ; it = sg_next(sg)) {
+ for (it = sg; it && tlen > 0 ; it = sg_next(it)) {
/*
* make sure the scatterlist's page
* has a valid virtual memory mapping
@@ -35,21 +35,30 @@
buf = it_page + it->offset;
len = min_t(size_t, tlen, it->length);
- print_hex_dump(level, prefix_str, prefix_type, rowsize,
- groupsize, buf, len, ascii);
+ print_hex_dump_debug(prefix_str, prefix_type, rowsize,
+ groupsize, buf, len, ascii);
tlen -= len;
kunmap_atomic(it_page);
}
}
#else
-void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+void caam_dump_sg(const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii)
{}
#endif /* DEBUG */
EXPORT_SYMBOL(caam_dump_sg);
+bool caam_little_end;
+EXPORT_SYMBOL(caam_little_end);
+
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
+
+size_t caam_ptr_sz;
+EXPORT_SYMBOL(caam_ptr_sz);
+
static const struct {
u8 value;
const char *error_text;
@@ -108,6 +117,55 @@
{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
};
+static const struct {
+ u8 value;
+ const char *error_text;
+} qi_error_list[] = {
+ { 0x00, "No error" },
+ { 0x1F, "Job terminated by FQ or ICID flush" },
+ { 0x20, "FD format error"},
+ { 0x21, "FD command format error"},
+ { 0x23, "FL format error"},
+ { 0x25, "CRJD specified in FD, but not enabled in FLC"},
+ { 0x30, "Max. buffer size too small"},
+ { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
+ { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format"},
+ { 0x33, "Size over/underflow (allocate mode)"},
+ { 0x34, "Size over/underflow (reuse mode)"},
+ { 0x35, "Length exceeds max. short length (allocate mode, S/G/ format)"},
+ { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G/ format)"},
+ { 0x41, "SBC frame format not supported (allocate mode)"},
+ { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
+ { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
+ { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
+ { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
+ { 0x46, "Annotation length exceeds offset (reuse mode)"},
+ { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
+ { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
+ { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
+ { 0x51, "Unsupported IF reuse mode"},
+ { 0x52, "Unsupported FL use mode"},
+ { 0x53, "Unsupported RJD use mode"},
+ { 0x54, "Unsupported inline descriptor use mode"},
+ { 0xC0, "Table buffer pool 0 depletion"},
+ { 0xC1, "Table buffer pool 1 depletion"},
+ { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
+ { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
+ { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
+ { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
+ { 0xD0, "FLC read error"},
+ { 0xD1, "FL read error"},
+ { 0xD2, "FL write error"},
+ { 0xD3, "OF SGT write error"},
+ { 0xD4, "PTA read error"},
+ { 0xD5, "PTA write error"},
+ { 0xD6, "OF SGT F-bit write error"},
+ { 0xD7, "ASA write error"},
+ { 0xE1, "FLC[ICR]=0 ICID error"},
+ { 0xE2, "FLC[ICR]=1 ICID error"},
+ { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
+};
+
static const char * const cha_id_list[] = {
"",
"AES",
@@ -156,8 +214,8 @@
"Secure key generation",
};
-static void report_ccb_status(struct device *jrdev, const u32 status,
- const char *error)
+static int report_ccb_status(struct device *jrdev, const u32 status,
+ const char *error)
{
u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
JRSTA_CCBERR_CHAID_SHIFT;
@@ -193,22 +251,27 @@
* CCB ICV check failures are part of normal operation life;
* we leave the upper layers to do what they want with them.
*/
- if (err_id != JRSTA_CCBERR_ERRID_ICVCHK)
- dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
- status, error, idx_str, idx,
- cha_str, cha_err_code,
- err_str, err_err_code);
+ if (err_id == JRSTA_CCBERR_ERRID_ICVCHK)
+ return -EBADMSG;
+
+ dev_err_ratelimited(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", status,
+ error, idx_str, idx, cha_str, cha_err_code,
+ err_str, err_err_code);
+
+ return -EINVAL;
}
-static void report_jump_status(struct device *jrdev, const u32 status,
- const char *error)
+static int report_jump_status(struct device *jrdev, const u32 status,
+ const char *error)
{
dev_err(jrdev, "%08x: %s: %s() not implemented\n",
status, error, __func__);
+
+ return -EINVAL;
}
-static void report_deco_status(struct device *jrdev, const u32 status,
- const char *error)
+static int report_deco_status(struct device *jrdev, const u32 status,
+ const char *error)
{
u8 err_id = status & JRSTA_DECOERR_ERROR_MASK;
u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
@@ -234,27 +297,56 @@
dev_err(jrdev, "%08x: %s: %s %d: %s%s\n",
status, error, idx_str, idx, err_str, err_err_code);
+
+ return -EINVAL;
}
-static void report_jr_status(struct device *jrdev, const u32 status,
- const char *error)
+static int report_qi_status(struct device *qidev, const u32 status,
+ const char *error)
+{
+ u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
+ const char *err_str = "unidentified error value 0x";
+ char err_err_code[3] = { 0 };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
+ if (qi_error_list[i].value == err_id)
+ break;
+
+ if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
+ err_str = qi_error_list[i].error_text;
+ else
+ snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+
+ dev_err(qidev, "%08x: %s: %s%s\n",
+ status, error, err_str, err_err_code);
+
+ return -EINVAL;
+}
+
+static int report_jr_status(struct device *jrdev, const u32 status,
+ const char *error)
{
dev_err(jrdev, "%08x: %s: %s() not implemented\n",
status, error, __func__);
+
+ return -EINVAL;
}
-static void report_cond_code_status(struct device *jrdev, const u32 status,
- const char *error)
+static int report_cond_code_status(struct device *jrdev, const u32 status,
+ const char *error)
{
dev_err(jrdev, "%08x: %s: %s() not implemented\n",
status, error, __func__);
+
+ return -EINVAL;
}
-void caam_jr_strstatus(struct device *jrdev, u32 status)
+int caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
{
static const struct stat_src {
- void (*report_ssed)(struct device *jrdev, const u32 status,
- const char *error);
+ int (*report_ssed)(struct device *jrdev, const u32 status,
+ const char *error);
const char *error;
} status_src[16] = {
{ NULL, "No error" },
@@ -262,7 +354,7 @@
{ report_ccb_status, "CCB" },
{ report_jump_status, "Jump" },
{ report_deco_status, "DECO" },
- { NULL, "Queue Manager Interface" },
+ { report_qi_status, "Queue Manager Interface" },
{ report_jr_status, "Job Ring" },
{ report_cond_code_status, "Condition Code" },
{ NULL, NULL },
@@ -282,10 +374,17 @@
* Otherwise print the error source name.
*/
if (status_src[ssrc].report_ssed)
- status_src[ssrc].report_ssed(jrdev, status, error);
- else if (error)
+ return status_src[ssrc].report_ssed(jrdev, status, error);
+
+ if (error)
dev_err(jrdev, "%d: %s\n", ssrc, error);
else
dev_err(jrdev, "%d: unknown error source\n", ssrc);
+
+ return -EINVAL;
}
-EXPORT_SYMBOL(caam_jr_strstatus);
+EXPORT_SYMBOL(caam_strstatus);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM error reporting");
+MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 5aa332b..16809fa 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -7,10 +7,23 @@
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
-#define CAAM_ERROR_STR_MAX 302
-void caam_jr_strstatus(struct device *jrdev, u32 status);
-void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+#include "desc.h"
+
+#define CAAM_ERROR_STR_MAX 302
+
+int caam_strstatus(struct device *dev, u32 status, bool qi_v2);
+
+#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
+#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
+
+void caam_dump_sg(const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
size_t tlen, bool ascii);
+
+static inline bool is_mdha(u32 algtype)
+{
+ return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) ==
+ OP_ALG_CHA_MDHA;
+}
#endif /* CAAM_ERROR_H */
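
Note: the status-reporting helpers above now return an error code (-EBADMSG for an
ICV check failure, -EINVAL otherwise) instead of only printing, so completion
callbacks can hand callers a meaningful errno. A minimal sketch of a job ring
callback using the caam_jr_strstatus() wrapper; the callback and result-struct
names are illustrative only, and the key_gen.c hunk later in this patch applies the
same pattern:

    struct my_result {                       /* illustrative context struct */
            int err;
            struct completion completion;
    };

    static void my_job_done(struct device *jrdev, u32 *desc, u32 status,
                            void *context)
    {
            struct my_result *res = context;
            int ecode = 0;

            if (status)
                    ecode = caam_jr_strstatus(jrdev, status);

            res->err = ecode;                /* 0, -EBADMSG or -EINVAL */
            complete(&res->completion);
    }
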
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index babc78a..731b06b 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -4,12 +4,14 @@
* Private/internal definitions between modules
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- *
+ * Copyright 2019 NXP
*/
#ifndef INTERN_H
#define INTERN_H
+#include "ctrl.h"
+
/* Currently comes from Kconfig param as a ^2 (driver-required) */
#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
@@ -49,26 +51,21 @@
atomic_t tfm_count ____cacheline_aligned;
/* Job ring info */
- int ringsize; /* Size of rings (assume input = output) */
struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
- int inp_ring_write_index; /* Input index "tail" */
+ u32 inpring_avail; /* Number of free entries in input ring */
int head; /* entinfo (s/w ring) head index */
- dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */
- spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
+ void *inpring; /* Base of input ring, alloc
+ * DMA-safe */
int out_ring_read_index; /* Output index "tail" */
int tail; /* entinfo (s/w ring) tail index */
- struct jr_outentry *outring; /* Base of output ring, DMA-safe */
+ void *outring; /* Base of output ring, DMA-safe */
};
/*
* Driver-private storage for a single CAAM block instance
*/
struct caam_drv_private {
-#ifdef CONFIG_CAAM_QI
- struct device *qidev;
-#endif
-
/* Physical-presence section */
struct caam_ctrl __iomem *ctrl; /* controller region */
struct caam_deco __iomem *deco; /* DECO/CCB views */
@@ -76,12 +73,17 @@
struct caam_queue_if __iomem *qi; /* QI control region */
struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
+ struct iommu_domain *domain;
+
/*
* Detected geometry block. Filled in from device tree if powerpc,
* or from register-based version detection code
*/
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
+#ifdef CONFIG_CAAM_QI
+ u8 qi_init; /* Nonzero if QI has been initialized */
+#endif
u8 mc_en; /* Nonzero if MC f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
@@ -93,11 +95,8 @@
Handles of the RNG4 block are initialized
by this driver */
- struct clk *caam_ipg;
- struct clk *caam_mem;
- struct clk *caam_aclk;
- struct clk *caam_emi_slow;
-
+ struct clk_bulk_data *clks;
+ int num_clks;
/*
* debugfs entries for developer view into driver/device
* variables at runtime.
@@ -106,12 +105,98 @@
struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
- struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
#endif
};
-void caam_jr_algapi_init(struct device *dev);
-void caam_jr_algapi_remove(struct device *dev);
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+
+int caam_algapi_init(struct device *dev);
+void caam_algapi_exit(void);
+
+#else
+
+static inline int caam_algapi_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_algapi_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
+
+int caam_algapi_hash_init(struct device *dev);
+void caam_algapi_hash_exit(void);
+
+#else
+
+static inline int caam_algapi_hash_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_algapi_hash_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API
+
+int caam_pkc_init(struct device *dev);
+void caam_pkc_exit(void);
+
+#else
+
+static inline int caam_pkc_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_pkc_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
+
+int caam_rng_init(struct device *dev);
+void caam_rng_exit(void);
+
+#else
+
+static inline int caam_rng_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_rng_exit(void)
+{
+}
+
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
+
+#ifdef CONFIG_CAAM_QI
+
+int caam_qi_algapi_init(struct device *dev);
+void caam_qi_algapi_exit(void);
+
+#else
+
+static inline int caam_qi_algapi_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline void caam_qi_algapi_exit(void)
+{
+}
+
+#endif /* CONFIG_CAAM_QI */
#ifdef CONFIG_DEBUG_FS
static int caam_debugfs_u64_get(void *data, u64 *val)
@@ -130,4 +215,22 @@
DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
#endif
+static inline u64 caam_get_dma_mask(struct device *dev)
+{
+ struct device_node *nprop = dev->of_node;
+
+ if (caam_ptr_sz != sizeof(u64))
+ return DMA_BIT_MASK(32);
+
+ if (caam_dpaa2)
+ return DMA_BIT_MASK(49);
+
+ if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring") ||
+ of_device_is_compatible(nprop, "fsl,sec-v5.0"))
+ return DMA_BIT_MASK(40);
+
+ return DMA_BIT_MASK(36);
+}
+
+
#endif /* INTERN_H */
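
For reference, DMA_BIT_MASK(n) expands to an n-bit all-ones mask ((1ULL << n) - 1
for n < 64), so the masks caam_get_dma_mask() can return work out to:

    DMA_BIT_MASK(32) == 0x00000000ffffffffULL   /* caam_ptr_sz != sizeof(u64)   */
    DMA_BIT_MASK(36) == 0x0000000fffffffffULL   /* default for 64-bit pointers  */
    DMA_BIT_MASK(40) == 0x000000ffffffffffULL   /* fsl,sec-v5.0 / v5.0-job-ring */
    DMA_BIT_MASK(49) == 0x0001ffffffffffffULL   /* DPAA2 (caam_dpaa2)           */
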
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index acdd720..fc97cde 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -1,8 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* CAAM/SEC 4.x transport/backend driver
* JobR backend functionality
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2019 NXP
*/
#include <linux/of_irq.h>
@@ -22,6 +24,43 @@
} ____cacheline_aligned;
static struct jr_driver_data driver_data;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+static void register_algs(struct device *dev)
+{
+ mutex_lock(&algs_lock);
+
+ if (++active_devs != 1)
+ goto algs_unlock;
+
+ caam_algapi_init(dev);
+ caam_algapi_hash_init(dev);
+ caam_pkc_init(dev);
+ caam_rng_init(dev);
+ caam_qi_algapi_init(dev);
+
+algs_unlock:
+ mutex_unlock(&algs_lock);
+}
+
+static void unregister_algs(void)
+{
+ mutex_lock(&algs_lock);
+
+ if (--active_devs != 0)
+ goto algs_unlock;
+
+ caam_qi_algapi_exit();
+
+ caam_rng_exit();
+ caam_pkc_exit();
+ caam_algapi_hash_exit();
+ caam_algapi_exit();
+
+algs_unlock:
+ mutex_unlock(&algs_lock);
+}
static int caam_reset_hw_jr(struct device *dev)
{
@@ -69,25 +108,12 @@
static int caam_jr_shutdown(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
- dma_addr_t inpbusaddr, outbusaddr;
int ret;
ret = caam_reset_hw_jr(dev);
tasklet_kill(&jrp->irqtask);
- /* Release interrupt */
- free_irq(jrp->irq, dev);
-
- /* Free rings */
- inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
- outbusaddr = rd_reg64(&jrp->rregs->outring_base);
- dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
- jrp->inpring, inpbusaddr);
- dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
- jrp->outring, outbusaddr);
- kfree(jrp->entinfo);
-
return ret;
}
@@ -108,6 +134,9 @@
return -EBUSY;
}
+ /* Unregister JR-based RNG & crypto algorithms */
+ unregister_algs();
+
/* Remove the node from Physical JobR list maintained by driver */
spin_lock(&driver_data.jr_alloc_lock);
list_del(&jrpriv->list_node);
@@ -117,7 +146,6 @@
ret = caam_jr_shutdown(jrdev);
if (ret)
dev_err(jrdev, "Failed to shut down job ring\n");
- irq_dispose_mapping(jrpriv->irq);
return ret;
}
@@ -169,20 +197,20 @@
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
void *userarg;
+ u32 outring_used = 0;
- while (rd_reg32(&jrp->rregs->outring_used)) {
+ while (outring_used ||
+ (outring_used = rd_reg32(&jrp->rregs->outring_used))) {
head = READ_ONCE(jrp->head);
- spin_lock(&jrp->outlock);
-
sw_idx = tail = jrp->tail;
hw_idx = jrp->out_ring_read_index;
for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
sw_idx = (tail + i) & (JOBR_DEPTH - 1);
- if (jrp->outring[hw_idx].desc ==
+ if (jr_outentry_desc(jrp->outring, hw_idx) ==
caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
break; /* found */
}
@@ -191,18 +219,20 @@
/* Unmap just-run descriptor so we can post-process */
dma_unmap_single(dev,
- caam_dma_to_cpu(jrp->outring[hw_idx].desc),
+ caam_dma_to_cpu(jr_outentry_desc(jrp->outring,
+ hw_idx)),
jrp->entinfo[sw_idx].desc_size,
DMA_TO_DEVICE);
/* mark completed, avoid matching on a recycled desc addr */
jrp->entinfo[sw_idx].desc_addr_dma = 0;
- /* Stash callback params for use outside of lock */
+ /* Stash callback params */
usercall = jrp->entinfo[sw_idx].callbk;
userarg = jrp->entinfo[sw_idx].cbkarg;
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
- userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus);
+ userstatus = caam32_to_cpu(jr_outentry_jrstatus(jrp->outring,
+ hw_idx));
/*
* Make sure all information from the job has been obtained
@@ -231,10 +261,9 @@
jrp->tail = tail;
}
- spin_unlock(&jrp->outlock);
-
/* Finally, execute user's callback */
usercall(dev, userdesc, userstatus, userarg);
+ outring_used--;
}
/* reenable / unmask IRQs */
@@ -344,7 +373,7 @@
head = jrp->head;
tail = READ_ONCE(jrp->tail);
- if (!rd_reg32(&jrp->rregs->inpring_avail) ||
+ if (!jrp->inpring_avail ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
spin_unlock_bh(&jrp->inplock);
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
@@ -358,7 +387,7 @@
head_entry->cbkarg = areq;
head_entry->desc_addr_dma = desc_dma;
- jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);
+ jr_inpentry_set(jrp->inpring, head, cpu_to_caam_dma(desc_dma));
/*
* Guarantee that the descriptor's DMA address has been written to
@@ -367,18 +396,22 @@
*/
smp_wmb();
- jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
- (JOBR_DEPTH - 1);
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
/*
* Ensure that all job information has been written before
- * notifying CAAM that a new job was added to the input ring.
+ * notifying CAAM that a new job was added to the input ring
+ * using a memory barrier. wr_reg32() uses the iowrite32() API to do the
+ * register write, and iowrite32() issues a memory barrier before the
+ * write operation.
*/
- wmb();
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
+ jrp->inpring_avail--;
+ if (!jrp->inpring_avail)
+ jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);
+
spin_unlock_bh(&jrp->inplock);
return 0;
@@ -396,41 +429,31 @@
jrp = dev_get_drvdata(dev);
- tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
-
- /* Connect job ring interrupt handler. */
- error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
- dev_name(dev), dev);
- if (error) {
- dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
- jrp->ridx, jrp->irq);
- goto out_kill_deq;
- }
-
error = caam_reset_hw_jr(dev);
if (error)
- goto out_free_irq;
+ return error;
- error = -ENOMEM;
- jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
- JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
+ jrp->inpring = dmam_alloc_coherent(dev, SIZEOF_JR_INPENTRY *
+ JOBR_DEPTH, &inpbusaddr,
+ GFP_KERNEL);
if (!jrp->inpring)
- goto out_free_irq;
+ return -ENOMEM;
- jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
- JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
+ jrp->outring = dmam_alloc_coherent(dev, SIZEOF_JR_OUTENTRY *
+ JOBR_DEPTH, &outbusaddr,
+ GFP_KERNEL);
if (!jrp->outring)
- goto out_free_inpring;
+ return -ENOMEM;
- jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
+ jrp->entinfo = devm_kcalloc(dev, JOBR_DEPTH, sizeof(*jrp->entinfo),
+ GFP_KERNEL);
if (!jrp->entinfo)
- goto out_free_outring;
+ return -ENOMEM;
for (i = 0; i < JOBR_DEPTH; i++)
jrp->entinfo[i].desc_addr_dma = !0;
/* Setup rings */
- jrp->inp_ring_write_index = 0;
jrp->out_ring_read_index = 0;
jrp->head = 0;
jrp->tail = 0;
@@ -440,32 +463,33 @@
wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
- jrp->ringsize = JOBR_DEPTH;
+ jrp->inpring_avail = JOBR_DEPTH;
spin_lock_init(&jrp->inplock);
- spin_lock_init(&jrp->outlock);
/* Select interrupt coalescing parameters */
clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
(JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
(JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
- return 0;
+ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
-out_free_outring:
- dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
- jrp->outring, outbusaddr);
-out_free_inpring:
- dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
- jrp->inpring, inpbusaddr);
- dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
-out_free_irq:
- free_irq(jrp->irq, dev);
-out_kill_deq:
- tasklet_kill(&jrp->irqtask);
+ /* Connect job ring interrupt handler. */
+ error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+ dev_name(dev), dev);
+ if (error) {
+ dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
+ jrp->ridx, jrp->irq);
+ tasklet_kill(&jrp->irqtask);
+ }
+
return error;
}
+static void caam_jr_irq_dispose_mapping(void *data)
+{
+ irq_dispose_mapping((unsigned long)data);
+}
/*
* Probe routine for each detected JobR subsystem.
@@ -477,6 +501,7 @@
struct caam_job_ring __iomem *ctrl;
struct caam_drv_private_jr *jrpriv;
static int total_jobrs;
+ struct resource *r;
int error;
jrdev = &pdev->dev;
@@ -492,45 +517,43 @@
nprop = pdev->dev.of_node;
/* Get configuration properties from device tree */
/* First, get register page */
- ctrl = of_iomap(nprop, 0);
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(jrdev, "platform_get_resource() failed\n");
+ return -ENOMEM;
+ }
+
+ ctrl = devm_ioremap(jrdev, r->start, resource_size(r));
if (!ctrl) {
- dev_err(jrdev, "of_iomap() failed\n");
+ dev_err(jrdev, "devm_ioremap() failed\n");
return -ENOMEM;
}
jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
- if (sizeof(dma_addr_t) == sizeof(u64)) {
- if (caam_dpaa2)
- error = dma_set_mask_and_coherent(jrdev,
- DMA_BIT_MASK(49));
- else if (of_device_is_compatible(nprop,
- "fsl,sec-v5.0-job-ring"))
- error = dma_set_mask_and_coherent(jrdev,
- DMA_BIT_MASK(40));
- else
- error = dma_set_mask_and_coherent(jrdev,
- DMA_BIT_MASK(36));
- } else {
- error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
- }
+ error = dma_set_mask_and_coherent(jrdev, caam_get_dma_mask(jrdev));
if (error) {
dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
error);
- iounmap(ctrl);
return error;
}
/* Identify the interrupt */
jrpriv->irq = irq_of_parse_and_map(nprop, 0);
+ if (!jrpriv->irq) {
+ dev_err(jrdev, "irq_of_parse_and_map failed\n");
+ return -EINVAL;
+ }
+
+ error = devm_add_action_or_reset(jrdev, caam_jr_irq_dispose_mapping,
+ (void *)(unsigned long)jrpriv->irq);
+ if (error)
+ return error;
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
- if (error) {
- irq_dispose_mapping(jrpriv->irq);
- iounmap(ctrl);
+ if (error)
return error;
- }
jrpriv->dev = jrdev;
spin_lock(&driver_data.jr_alloc_lock);
@@ -539,6 +562,8 @@
atomic_set(&jrpriv->tfm_count, 0);
+ register_algs(jrdev->parent);
+
return 0;
}
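
The enqueue path above now keeps a software credit counter (inpring_avail) and only
re-reads the hardware inpring_avail register once the cached count drops to zero,
trimming MMIO reads from the hot path. A self-contained sketch of that credit
pattern in plain C; the toy_ring type and its fields are illustrative, not driver
API:

    struct toy_ring {
            unsigned int credits;    /* cached count of free input-ring slots */
            unsigned int hw_avail;   /* stands in for the inpring_avail register */
    };

    static int toy_submit(struct toy_ring *ring)
    {
            if (!ring->credits)
                    return -1;                       /* ring believed full */

            /* ... write the descriptor address into the next slot,
             *     then ring the doorbell ... */

            if (!--ring->credits)
                    ring->credits = ring->hw_avail;  /* refill from "register" */

            return 0;
    }
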
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 312b5f0..5a851dd 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -15,15 +15,14 @@
void *context)
{
struct split_key_result *res = context;
+ int ecode = 0;
-#ifdef DEBUG
- dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
+ dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
if (err)
- caam_jr_strstatus(dev, err);
+ ecode = caam_jr_strstatus(dev, err);
- res->err = err;
+ res->err = ecode;
complete(&res->completion);
}
@@ -48,21 +47,21 @@
{
u32 *desc;
struct split_key_result result;
- dma_addr_t dma_addr_in, dma_addr_out;
+ dma_addr_t dma_addr;
+ unsigned int local_max;
int ret = -ENOMEM;
adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
adata->keylen_pad = split_key_pad_len(adata->algtype &
OP_ALG_ALGSEL_MASK);
+ local_max = max(keylen, adata->keylen_pad);
-#ifdef DEBUG
- dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+ dev_dbg(jrdev, "split keylen %d split keylen padded %d\n",
adata->keylen, adata->keylen_pad);
- print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
-#endif
+ print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
- if (adata->keylen_pad > max_keylen)
+ if (local_max > max_keylen)
return -EINVAL;
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
@@ -71,22 +70,16 @@
return ret;
}
- dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, dma_addr_in)) {
- dev_err(jrdev, "unable to map key input memory\n");
+ memcpy(key_out, key_in, keylen);
+
+ dma_addr = dma_map_single(jrdev, key_out, local_max, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(jrdev, dma_addr)) {
+ dev_err(jrdev, "unable to map key memory\n");
goto out_free;
}
- dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, dma_addr_out)) {
- dev_err(jrdev, "unable to map key output memory\n");
- goto out_unmap_in;
- }
-
init_job_desc(desc, 0);
- append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
+ append_key(desc, dma_addr, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
@@ -104,15 +97,12 @@
* FIFO_STORE with the explicit split-key content store
* (0x26 output type)
*/
- append_fifo_store(desc, dma_addr_out, adata->keylen,
+ append_fifo_store(desc, dma_addr, adata->keylen,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
result.err = 0;
init_completion(&result.completion);
@@ -122,17 +112,13 @@
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_out,
- adata->keylen_pad, 1);
-#endif
+
+ print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+ adata->keylen_pad, 1);
}
- dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
- DMA_FROM_DEVICE);
-out_unmap_in:
- dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+ dma_unmap_single(jrdev, dma_addr, local_max, DMA_BIDIRECTIONAL);
out_free:
kfree(desc);
return ret;
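
gen_split_key() now stages the input key in the output buffer and maps that single
buffer DMA_BIDIRECTIONAL, replacing the separate DMA_TO_DEVICE/DMA_FROM_DEVICE
mappings and their two unmap paths. The general shape of the pattern, as a sketch
with placeholder buffer/length names:

    /* One buffer serves as both DMA source and destination. */
    memcpy(buf_out, buf_in, in_len);                 /* stage the input in place */

    dma_addr = dma_map_single(dev, buf_out, max(in_len, out_len),
                              DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, dma_addr))
            return -ENOMEM;

    /* ... device reads in_len bytes from dma_addr and writes the result back ... */

    dma_unmap_single(dev, dma_addr, max(in_len, out_len), DMA_BIDIRECTIONAL);
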
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index 810f0be..68c1fd5 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -512,7 +512,9 @@
dma_addr_t n_dma;
dma_addr_t e_dma;
u32 f_len;
-} __packed;
+};
+
+#define SIZEOF_RSA_PUB_PDB (2 * sizeof(u32) + 4 * caam_ptr_sz)
/**
* RSA Decrypt PDB - Private Key Form #1
@@ -528,7 +530,9 @@
dma_addr_t f_dma;
dma_addr_t n_dma;
dma_addr_t d_dma;
-} __packed;
+};
+
+#define SIZEOF_RSA_PRIV_F1_PDB (sizeof(u32) + 4 * caam_ptr_sz)
/**
* RSA Decrypt PDB - Private Key Form #2
@@ -554,7 +558,9 @@
dma_addr_t tmp1_dma;
dma_addr_t tmp2_dma;
u32 p_q_len;
-} __packed;
+};
+
+#define SIZEOF_RSA_PRIV_F2_PDB (2 * sizeof(u32) + 7 * caam_ptr_sz)
/**
* RSA Decrypt PDB - Private Key Form #3
@@ -586,6 +592,8 @@
dma_addr_t tmp1_dma;
dma_addr_t tmp2_dma;
u32 p_q_len;
-} __packed;
+};
+
+#define SIZEOF_RSA_PRIV_F3_PDB (2 * sizeof(u32) + 9 * caam_ptr_sz)
#endif
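
Since the PDB structs are no longer __packed and the size that matters is the
pointer size the CAAM hardware uses (caam_ptr_sz) rather than sizeof(dma_addr_t),
the descriptor headers are now built from the SIZEOF_* macros above. Worked out for
the two pointer sizes:

    SIZEOF_RSA_PUB_PDB     = 2*4 + 4*8 = 40 bytes    (2*4 + 4*4 = 24 with 32-bit pointers)
    SIZEOF_RSA_PRIV_F1_PDB = 1*4 + 4*8 = 36 bytes    (1*4 + 4*4 = 20)
    SIZEOF_RSA_PRIV_F2_PDB = 2*4 + 7*8 = 64 bytes    (2*4 + 7*4 = 36)
    SIZEOF_RSA_PRIV_F3_PDB = 2*4 + 9*8 = 80 bytes    (2*4 + 9*4 = 44)
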
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
index 2a8d87e..0d5ee76 100644
--- a/drivers/crypto/caam/pkc_desc.c
+++ b/drivers/crypto/caam/pkc_desc.c
@@ -13,7 +13,7 @@
/* Descriptor for RSA Public operation */
void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb)
{
- init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ init_job_desc_pdb(desc, 0, SIZEOF_RSA_PUB_PDB);
append_cmd(desc, pdb->sgf);
append_ptr(desc, pdb->f_dma);
append_ptr(desc, pdb->g_dma);
@@ -26,7 +26,7 @@
/* Descriptor for RSA Private operation - Private Key Form #1 */
void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
{
- init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F1_PDB);
append_cmd(desc, pdb->sgf);
append_ptr(desc, pdb->g_dma);
append_ptr(desc, pdb->f_dma);
@@ -39,7 +39,7 @@
/* Descriptor for RSA Private operation - Private Key Form #2 */
void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
{
- init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F2_PDB);
append_cmd(desc, pdb->sgf);
append_ptr(desc, pdb->g_dma);
append_ptr(desc, pdb->f_dma);
@@ -56,7 +56,7 @@
/* Descriptor for RSA Private operation - Private Key Form #3 */
void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
{
- init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F3_PDB);
append_cmd(desc, pdb->sgf);
append_ptr(desc, pdb->g_dma);
append_ptr(desc, pdb->f_dma);
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 67f7f8c..378f627 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -4,7 +4,7 @@
* Queue Interface backend functionality
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2017, 2019 NXP
*/
#include <linux/cpumask.h>
@@ -18,6 +18,7 @@
#include "desc_constr.h"
#define PREHDR_RSLS_SHIFT 31
+#define PREHDR_ABS BIT(25)
/*
* Use a reasonable backlog of frames (per CPU) as congestion threshold,
@@ -58,11 +59,9 @@
/*
* caam_qi_priv - CAAM QI backend private params
* @cgr: QMan congestion group
- * @qi_pdev: platform device for QI backend
*/
struct caam_qi_priv {
struct qman_cgr cgr;
- struct platform_device *qi_pdev;
};
static struct caam_qi_priv qipriv ____cacheline_aligned;
@@ -84,13 +83,6 @@
#endif
/*
- * CPU from where the module initialised. This is required because QMan driver
- * requires CGRs to be removed from same CPU from where they were originally
- * allocated.
- */
-static int mod_init_cpu;
-
-/*
 * This is a cache of buffers, from which the users of CAAM QI driver
* can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
* doing malloc on the hotpath.
@@ -102,6 +94,16 @@
*/
static struct kmem_cache *qi_cache;
+static void *caam_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
+
int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
struct qm_fd fd;
@@ -142,6 +144,7 @@
const struct qm_fd *fd;
struct caam_drv_req *drv_req;
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+ struct caam_drv_private *priv = dev_get_drvdata(qidev);
fd = &msg->ern.fd;
@@ -150,7 +153,7 @@
return;
}
- drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (!drv_req) {
dev_err(qidev,
"Can't find original request for CAAM response\n");
@@ -160,7 +163,10 @@
dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
- drv_req->cbk(drv_req, -EIO);
+ if (fd->status)
+ drv_req->cbk(drv_req, be32_to_cpu(fd->status));
+ else
+ drv_req->cbk(drv_req, JRSTA_SSRC_QI);
}
static struct qman_fq *create_caam_req_fq(struct device *qidev,
@@ -325,7 +331,7 @@
/* Create a new req FQ in parked state */
new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
drv_ctx->context_a, 0);
- if (unlikely(IS_ERR_OR_NULL(new_fq))) {
+ if (IS_ERR(new_fq)) {
dev_err(qidev, "FQ allocation for shdesc update failed\n");
return PTR_ERR(new_fq);
}
@@ -353,6 +359,7 @@
*/
drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
num_words);
+ drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
dma_sync_single_for_device(qidev, drv_ctx->context_a,
sizeof(drv_ctx->sh_desc) +
@@ -408,6 +415,7 @@
*/
drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
num_words);
+ drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
@@ -438,7 +446,7 @@
/* Attach request FQ */
drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
QMAN_INITFQ_FLAG_SCHED);
- if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
+ if (IS_ERR(drv_ctx->req_fq)) {
dev_err(qidev, "create_caam_req_fq failed\n");
dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
kfree(drv_ctx);
@@ -492,12 +500,11 @@
}
EXPORT_SYMBOL(caam_drv_ctx_rel);
-int caam_qi_shutdown(struct device *qidev)
+void caam_qi_shutdown(struct device *qidev)
{
- int i, ret;
- struct caam_qi_priv *priv = dev_get_drvdata(qidev);
+ int i;
+ struct caam_qi_priv *priv = &qipriv;
const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
for_each_cpu(i, cpus) {
struct napi_struct *irqtask;
@@ -510,26 +517,10 @@
dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
}
- /*
- * QMan driver requires CGRs to be deleted from same CPU from where they
- * were instantiated. Hence we get the module removal execute from the
- * same CPU from where it was originally inserted.
- */
- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
- ret = qman_delete_cgr(&priv->cgr);
- if (ret)
- dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
- else
- qman_release_cgrid(priv->cgr.cgrid);
+ qman_delete_cgr_safe(&priv->cgr);
+ qman_release_cgrid(priv->cgr.cgrid);
kmem_cache_destroy(qi_cache);
-
- /* Now that we're done with the CGRs, restore the cpus allowed mask */
- set_cpus_allowed_ptr(current, &old_cpumask);
-
- platform_device_unregister(priv->qi_pdev);
- return ret;
}
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
@@ -572,6 +563,7 @@
struct caam_drv_req *drv_req;
const struct qm_fd *fd;
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+ struct caam_drv_private *priv = dev_get_drvdata(qidev);
u32 status;
if (caam_qi_napi_schedule(p, caam_napi))
@@ -585,8 +577,9 @@
if (ssrc != JRSTA_SSRC_CCB_ERROR ||
err_id != JRSTA_CCBERR_ERRID_ICVCHK)
- dev_err(qidev, "Error: %#x in CAAM response FD\n",
- status);
+ dev_err_ratelimited(qidev,
+ "Error: %#x in CAAM response FD\n",
+ status);
}
if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
@@ -594,7 +587,7 @@
return qman_cb_dqrr_consume;
}
- drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (unlikely(!drv_req)) {
dev_err(qidev,
"Can't find original request for caam response\n");
@@ -714,44 +707,17 @@
int caam_qi_init(struct platform_device *caam_pdev)
{
int err, i;
- struct platform_device *qi_pdev;
struct device *ctrldev = &caam_pdev->dev, *qidev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
- static struct platform_device_info qi_pdev_info = {
- .name = "caam_qi",
- .id = PLATFORM_DEVID_NONE
- };
-
- /*
- * QMAN requires CGRs to be removed from same CPU+portal from where it
- * was originally allocated. Hence we need to note down the
- * initialisation CPU and use the same CPU for module exit.
- * We select the first CPU to from the list of portal owning CPUs.
- * Then we pin module init to this CPU.
- */
- mod_init_cpu = cpumask_first(cpus);
- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
- qi_pdev_info.parent = ctrldev;
- qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
- qi_pdev = platform_device_register_full(&qi_pdev_info);
- if (IS_ERR(qi_pdev))
- return PTR_ERR(qi_pdev);
- set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
ctrlpriv = dev_get_drvdata(ctrldev);
- qidev = &qi_pdev->dev;
-
- qipriv.qi_pdev = qi_pdev;
- dev_set_drvdata(qidev, &qipriv);
+ qidev = ctrldev;
/* Initialize the congestion detection */
err = init_cgr(qidev);
if (err) {
dev_err(qidev, "CGR initialization failed: %d\n", err);
- platform_device_unregister(qi_pdev);
return err;
}
@@ -760,7 +726,6 @@
if (err) {
dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
free_rsp_fqs();
- platform_device_unregister(qi_pdev);
return err;
}
@@ -783,24 +748,20 @@
napi_enable(irqtask);
}
- /* Hook up QI device to parent controlling caam device */
- ctrlpriv->qidev = qidev;
-
qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
SLAB_CACHE_DMA, NULL);
if (!qi_cache) {
dev_err(qidev, "Can't allocate CAAM cache\n");
free_rsp_fqs();
- platform_device_unregister(qi_pdev);
return -ENOMEM;
}
- /* Done with the CGRs; restore the cpus allowed mask */
- set_cpus_allowed_ptr(current, &old_cpumask);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
 &times_congested, &caam_fops_u64_ro);
#endif
+
+ ctrlpriv->qi_init = 1;
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
return 0;
}
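
Behind an IOMMU the address carried in the frame descriptor is an IOVA, so
caam_iova_to_virt() translates it through the controller's iommu_domain before
phys_to_virt(); with no domain it falls back to treating the address as physical.
The domain pointer added to struct caam_drv_private is presumably filled in by the
controller probe (that hunk is not in this excerpt), along the lines of the
following sketch, which should be read as an assumption rather than part of the
patch:

    #include <linux/iommu.h>

    /* Assumed controller-probe step: remember the device's IOMMU domain so the
     * QI response path can translate FD addresses back to kernel pointers.
     * A NULL domain simply means no IOMMU translation is in effect. */
    ctrlpriv->domain = iommu_get_domain_for_dev(ctrldev);
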
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 357b69f..db05495 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -14,32 +14,6 @@
#include "desc.h"
#include "desc_constr.h"
-/*
- * CAAM hardware constructs a job descriptor which points to a shared descriptor
- * (as pointed by context_a of to-CAAM FQ).
- * When the job descriptor is executed by DECO, the whole job descriptor
- * together with shared descriptor gets loaded in DECO buffer, which is
- * 64 words (each 32-bit) long.
- *
- * The job descriptor constructed by CAAM hardware has the following layout:
- *
- * HEADER (1 word)
- * Shdesc ptr (1 or 2 words)
- * SEQ_OUT_PTR (1 word)
- * Out ptr (1 or 2 words)
- * Out length (1 word)
- * SEQ_IN_PTR (1 word)
- * In ptr (1 or 2 words)
- * In length (1 word)
- *
- * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
- *
- * Apart from shdesc contents, the total number of words that get loaded in DECO
- * buffer are '8' or '11'. The remaining words in DECO buffer can be used for
- * storing shared descriptor.
- */
-#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
-
/* Length of a single buffer in the QI driver memory cache */
#define CAAM_QI_MEMCACHE_SIZE 768
@@ -62,7 +36,6 @@
enum optype {
ENCRYPT,
DECRYPT,
- GIVENCRYPT,
NUM_OP
};
@@ -174,7 +147,7 @@
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
int caam_qi_init(struct platform_device *pdev);
-int caam_qi_shutdown(struct device *dev);
+void caam_qi_shutdown(struct device *dev);
/**
* qi_cache_alloc - Allocate buffers from CAAM-QI cache
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index ce3f9ad..05127b7 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -3,6 +3,7 @@
* CAAM hardware register-level view
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*/
#ifndef REGS_H
@@ -11,6 +12,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
/*
* Architecture-specific register access methods
@@ -69,6 +71,7 @@
extern bool caam_little_end;
extern bool caam_imx;
+extern size_t caam_ptr_sz;
#define caam_to_cpu(len) \
static inline u##len caam##len ## _to_cpu(u##len val) \
@@ -136,45 +139,37 @@
* base + 0x0000 : least-significant 32 bits
* base + 0x0004 : most-significant 32 bits
*/
-#ifdef CONFIG_64BIT
static inline void wr_reg64(void __iomem *reg, u64 data)
{
- if (caam_little_end)
- iowrite64(data, reg);
- else
- iowrite64be(data, reg);
-}
-
-static inline u64 rd_reg64(void __iomem *reg)
-{
- if (caam_little_end)
- return ioread64(reg);
- else
- return ioread64be(reg);
-}
-
-#else /* CONFIG_64BIT */
-static inline void wr_reg64(void __iomem *reg, u64 data)
-{
- if (!caam_imx && caam_little_end) {
- wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
- wr_reg32((u32 __iomem *)(reg), data);
+ if (caam_little_end) {
+ if (caam_imx) {
+ iowrite32(data >> 32, (u32 __iomem *)(reg));
+ iowrite32(data, (u32 __iomem *)(reg) + 1);
+ } else {
+ iowrite64(data, reg);
+ }
} else {
- wr_reg32((u32 __iomem *)(reg), data >> 32);
- wr_reg32((u32 __iomem *)(reg) + 1, data);
+ iowrite64be(data, reg);
}
}
static inline u64 rd_reg64(void __iomem *reg)
{
- if (!caam_imx && caam_little_end)
- return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
- (u64)rd_reg32((u32 __iomem *)(reg)));
+ if (caam_little_end) {
+ if (caam_imx) {
+ u32 low, high;
- return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
- (u64)rd_reg32((u32 __iomem *)(reg) + 1));
+ high = ioread32(reg);
+ low = ioread32(reg + sizeof(u32));
+
+ return low + ((u64)high << 32);
+ } else {
+ return ioread64(reg);
+ }
+ } else {
+ return ioread64be(reg);
+ }
}
-#endif /* CONFIG_64BIT */
static inline u64 cpu_to_caam_dma64(dma_addr_t value)
{
@@ -194,22 +189,133 @@
return caam64_to_cpu(value);
}
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
-#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
-#else
-#define cpu_to_caam_dma(value) cpu_to_caam32(value)
-#define caam_dma_to_cpu(value) caam32_to_cpu(value)
-#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+static inline u64 cpu_to_caam_dma(u64 value)
+{
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+ caam_ptr_sz == sizeof(u64))
+ return cpu_to_caam_dma64(value);
+ else
+ return cpu_to_caam32(value);
+}
+
+static inline u64 caam_dma_to_cpu(u64 value)
+{
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+ caam_ptr_sz == sizeof(u64))
+ return caam_dma64_to_cpu(value);
+ else
+ return caam32_to_cpu(value);
+}
/*
* jr_outentry
* Represents each entry in a JobR output ring
*/
-struct jr_outentry {
- dma_addr_t desc;/* Pointer to completed descriptor */
- u32 jrstatus; /* Status for completed descriptor */
-} __packed;
+
+static inline void jr_outentry_get(void *outring, int hw_idx, dma_addr_t *desc,
+ u32 *jrstatus)
+{
+
+ if (caam_ptr_sz == sizeof(u32)) {
+ struct {
+ u32 desc;
+ u32 jrstatus;
+ } __packed *outentry = outring;
+
+ *desc = outentry[hw_idx].desc;
+ *jrstatus = outentry[hw_idx].jrstatus;
+ } else {
+ struct {
+ dma_addr_t desc;/* Pointer to completed descriptor */
+ u32 jrstatus; /* Status for completed descriptor */
+ } __packed *outentry = outring;
+
+ *desc = outentry[hw_idx].desc;
+ *jrstatus = outentry[hw_idx].jrstatus;
+ }
+}
+
+#define SIZEOF_JR_OUTENTRY (caam_ptr_sz + sizeof(u32))
+
+static inline dma_addr_t jr_outentry_desc(void *outring, int hw_idx)
+{
+ dma_addr_t desc;
+ u32 unused;
+
+ jr_outentry_get(outring, hw_idx, &desc, &unused);
+
+ return desc;
+}
+
+static inline u32 jr_outentry_jrstatus(void *outring, int hw_idx)
+{
+ dma_addr_t unused;
+ u32 jrstatus;
+
+ jr_outentry_get(outring, hw_idx, &unused, &jrstatus);
+
+ return jrstatus;
+}
+
+static inline void jr_inpentry_set(void *inpring, int hw_idx, dma_addr_t val)
+{
+ if (caam_ptr_sz == sizeof(u32)) {
+ u32 *inpentry = inpring;
+
+ inpentry[hw_idx] = val;
+ } else {
+ dma_addr_t *inpentry = inpring;
+
+ inpentry[hw_idx] = val;
+ }
+}
+
+#define SIZEOF_JR_INPENTRY caam_ptr_sz
+
+
+/* Version registers (Era 10+) e80-eff */
+struct version_regs {
+ u32 crca; /* CRCA_VERSION */
+ u32 afha; /* AFHA_VERSION */
+ u32 kfha; /* KFHA_VERSION */
+ u32 pkha; /* PKHA_VERSION */
+ u32 aesa; /* AESA_VERSION */
+ u32 mdha; /* MDHA_VERSION */
+ u32 desa; /* DESA_VERSION */
+ u32 snw8a; /* SNW8A_VERSION */
+ u32 snw9a; /* SNW9A_VERSION */
+ u32 zuce; /* ZUCE_VERSION */
+ u32 zuca; /* ZUCA_VERSION */
+ u32 ccha; /* CCHA_VERSION */
+ u32 ptha; /* PTHA_VERSION */
+ u32 rng; /* RNG_VERSION */
+ u32 trng; /* TRNG_VERSION */
+ u32 aaha; /* AAHA_VERSION */
+ u32 rsvd[10];
+ u32 sr; /* SR_VERSION */
+ u32 dma; /* DMA_VERSION */
+ u32 ai; /* AI_VERSION */
+ u32 qi; /* QI_VERSION */
+ u32 jr; /* JR_VERSION */
+ u32 deco; /* DECO_VERSION */
+};
+
+/* Version registers bitfields */
+
+/* Number of CHAs instantiated */
+#define CHA_VER_NUM_MASK 0xffull
+/* CHA Miscellaneous Information */
+#define CHA_VER_MISC_SHIFT 8
+#define CHA_VER_MISC_MASK (0xffull << CHA_VER_MISC_SHIFT)
+/* CHA Revision Number */
+#define CHA_VER_REV_SHIFT 16
+#define CHA_VER_REV_MASK (0xffull << CHA_VER_REV_SHIFT)
+/* CHA Version ID */
+#define CHA_VER_VID_SHIFT 24
+#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
+
+/* CHA Miscellaneous Information - AESA_MISC specific */
+#define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT)
/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
@@ -223,15 +329,13 @@
#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
/*
- * CHA version IDs / instantiation bitfields
+ * CHA version IDs / instantiation bitfields (< Era 10)
* Defined for use with the cha_id fields in perfmon, but the same shift/mask
* selectors can be used to pull out the number of instantiated blocks within
* cha_num fields in perfmon because the locations are the same.
*/
#define CHA_ID_LS_AES_SHIFT 0
#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
-#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT)
-#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT)
#define CHA_ID_LS_DES_SHIFT 4
#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
@@ -241,9 +345,6 @@
#define CHA_ID_LS_MD_SHIFT 12
#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT)
#define CHA_ID_LS_RNG_SHIFT 16
#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
@@ -269,6 +370,13 @@
#define CHA_ID_MS_JR_SHIFT 28
#define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
+/* Specific CHA version IDs */
+#define CHA_VER_VID_AES_LP 0x3ull
+#define CHA_VER_VID_AES_HP 0x4ull
+#define CHA_VER_VID_MD_LP256 0x0ull
+#define CHA_VER_VID_MD_LP512 0x1ull
+#define CHA_VER_VID_MD_HP 0x2ull
+
struct sec_vid {
u16 ip_id;
u8 maj_rev;
@@ -291,6 +399,7 @@
u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
#define CTPR_MS_QI_SHIFT 25
#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_PS BIT(17)
#define CTPR_MS_DPAA2 BIT(13)
#define CTPR_MS_VIRT_EN_INCL 0x00000001
#define CTPR_MS_VIRT_EN_POR 0x00000002
@@ -479,8 +588,10 @@
struct rng4tst r4tst[2];
};
- u32 rsvd9[448];
+ u32 rsvd9[416];
+ /* Version registers - introduced with era 10 e80-eff */
+ struct version_regs vreg;
/* Performance Monitor f00-fff */
struct caam_perfmon perfmon;
};
@@ -570,8 +681,10 @@
u32 rsvd11;
u32 jrcommand; /* JRCRx - JobR command */
- u32 rsvd12[932];
+ u32 rsvd12[900];
+ /* Version registers - introduced with era 10 e80-eff */
+ struct version_regs vreg;
/* Performance Monitor f00-fff */
struct caam_perfmon perfmon;
};
@@ -590,6 +703,7 @@
#define JRSTA_SSRC_CCB_ERROR 0x20000000
#define JRSTA_SSRC_JUMP_HALT_USER 0x30000000
#define JRSTA_SSRC_DECO 0x40000000
+#define JRSTA_SSRC_QI 0x50000000
#define JRSTA_SSRC_JRERROR 0x60000000
#define JRSTA_SSRC_JUMP_HALT_CC 0x70000000
@@ -633,6 +747,8 @@
#define JRSTA_DECOERR_INVSIGN 0x86
#define JRSTA_DECOERR_DSASIGN 0x87
+#define JRSTA_QIERR_ERROR_MASK 0x00ff
+
#define JRSTA_CCBERR_JUMP 0x08000000
#define JRSTA_CCBERR_INDEX_MASK 0xff00
#define JRSTA_CCBERR_INDEX_SHIFT 8
@@ -876,13 +992,19 @@
u32 rsvd29[48];
u32 descbuf[64]; /* DxDESB - Descriptor buffer */
u32 rscvd30[193];
-#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
#define DESC_DBG_DECO_STAT_VALID 0x80000000
#define DESC_DBG_DECO_STAT_MASK 0x00F00000
+#define DESC_DBG_DECO_STAT_SHIFT 20
u32 desc_dbg; /* DxDDR - DECO Debug Register */
- u32 rsvd31[126];
+ u32 rsvd31[13];
+#define DESC_DER_DECO_STAT_MASK 0x000F0000
+#define DESC_DER_DECO_STAT_SHIFT 16
+ u32 dbg_exec; /* DxDER - DECO Debug Exec Register */
+ u32 rsvd32[112];
};
+#define DECO_STAT_HOST_ERR 0xD
+
#define DECO_JQCR_WHL 0x20000000
#define DECO_JQCR_FOUR 0x10000000
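
Because the jr_outentry/jr_inpentry layouts are now chosen by caam_ptr_sz at run
time, the ring element sizes are run-time values too, which is why the jr.c hunk
earlier switched its coherent allocations to SIZEOF_JR_INPENTRY/SIZEOF_JR_OUTENTRY:

    caam_ptr_sz == 4:  SIZEOF_JR_INPENTRY = 4,  SIZEOF_JR_OUTENTRY = 4 + 4 = 8
    caam_ptr_sz == 8:  SIZEOF_JR_INPENTRY = 8,  SIZEOF_JR_OUTENTRY = 8 + 4 = 12

    /* e.g. the input ring allocation scales with the detected pointer size */
    jrp->inpring = dmam_alloc_coherent(dev, SIZEOF_JR_INPENTRY * JOBR_DEPTH,
                                       &inpbusaddr, GFP_KERNEL);
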
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
index d000b4d..d56cc7e 100644
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
* Copyright 2016-2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SG_SW_QM_H
@@ -81,15 +54,19 @@
* but does not have final bit; instead, returns last entry
*/
static inline struct qm_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
struct qm_sg_entry *qm_sg_ptr, u16 offset)
{
- while (sg_count && sg) {
- dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
- sg_dma_len(sg), offset);
+ int ent_len;
+
+ while (len) {
+ ent_len = min_t(int, sg_dma_len(sg), len);
+
+ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+ offset);
qm_sg_ptr++;
sg = sg_next(sg);
- sg_count--;
+ len -= ent_len;
}
return qm_sg_ptr - 1;
}
@@ -98,10 +75,10 @@
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
*/
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
struct qm_sg_entry *qm_sg_ptr, u16 offset)
{
- qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+ qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
}
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
index b5b4c12..b8b737d 100644
--- a/drivers/crypto/caam/sg_sw_qm2.h
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -1,35 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2015-2016 Freescale Semiconductor, Inc.
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the names of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SG_SW_QM2_H_
@@ -53,15 +25,19 @@
* but does not have final bit; instead, returns last entry
*/
static inline struct dpaa2_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
{
- while (sg_count && sg) {
- dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
- sg_dma_len(sg), offset);
+ int ent_len;
+
+ while (len) {
+ ent_len = min_t(int, sg_dma_len(sg), len);
+
+ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+ offset);
qm_sg_ptr++;
sg = sg_next(sg);
- sg_count--;
+ len -= ent_len;
}
return qm_sg_ptr - 1;
}
@@ -70,11 +46,11 @@
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
*/
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
struct dpaa2_sg_entry *qm_sg_ptr,
u16 offset)
{
- qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+ qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
dpaa2_sg_set_final(qm_sg_ptr, true);
}
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index dbfa9fc..07e1ee9 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -35,11 +35,9 @@
sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
SEC4_SG_OFFSET_MASK);
}
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
- DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
- sizeof(struct sec4_sg_entry), 1);
-#endif
+
+ print_hex_dump_debug("sec4_sg_ptr@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ sec4_sg_ptr, sizeof(struct sec4_sg_entry), 1);
}
/*
@@ -47,15 +45,19 @@
* but does not have final bit; instead, returns last entry
*/
static inline struct sec4_sg_entry *
-sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
+sg_to_sec4_sg(struct scatterlist *sg, int len,
struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
{
- while (sg_count) {
- dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
- sg_dma_len(sg), offset);
+ int ent_len;
+
+ while (len) {
+ ent_len = min_t(int, sg_dma_len(sg), len);
+
+ dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), ent_len,
+ offset);
sec4_sg_ptr++;
sg = sg_next(sg);
- sg_count--;
+ len -= ent_len;
}
return sec4_sg_ptr - 1;
}
@@ -72,11 +74,11 @@
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
*/
-static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int len,
struct sec4_sg_entry *sec4_sg_ptr,
u16 offset)
{
- sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
+ sec4_sg_ptr = sg_to_sec4_sg(sg, len, sec4_sg_ptr, offset);
sg_to_sec4_set_last(sec4_sg_ptr);
}
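
All three sg_sw_* helpers (the qm, qm2 and sec4 variants) now walk the scatterlist
by remaining byte count instead of by entry count, clamping each hardware table
entry to min(sg_dma_len(sg), len). A quick worked example with illustrative
lengths:

    /* DMA-mapped scatterlist entries of 64, 64 and 64 bytes; the job only
     * needs len = 100 bytes described in the link table:
     *
     *   iteration 1: ent_len = min(64, 100) = 64, remaining len = 36
     *   iteration 2: ent_len = min(64,  36) = 36, remaining len = 0, done
     *
     * The previous entry-count interface emitted the full sg_dma_len() of every
     * entry, so the tail entry could describe more bytes than the job needed;
     * the byte-based walk truncates the final entry to the exact length.
     */
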