Update Linux to v5.4.2

Sync the ccree driver with the upstream v5.4.2 kernel. Highlights of
this update: drop the ivgen module and the generated-IV flow, parse
authenc() keys with crypto_authenc_extractkeys() instead of open-coded
rtattr handling, move the DES weak-key checks to the
crypto/internal/des.h helpers, keep a private copy of the association
length in the AEAD request context, replace the hand-rolled
scatterlist walkers with the stock sg helpers, and add SM4 and
protected-key (HW/CPP) support to the cipher code.

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
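A note on the key-parsing change: cc_aead_setkey() now uses the
crypto_authenc_extractkeys() helper instead of walking the rtattr
blob by hand. A minimal usage sketch (illustration only, not part of
the patch; example_split_authenc_key is a hypothetical name):

    #include <crypto/authenc.h>
    #include <linux/string.h>

    static int example_split_authenc_key(const u8 *key, unsigned int keylen)
    {
            struct crypto_authenc_keys keys;
            int rc;

            /* Validates the rtattr header and splits the blob into
             * keys.authkey/keys.authkeylen and keys.enckey/keys.enckeylen.
             */
            rc = crypto_authenc_extractkeys(&keys, key, keylen);
            if (rc)
                    return rc;

            /* ... consume the two sub-keys ... */

            memzero_explicit(&keys, sizeof(keys));
            return 0;
    }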
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index bdc2797..5cfda50 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2012-2019 ARM Limited (or its affiliates).
obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
-ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_sram_mgr.o
ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 01b82b8..d3e8faa 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
-#include <crypto/des.h>
+#include <crypto/internal/des.h>
#include <linux/rtnetlink.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
@@ -23,12 +23,8 @@
#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
-#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
-/* Value of each ICV_CMP byte (of 8) in case of success */
-#define ICV_VERIF_OK 0x01
-
struct cc_aead_handle {
cc_sram_addr_t sram_workspace_addr;
struct list_head aead_list;
@@ -58,6 +54,7 @@
unsigned int enc_keylen;
unsigned int auth_keylen;
unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
+ unsigned int hash_len;
enum drv_cipher_mode cipher_mode;
enum cc_flow_mode flow_mode;
enum drv_hash_mode auth_mode;
@@ -122,6 +119,13 @@
}
}
+static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ return cc_get_default_hash_len(ctx->drvdata);
+}
+
static int cc_aead_init(struct crypto_aead *tfm)
{
struct aead_alg *alg = crypto_aead_alg(tfm);
@@ -196,6 +200,7 @@
ctx->auth_state.hmac.ipad_opad = NULL;
ctx->auth_state.hmac.padded_authkey = NULL;
}
+ ctx->hash_len = cc_get_aead_hash_len(tfm);
return 0;
@@ -211,6 +216,10 @@
struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ /* BACKLOG notification */
+ if (err == -EINPROGRESS)
+ goto done;
+
cc_unmap_aead_request(dev, areq);
/* Restore ordinary iv pointer */
@@ -227,31 +236,17 @@
/* In case of payload authentication failure, we MUST NOT
* reveal the decrypted message --> zero its memory.
*/
- cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
+ sg_zero_buffer(areq->dst, sg_nents(areq->dst),
+ areq->cryptlen, 0);
err = -EBADMSG;
}
- } else { /*ENCRYPT*/
- if (areq_ctx->is_icv_fragmented) {
- u32 skip = areq->cryptlen + areq_ctx->dst_offset;
+ /*ENCRYPT*/
+ } else if (areq_ctx->is_icv_fragmented) {
+ u32 skip = areq->cryptlen + areq_ctx->dst_offset;
- cc_copy_sg_portion(dev, areq_ctx->mac_buf,
- areq_ctx->dst_sgl, skip,
- (skip + ctx->authsize),
- CC_SG_FROM_BUF);
- }
-
- /* If an IV was generated, copy it back to the user provided
- * buffer.
- */
- if (areq_ctx->backup_giv) {
- if (ctx->cipher_mode == DRV_CIPHER_CTR)
- memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
- CTR_RFC3686_NONCE_SIZE,
- CTR_RFC3686_IV_SIZE);
- else if (ctx->cipher_mode == DRV_CIPHER_CCM)
- memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
- CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
- }
+ cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
+ skip, (skip + ctx->authsize),
+ CC_SG_FROM_BUF);
}
done:
aead_request_complete(areq, err);
@@ -327,7 +322,7 @@
/* Load the hash current length*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -415,7 +410,7 @@
/* This function prepares the user key so it can be passed to the hmac
* processing (copy to internal buffer, or hash it if the key is longer
* than the block size).
*/
-static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
unsigned int keylen)
{
dma_addr_t key_dma_addr = 0;
@@ -428,6 +423,7 @@
unsigned int hashmode;
unsigned int idx = 0;
int rc = 0;
+ u8 *key = NULL;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
dma_addr_t padded_authkey_dma_addr =
ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -446,11 +442,17 @@
}
if (keylen != 0) {
+
+ key = kmemdup(authkey, keylen, GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
key_dma_addr = dma_map_single(dev, (void *)key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
+ kzfree(key);
return -ENOMEM;
}
if (keylen > blocksize) {
@@ -465,7 +467,7 @@
/* Load the hash current length*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hashmode);
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -533,6 +535,8 @@
if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
+ kzfree(key);
+
return rc;
}
@@ -540,13 +544,12 @@
unsigned int keylen)
{
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct rtattr *rta = (struct rtattr *)key;
struct cc_crypto_req cc_req = {};
- struct crypto_authenc_key_param *param;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
- int rc = -EINVAL;
unsigned int seq_len = 0;
struct device *dev = drvdata_to_dev(ctx->drvdata);
+ const u8 *enckey, *authkey;
+ int rc;
dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
@@ -554,35 +557,33 @@
/* STAT_PHASE_0: Init and sanity checks */
if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
- if (!RTA_OK(rta, keylen))
+ struct crypto_authenc_keys keys;
+
+ rc = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (rc)
goto badkey;
- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
- param = RTA_DATA(rta);
- ctx->enc_keylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < ctx->enc_keylen)
- goto badkey;
- ctx->auth_keylen = keylen - ctx->enc_keylen;
+ enckey = keys.enckey;
+ authkey = keys.authkey;
+ ctx->enc_keylen = keys.enckeylen;
+ ctx->auth_keylen = keys.authkeylen;
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
/* the nonce is stored in bytes at end of key */
+ rc = -EINVAL;
if (ctx->enc_keylen <
(AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
goto badkey;
/* Copy nonce from last 4 bytes in CTR key to
* first 4 bytes in CTR IV
*/
- memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
- ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
- CTR_RFC3686_NONCE_SIZE);
+ memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
+ CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
/* Set CTR key size */
ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
}
} else { /* non-authenc - has just one key */
+ enckey = key;
+ authkey = NULL;
ctx->enc_keylen = keylen;
ctx->auth_keylen = 0;
}
@@ -594,13 +595,14 @@
/* STAT_PHASE_1: Copy key to ctx */
/* Get key material */
- memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+ memcpy(ctx->enckey, enckey, ctx->enc_keylen);
if (ctx->enc_keylen == 24)
memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
- memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+ memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
+ ctx->auth_keylen);
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
- rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+ rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
if (rc)
goto badkey;
}
@@ -643,6 +645,23 @@
return rc;
}
+static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ return err;
+
+ err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
+ cc_aead_setkey(aead, key, keylen);
+
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+}
+
static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -724,7 +743,7 @@
dev_dbg(dev, "ASSOC buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
- areq->assoclen, NS_BIT);
+ areq_ctx->assoclen, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
areq_ctx->cryptlen > 0)
@@ -1001,7 +1020,7 @@
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -1073,9 +1092,11 @@
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
+
/* Hash associated data */
- if (req->assoclen > 0)
+ if (areq_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* Hash IV */
@@ -1098,7 +1119,7 @@
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
set_cipher_do(&desc[idx], DO_PAD);
@@ -1128,7 +1149,7 @@
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -1152,9 +1173,9 @@
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
- !req_ctx->is_single_pass) {
+ !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
(unsigned int)ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
@@ -1303,7 +1324,7 @@
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- unsigned int assoclen = req->assoclen;
+ unsigned int assoclen = areq_ctx->assoclen;
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
@@ -1462,7 +1483,7 @@
idx++;
/* process assoc data */
- if (req->assoclen > 0) {
+ if (req_ctx->assoclen > 0) {
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
} else {
hw_desc_init(&desc[idx]);
@@ -1554,7 +1575,7 @@
* NIST Special Publication 800-38C
*/
*b0 |= (8 * ((m - 2) / 2));
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
@@ -1565,7 +1586,7 @@
/* END of "taken from crypto/ccm.c" */
/* l(a) - size of associated data. */
- req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+ req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
req->iv[15] = 1;
@@ -1597,7 +1618,7 @@
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= CCM_BLOCK_IV_SIZE;
+ areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
}
static void cc_set_ghash_desc(struct aead_request *req,
@@ -1805,7 +1826,7 @@
// for gcm and rfc4106.
cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
cc_set_gctr_desc(req, desc, seq_size);
/* process(gctr+ghash) */
@@ -1829,8 +1850,8 @@
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
- dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
- __func__, cryptlen, req->assoclen, ctx->authsize);
+ dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
+ __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@@ -1846,7 +1867,7 @@
if (!req_ctx->plaintext_authenticate_only) {
__be64 temp64;
- temp64 = cpu_to_be64(req->assoclen * 8);
+ temp64 = cpu_to_be64(req_ctx->assoclen * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1856,8 +1877,8 @@
*/
__be64 temp64;
- temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
- cryptlen) * 8);
+ temp64 = cpu_to_be64((req_ctx->assoclen +
+ GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1877,7 +1898,7 @@
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+ areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
static int cc_proc_aead(struct aead_request *req,
@@ -1902,7 +1923,7 @@
/* Check data length according to mode */
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
- req->cryptlen, req->assoclen);
+ req->cryptlen, areq_ctx->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -1924,9 +1945,8 @@
*/
memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
CTR_RFC3686_NONCE_SIZE);
- if (!areq_ctx->backup_giv) /*User none-generated IV*/
- memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
- req->iv, CTR_RFC3686_IV_SIZE);
+ memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+ CTR_RFC3686_IV_SIZE);
/* Initialize counter portion of counter block */
*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
@@ -1972,40 +1992,6 @@
goto exit;
}
- /* do we need to generate IV? */
- if (areq_ctx->backup_giv) {
- /* set the DMA mapped IV address*/
- if (ctx->cipher_mode == DRV_CIPHER_CTR) {
- cc_req.ivgen_dma_addr[0] =
- areq_ctx->gen_ctx.iv_dma_addr +
- CTR_RFC3686_NONCE_SIZE;
- cc_req.ivgen_dma_addr_len = 1;
- } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
- /* In ccm, the IV needs to exist both inside B0 and
- * inside the counter.It is also copied to iv_dma_addr
- * for other reasons (like returning it to the user).
- * So, using 3 (identical) IV outputs.
- */
- cc_req.ivgen_dma_addr[0] =
- areq_ctx->gen_ctx.iv_dma_addr +
- CCM_BLOCK_IV_OFFSET;
- cc_req.ivgen_dma_addr[1] =
- sg_dma_address(&areq_ctx->ccm_adata_sg) +
- CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
- cc_req.ivgen_dma_addr[2] =
- sg_dma_address(&areq_ctx->ccm_adata_sg) +
- CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
- cc_req.ivgen_dma_addr_len = 3;
- } else {
- cc_req.ivgen_dma_addr[0] =
- areq_ctx->gen_ctx.iv_dma_addr;
- cc_req.ivgen_dma_addr_len = 1;
- }
-
- /* set the IV size (8/16 B long)*/
- cc_req.ivgen_size = crypto_aead_ivsize(tfm);
- }
-
/* STAT_PHASE_2: Create sequence */
/* Load MLLI tables to SRAM if necessary */
@@ -2051,9 +2037,11 @@
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->backup_giv = NULL;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->is_gcm4543 = false;
areq_ctx->plaintext_authenticate_only = false;
@@ -2080,9 +2068,11 @@
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->backup_giv = NULL;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->is_gcm4543 = true;
cc_proc_rfc4309_ccm(req);
@@ -2099,9 +2089,11 @@
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->backup_giv = NULL;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->is_gcm4543 = false;
areq_ctx->plaintext_authenticate_only = false;
@@ -2126,9 +2118,11 @@
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->backup_giv = NULL;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->is_gcm4543 = true;
cc_proc_rfc4309_ccm(req);
@@ -2243,10 +2237,11 @@
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->backup_giv = NULL;
-
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->plaintext_authenticate_only = false;
cc_proc_rfc4_gcm(req);
@@ -2262,16 +2257,25 @@
static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
{
/* Very similar to cc_aead_encrypt() above. */
-
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- int rc;
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
//plaintext is not encrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->backup_giv = NULL;
+ areq_ctx->assoclen = req->assoclen;
cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
@@ -2279,7 +2283,7 @@
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
-
+out:
return rc;
}
@@ -2298,10 +2302,11 @@
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->backup_giv = NULL;
-
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->plaintext_authenticate_only = false;
cc_proc_rfc4_gcm(req);
@@ -2317,16 +2322,25 @@
static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
{
/* Very similar to cc_aead_decrypt() above. */
-
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- int rc;
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
//plaintext is not decrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
- areq_ctx->backup_giv = NULL;
+ areq_ctx->assoclen = req->assoclen;
cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
@@ -2334,7 +2348,7 @@
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
-
+out:
return rc;
}
@@ -2358,13 +2372,14 @@
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_SHA1,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
.template_aead = {
- .setkey = cc_aead_setkey,
+ .setkey = cc_des3_aead_setkey,
.setauthsize = cc_aead_setauthsize,
.encrypt = cc_aead_encrypt,
.decrypt = cc_aead_decrypt,
@@ -2377,6 +2392,7 @@
.flow_mode = S_DIN_to_DES,
.auth_mode = DRV_HASH_SHA1,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha256),cbc(aes))",
@@ -2396,13 +2412,14 @@
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_SHA256,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha256),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
.template_aead = {
- .setkey = cc_aead_setkey,
+ .setkey = cc_des3_aead_setkey,
.setauthsize = cc_aead_setauthsize,
.encrypt = cc_aead_encrypt,
.decrypt = cc_aead_decrypt,
@@ -2415,6 +2432,7 @@
.flow_mode = S_DIN_to_DES,
.auth_mode = DRV_HASH_SHA256,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(xcbc(aes),cbc(aes))",
@@ -2434,6 +2452,7 @@
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_XCBC_MAC,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
@@ -2453,6 +2472,7 @@
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_SHA1,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
@@ -2472,6 +2492,7 @@
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_SHA256,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
@@ -2491,6 +2512,7 @@
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_XCBC_MAC,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ccm(aes)",
@@ -2510,6 +2532,7 @@
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "rfc4309(ccm(aes))",
@@ -2529,6 +2552,7 @@
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "gcm(aes)",
@@ -2548,6 +2572,7 @@
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "rfc4106(gcm(aes))",
@@ -2567,6 +2592,7 @@
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "rfc4543(gcm(aes))",
@@ -2586,6 +2612,7 @@
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
};
@@ -2670,7 +2697,8 @@
/* Linux crypto */
for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
- if (aead_algs[alg].min_hw_rev > drvdata->hw_rev)
+ if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
+ !(drvdata->std_bodies & aead_algs[alg].std_body))
continue;
t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
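A note on the assoclen handling in this file: the rfc4309(ccm) and
rfc4543(gcm) wrappers carry the IV inside the associated data, and the
driver used to shrink req->assoclen in place. The request context now
holds a private copy instead, so the caller's request is left intact.
A minimal sketch of the pattern (hypothetical helper name, using the
CCM constants from the code above):

    static void example_snapshot_assoclen(struct aead_request *req)
    {
            struct aead_req_ctx *areq_ctx = aead_request_ctx(req);

            memset(areq_ctx, 0, sizeof(*areq_ctx));
            /* Keep a private working copy of the caller's value */
            areq_ctx->assoclen = req->assoclen;
            /* RFC 4309: the 8-byte IV is part of the on-the-wire AAD */
            areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
    }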
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index 5edf3b3..f12169b 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_aead.h
* ARM CryptoCell AEAD Crypto API
@@ -65,8 +65,8 @@
unsigned int hw_iv_size ____cacheline_aligned;
/* used to prevent cache coherence problem */
u8 backup_mac[MAX_MAC_SIZE];
- u8 *backup_iv; /*store iv for generated IV flow*/
- u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
+ u8 *backup_iv; /* store orig iv */
+ u32 assoclen; /* internal assoclen */
dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
/* buffer for internal ccm configurations */
dma_addr_t ccm_iv0_dma_addr;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index dd948e1..a72586e 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
@@ -65,7 +65,7 @@
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- u32 skip = req->assoclen + req->cryptlen;
+ u32 skip = areq_ctx->assoclen + req->cryptlen;
if (areq_ctx->is_gcm4543)
skip += crypto_aead_ivsize(tfm);
@@ -83,51 +83,23 @@
*/
static unsigned int cc_get_sgl_nents(struct device *dev,
struct scatterlist *sg_list,
- unsigned int nbytes, u32 *lbytes,
- bool *is_chained)
+ unsigned int nbytes, u32 *lbytes)
{
unsigned int nents = 0;
while (nbytes && sg_list) {
- if (sg_list->length) {
- nents++;
- /* get the number of bytes in the last entry */
- *lbytes = nbytes;
- nbytes -= (sg_list->length > nbytes) ?
- nbytes : sg_list->length;
- sg_list = sg_next(sg_list);
- } else {
- sg_list = (struct scatterlist *)sg_page(sg_list);
- if (is_chained)
- *is_chained = true;
- }
+ nents++;
+ /* get the number of bytes in the last entry */
+ *lbytes = nbytes;
+ nbytes -= (sg_list->length > nbytes) ?
+ nbytes : sg_list->length;
+ sg_list = sg_next(sg_list);
}
dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
return nents;
}
/**
- * cc_zero_sgl() - Zero scatter scatter list data.
- *
- * @sgl:
- */
-void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
-{
- struct scatterlist *current_sg = sgl;
- int sg_index = 0;
-
- while (sg_index <= data_len) {
- if (!current_sg) {
- /* reached the end of the sgl --> just return back */
- return;
- }
- memset(sg_virt(current_sg), 0, current_sg->length);
- sg_index += current_sg->length;
- current_sg = sg_next(current_sg);
- }
-}
-
-/**
* cc_copy_sg_portion() - Copy scatter list data,
* from to_skip to end, to dest and vice versa
*
@@ -140,9 +112,9 @@
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
- u32 nents, lbytes;
+ u32 nents;
- nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+ nents = sg_nents_for_len(sg, end);
sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
(direct == CC_SG_TO_BUF));
}
@@ -156,8 +128,11 @@
/* Verify there is no memory overflow*/
new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
- if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
+ if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
+ dev_err(dev, "Too many mlli entries. current %d max %d\n",
+ new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
return -ENOMEM;
+ }
/*handle buffer longer than 64 kbytes */
while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
@@ -311,40 +286,10 @@
sgl_data->num_of_buffers++;
}
-static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
- enum dma_data_direction direction)
-{
- u32 i, j;
- struct scatterlist *l_sg = sg;
-
- for (i = 0; i < nents; i++) {
- if (!l_sg)
- break;
- if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
- dev_err(dev, "dma_map_page() sg buffer failed\n");
- goto err;
- }
- l_sg = sg_next(l_sg);
- }
- return nents;
-
-err:
- /* Restore mapped parts */
- for (j = 0; j < i; j++) {
- if (!sg)
- break;
- dma_unmap_sg(dev, sg, 1, direction);
- sg = sg_next(sg);
- }
- return 0;
-}
-
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
unsigned int nbytes, int direction, u32 *nents,
u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
- bool is_chained = false;
-
if (sg_is_last(sg)) {
/* One entry only case -set to DLLI */
if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -358,35 +303,21 @@
*nents = 1;
*mapped_nents = 1;
} else { /*sg_is_last*/
- *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
- &is_chained);
+ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
if (*nents > max_sg_nents) {
*nents = 0;
dev_err(dev, "Too many fragments. current %d max %d\n",
*nents, max_sg_nents);
return -ENOMEM;
}
- if (!is_chained) {
- /* In case of mmu the number of mapped nents might
- * be changed from the original sgl nents
- */
- *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
- if (*mapped_nents == 0) {
- *nents = 0;
- dev_err(dev, "dma_map_sg() sg buffer failed\n");
- return -ENOMEM;
- }
- } else {
- /*In this case the driver maps entry by entry so it
- * must have the same nents before and after map
- */
- *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
- direction);
- if (*mapped_nents != *nents) {
- *nents = *mapped_nents;
- dev_err(dev, "dma_map_sg() sg buffer failed\n");
- return -ENOMEM;
- }
+ /* In case of mmu the number of mapped nents might
+ * be changed from the original sgl nents
+ */
+ *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+ if (*mapped_nents == 0) {
+ *nents = 0;
+ dev_err(dev, "dma_map_sg() sg buffer failed\n");
+ return -ENOMEM;
}
}
@@ -454,7 +385,7 @@
dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
&req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
- ivsize, DMA_TO_DEVICE);
+ ivsize, DMA_BIDIRECTIONAL);
}
/* Release pool */
if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -496,7 +427,7 @@
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
- ivsize, DMA_TO_DEVICE);
+ ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
@@ -511,10 +442,8 @@
/* Map the src SGL */
rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
goto cipher_exit;
- }
if (mapped_nents > 1)
req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
@@ -528,12 +457,11 @@
}
} else {
/* Map the dst sg */
- if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
- &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy, &mapped_nents)) {
- rc = -ENOMEM;
+ rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+ &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents);
+ if (rc)
goto cipher_exit;
- }
if (mapped_nents > 1)
req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
@@ -568,11 +496,7 @@
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- u32 dummy;
- bool chained;
- u32 size_to_unmap = 0;
if (areq_ctx->mac_buf_dma_addr) {
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -612,12 +536,13 @@
if (areq_ctx->gen_ctx.iv_dma_addr) {
dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
hw_iv_size, DMA_BIDIRECTIONAL);
+ kzfree(areq_ctx->gen_ctx.iv);
}
- /*In case a pool was set, a table was
- *allocated and should be released
- */
- if (areq_ctx->mlli_params.curr_pool) {
+ /* Release pool */
+ if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
+ (areq_ctx->mlli_params.mlli_virt_addr)) {
dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
&areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
@@ -628,23 +553,13 @@
dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
- req->assoclen, req->cryptlen);
- size_to_unmap = req->assoclen + req->cryptlen;
- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
- size_to_unmap += areq_ctx->req_authsize;
- if (areq_ctx->is_gcm4543)
- size_to_unmap += crypto_aead_ivsize(tfm);
+ areq_ctx->assoclen, req->cryptlen);
- dma_unmap_sg(dev, req->src,
- cc_get_sgl_nents(dev, req->src, size_to_unmap,
- &dummy, &chained),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst,
- cc_get_sgl_nents(dev, req->dst, size_to_unmap,
- &dummy, &chained),
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
@@ -658,55 +573,10 @@
}
}
-static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
- unsigned int sgl_nents, unsigned int authsize,
- u32 last_entry_data_size,
- bool *is_icv_fragmented)
+static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
+ u32 last_entry_data_size)
{
- unsigned int icv_max_size = 0;
- unsigned int icv_required_size = authsize > last_entry_data_size ?
- (authsize - last_entry_data_size) :
- authsize;
- unsigned int nents;
- unsigned int i;
-
- if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
- *is_icv_fragmented = false;
- return 0;
- }
-
- for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
- if (!sgl)
- break;
- sgl = sg_next(sgl);
- }
-
- if (sgl)
- icv_max_size = sgl->length;
-
- if (last_entry_data_size > authsize) {
- /* ICV attached to data in last entry (not fragmented!) */
- nents = 0;
- *is_icv_fragmented = false;
- } else if (last_entry_data_size == authsize) {
- /* ICV placed in whole last entry (not fragmented!) */
- nents = 1;
- *is_icv_fragmented = false;
- } else if (icv_max_size > icv_required_size) {
- nents = 1;
- *is_icv_fragmented = true;
- } else if (icv_max_size == icv_required_size) {
- nents = 2;
- *is_icv_fragmented = true;
- } else {
- dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
- MAX_ICV_NENTS_SUPPORTED);
- nents = -1; /*unsupported*/
- }
- dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
- (*is_icv_fragmented ? "true" : "false"), nents);
-
- return nents;
+ return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
@@ -717,19 +587,27 @@
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct device *dev = drvdata_to_dev(drvdata);
+ gfp_t flags = cc_gfp_flags(&req->base);
int rc = 0;
if (!req->iv) {
areq_ctx->gen_ctx.iv_dma_addr = 0;
+ areq_ctx->gen_ctx.iv = NULL;
goto chain_iv_exit;
}
- areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
- hw_iv_size,
- DMA_BIDIRECTIONAL);
+ areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
+ if (!areq_ctx->gen_ctx.iv)
+ return -ENOMEM;
+
+ areq_ctx->gen_ctx.iv_dma_addr =
+ dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
hw_iv_size, req->iv);
+ kzfree(areq_ctx->gen_ctx.iv);
+ areq_ctx->gen_ctx.iv = NULL;
rc = -ENOMEM;
goto chain_iv_exit;
}
@@ -760,11 +638,9 @@
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = 0;
- u32 mapped_nents = 0;
- struct scatterlist *current_sg = req->src;
+ int mapped_nents = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned int sg_index = 0;
- u32 size_of_assoc = req->assoclen;
+ unsigned int size_of_assoc = areq_ctx->assoclen;
struct device *dev = drvdata_to_dev(drvdata);
if (areq_ctx->is_gcm4543)
@@ -775,7 +651,7 @@
goto chain_assoc_exit;
}
- if (req->assoclen == 0) {
+ if (areq_ctx->assoclen == 0) {
areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
@@ -785,26 +661,10 @@
goto chain_assoc_exit;
}
- //iterate over the sgl to see how many entries are for associated data
- //it is assumed that if we reach here , the sgl is already mapped
- sg_index = current_sg->length;
- //the first entry in the scatter list contains all the associated data
- if (sg_index > size_of_assoc) {
- mapped_nents++;
- } else {
- while (sg_index <= size_of_assoc) {
- current_sg = sg_next(current_sg);
- /* if have reached the end of the sgl, then this is
- * unexpected
- */
- if (!current_sg) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
- sg_index += current_sg->length;
- mapped_nents++;
- }
- }
+ mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
+ if (mapped_nents < 0)
+ return mapped_nents;
+
if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -835,7 +695,7 @@
cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
- req->assoclen, 0, is_last,
+ areq_ctx->assoclen, 0, is_last,
&areq_ctx->assoc.mlli_nents);
areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
}
@@ -850,39 +710,32 @@
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
+ struct scatterlist *sg;
+ ssize_t offset;
areq_ctx->is_icv_fragmented = false;
- if (req->src == req->dst) {
- /*INPLACE*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
- /*NON-INPLACE and DECRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
+
+ if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ sg = areq_ctx->src_sgl;
+ offset = *src_last_bytes - authsize;
} else {
- /*NON-INPLACE and ENCRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
- (*dst_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
- (*dst_last_bytes - authsize);
+ sg = areq_ctx->dst_sgl;
+ offset = *dst_last_bytes - authsize;
}
+
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
+ areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}
-static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
- struct aead_request *req,
- struct buffer_array *sg_data,
- u32 *src_last_bytes, u32 *dst_last_bytes,
- bool is_last_table)
+static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ u32 *src_last_bytes, u32 *dst_last_bytes,
+ bool is_last_table)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
- int rc = 0, icv_nents;
struct device *dev = drvdata_to_dev(drvdata);
struct scatterlist *sg;
@@ -893,14 +746,9 @@
areq_ctx->src_offset, is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->src.nents, authsize,
+ *src_last_bytes);
if (areq_ctx->is_icv_fragmented) {
/* Backup happens only when ICV is fragmented, ICV
@@ -942,16 +790,11 @@
areq_ctx->dst_offset, is_last_table,
&areq_ctx->dst.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
-
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->src.nents, authsize,
+ *src_last_bytes);
+
/* Backup happens only when ICV is fragmented, ICV
* verification is made by CPU compare in order to simplify
* MAC verification upon request completion
*/
@@ -979,14 +822,9 @@
areq_ctx->src_offset, is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
- areq_ctx->dst.nents,
- authsize, *dst_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->dst.nents, authsize,
+ *dst_last_bytes);
if (!areq_ctx->is_icv_fragmented) {
sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
@@ -1000,9 +838,6 @@
areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
}
}
-
-prepare_data_mlli_exit:
- return rc;
}
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
@@ -1019,12 +854,12 @@
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
u32 offset = 0;
/* non-inplace mode */
- unsigned int size_for_map = req->assoclen + req->cryptlen;
+ unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 sg_index = 0;
- bool chained = false;
bool is_gcm4543 = areq_ctx->is_gcm4543;
- u32 size_to_skip = req->assoclen;
+ u32 size_to_skip = areq_ctx->assoclen;
+ struct scatterlist *sgl;
if (is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
@@ -1043,19 +878,17 @@
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
- &src_last_bytes, &chained);
+ &src_last_bytes);
sg_index = areq_ctx->src_sgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
- offset -= areq_ctx->src_sgl->length;
- areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
- //if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->src_sgl) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
- sg_index += areq_ctx->src_sgl->length;
src_mapped_nents--;
+ offset -= areq_ctx->src_sgl->length;
+ sgl = sg_next(areq_ctx->src_sgl);
+ if (!sgl)
+ break;
+ areq_ctx->src_sgl = sgl;
+ sg_index += areq_ctx->src_sgl->length;
}
if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1068,7 +901,7 @@
areq_ctx->src_offset = offset;
if (req->src != req->dst) {
- size_for_map = req->assoclen + req->cryptlen;
+ size_for_map = areq_ctx->assoclen + req->cryptlen;
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
if (is_gcm4543)
@@ -1078,28 +911,24 @@
&areq_ctx->dst.nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
&dst_mapped_nents);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
goto chain_data_exit;
- }
}
dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
- &dst_last_bytes, &chained);
+ &dst_last_bytes);
sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip;
//check where the data starts
while (sg_index <= size_to_skip) {
- offset -= areq_ctx->dst_sgl->length;
- areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
- //if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->dst_sgl) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
- sg_index += areq_ctx->dst_sgl->length;
dst_mapped_nents--;
+ offset -= areq_ctx->dst_sgl->length;
+ sgl = sg_next(areq_ctx->dst_sgl);
+ if (!sgl)
+ break;
+ areq_ctx->dst_sgl = sgl;
+ sg_index += areq_ctx->dst_sgl->length;
}
if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1112,9 +941,9 @@
dst_mapped_nents > 1 ||
do_chain) {
areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
- rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
- &src_last_bytes,
- &dst_last_bytes, is_last_table);
+ cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+ &src_last_bytes, &dst_last_bytes,
+ is_last_table);
} else {
areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
cc_prepare_aead_data_dlli(req, &src_last_bytes,
@@ -1235,11 +1064,10 @@
}
areq_ctx->ccm_iv0_dma_addr = dma_addr;
- if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
- &sg_data, req->assoclen)) {
- rc = -ENOMEM;
+ rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+ &sg_data, areq_ctx->assoclen);
+ if (rc)
goto aead_map_failure;
- }
}
if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
@@ -1288,7 +1116,7 @@
areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
}
- size_to_map = req->cryptlen + req->assoclen;
+ size_to_map = req->cryptlen + areq_ctx->assoclen;
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_to_map += authsize;
@@ -1299,10 +1127,8 @@
(LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
LLI_MAX_NUM_OF_DATA_ENTRIES),
&dummy, &mapped_nents);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
goto aead_map_failure;
- }
if (areq_ctx->is_single_pass) {
/*
@@ -1386,6 +1212,7 @@
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
@@ -1405,18 +1232,18 @@
/*TODO: copy data in case that buffer is enough for operation */
/* map the previous buffer */
if (*curr_buff_cnt) {
- if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
- &sg_data)) {
- return -ENOMEM;
- }
+ rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data);
+ if (rc)
+ return rc;
}
if (src && nbytes > 0 && do_update) {
- if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
- &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy, &mapped_nents)) {
+ rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents);
+ if (rc)
goto unmap_curr_buff;
- }
if (src && mapped_nents == 1 &&
areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
memcpy(areq_ctx->buff_sg, src,
@@ -1435,7 +1262,8 @@
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
0, true, &areq_ctx->mlli_nents);
- if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
goto fail_unmap_din;
}
/* change the buffer index for the unmap function */
@@ -1451,7 +1279,7 @@
if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
- return -ENOMEM;
+ return rc;
}
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
@@ -1470,6 +1298,7 @@
struct buffer_array sg_data;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
unsigned int swap_index = 0;
+ int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
@@ -1485,8 +1314,7 @@
if (total_in_len < block_size) {
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
- areq_ctx->in_nents =
- cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+ areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
@@ -1514,21 +1342,21 @@
}
if (*curr_buff_cnt) {
- if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
- &sg_data)) {
- return -ENOMEM;
- }
+ rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data);
+ if (rc)
+ return rc;
/* change the buffer index for next operation */
swap_index = 1;
}
if (update_data_len > *curr_buff_cnt) {
- if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
- DMA_TO_DEVICE, &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
- &mapped_nents)) {
+ rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE, &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents);
+ if (rc)
goto unmap_curr_buff;
- }
if (mapped_nents == 1 &&
areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
/* only one entry in the SG and no previous data */
@@ -1548,7 +1376,8 @@
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
(update_data_len - *curr_buff_cnt), 0, true,
&areq_ctx->mlli_nents);
- if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
goto fail_unmap_din;
}
areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@@ -1562,7 +1391,7 @@
if (*curr_buff_cnt)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
- return -ENOMEM;
+ return rc;
}
void cc_unmap_hash_request(struct device *dev, void *ctx,
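A note on the scatterlist changes in this file: the chain-aware private
walkers are gone in favour of the stock helpers, which also report
errors the callers can now propagate directly. A minimal sketch of the
two calls relied upon (illustration only; example_sg_helpers is a
hypothetical name):

    #include <linux/scatterlist.h>

    static int example_sg_helpers(struct scatterlist *sg, unsigned int len)
    {
            int nents;

            /* Entries needed to cover len bytes; -EINVAL if sg is shorter */
            nents = sg_nents_for_len(sg, len);
            if (nents < 0)
                    return nents;

            /* Zero len bytes starting at offset 0, as the auth-failure
             * path now does via sg_zero_buffer() instead of cc_zero_sgl()
             */
            sg_zero_buffer(sg, nents, len, 0);
            return 0;
    }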
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index 3ec4b4d..af43487 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_buffer_mgr.h
* Buffer Manager
@@ -66,6 +66,4 @@
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);
-void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
-
#endif /*__BUFFER_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 7623b29..254b487 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -1,12 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/des.h>
+#include <crypto/internal/des.h>
#include <crypto/xts.h>
+#include <crypto/sm4.h>
#include <crypto/scatterwalk.h>
#include "cc_driver.h"
@@ -33,6 +34,18 @@
enum cc_hw_crypto_key key2_slot;
};
+struct cc_cpp_key_info {
+ u8 slot;
+ enum cc_cpp_alg alg;
+};
+
+enum cc_key_type {
+ CC_UNPROTECTED_KEY, /* User key */
+ CC_HW_PROTECTED_KEY, /* HW (FDE) key */
+ CC_POLICY_PROTECTED_KEY, /* CPP key */
+ CC_INVALID_PROTECTED_KEY /* Invalid key */
+};
+
struct cc_cipher_ctx {
struct cc_drvdata *drvdata;
int keylen;
@@ -40,19 +53,22 @@
int cipher_mode;
int flow_mode;
unsigned int flags;
- bool hw_key;
+ enum cc_key_type key_type;
struct cc_user_key_info user;
- struct cc_hw_key_info hw;
+ union {
+ struct cc_hw_key_info hw;
+ struct cc_cpp_key_info cpp;
+ };
struct crypto_shash *shash_tfm;
};
static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
-static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+static inline enum cc_key_type cc_key_type(struct crypto_tfm *tfm)
{
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- return ctx_p->hw_key;
+ return ctx_p->key_type;
}
static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
@@ -79,10 +95,14 @@
default:
break;
}
+ break;
case S_DIN_to_DES:
if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
return 0;
break;
+ case S_DIN_to_SM4:
+ if (size == SM4_KEY_SIZE)
+ return 0;
default:
break;
}
@@ -96,10 +116,6 @@
case S_DIN_to_AES:
switch (ctx_p->cipher_mode) {
case DRV_CIPHER_XTS:
- if (size >= AES_BLOCK_SIZE &&
- IS_ALIGNED(size, AES_BLOCK_SIZE))
- return 0;
- break;
case DRV_CIPHER_CBC_CTS:
if (size >= AES_BLOCK_SIZE)
return 0;
@@ -122,6 +138,17 @@
if (IS_ALIGNED(size, DES_BLOCK_SIZE))
return 0;
break;
+ case S_DIN_to_SM4:
+ switch (ctx_p->cipher_mode) {
+ case DRV_CIPHER_CTR:
+ return 0;
+ case DRV_CIPHER_ECB:
+ case DRV_CIPHER_CBC:
+ if (IS_ALIGNED(size, SM4_BLOCK_SIZE))
+ return 0;
+ default:
+ break;
+ }
default:
break;
}
@@ -216,7 +243,7 @@
u8 key3[DES_KEY_SIZE];
};
-static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
+static enum cc_hw_crypto_key cc_slot_to_hw_key(u8 slot_num)
{
switch (slot_num) {
case 0:
@@ -231,6 +258,22 @@
return END_OF_KEYS;
}
+static u8 cc_slot_to_cpp_key(u8 slot_num)
+{
+ return (slot_num - CC_FIRST_CPP_KEY_SLOT);
+}
+
+static inline enum cc_key_type cc_slot_to_key_type(u8 slot_num)
+{
+ if (slot_num >= CC_FIRST_HW_KEY_SLOT && slot_num <= CC_LAST_HW_KEY_SLOT)
+ return CC_HW_PROTECTED_KEY;
+ else if (slot_num >= CC_FIRST_CPP_KEY_SLOT &&
+ slot_num <= CC_LAST_CPP_KEY_SLOT)
+ return CC_POLICY_PROTECTED_KEY;
+ else
+ return CC_INVALID_PROTECTED_KEY;
+}
+
static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
unsigned int keylen)
{
@@ -245,18 +288,13 @@
/* STAT_PHASE_0: Init and sanity checks */
- /* This check the size of the hardware key token */
+ /* This checks the size of the protected key token */
if (keylen != sizeof(hki)) {
- dev_err(dev, "Unsupported HW key size %d.\n", keylen);
+ dev_err(dev, "Unsupported protected key size %d.\n", keylen);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- if (ctx_p->flow_mode != S_DIN_to_AES) {
- dev_err(dev, "HW key not supported for non-AES flows\n");
- return -EINVAL;
- }
-
memcpy(&hki, key, keylen);
/* The real key len for crypto op is the size of the HW key
@@ -270,32 +308,71 @@
return -EINVAL;
}
- ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
- if (ctx_p->hw.key1_slot == END_OF_KEYS) {
- dev_err(dev, "Unsupported hw key1 number (%d)\n", hki.hw_key1);
+ ctx_p->keylen = keylen;
+
+ switch (cc_slot_to_key_type(hki.hw_key1)) {
+ case CC_HW_PROTECTED_KEY:
+ if (ctx_p->flow_mode == S_DIN_to_SM4) {
+ dev_err(dev, "Only AES HW protected keys are supported\n");
+ return -EINVAL;
+ }
+
+ ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
+ if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key1 number (%d)\n",
+ hki.hw_key1);
+ return -EINVAL;
+ }
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+ if (hki.hw_key1 == hki.hw_key2) {
+ dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+ hki.hw_key1, hki.hw_key2);
+ return -EINVAL;
+ }
+
+ ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
+ if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key2 number (%d)\n",
+ hki.hw_key2);
+ return -EINVAL;
+ }
+ }
+
+ ctx_p->key_type = CC_HW_PROTECTED_KEY;
+ dev_dbg(dev, "HW protected key %d/%d set\n.",
+ ctx_p->hw.key1_slot, ctx_p->hw.key2_slot);
+ break;
+
+ case CC_POLICY_PROTECTED_KEY:
+ if (ctx_p->drvdata->hw_rev < CC_HW_REV_713) {
+ dev_err(dev, "CPP keys not supported in this hardware revision.\n");
+ return -EINVAL;
+ }
+
+ if (ctx_p->cipher_mode != DRV_CIPHER_CBC &&
+ ctx_p->cipher_mode != DRV_CIPHER_CTR) {
+ dev_err(dev, "CPP keys only supported in CBC or CTR modes.\n");
+ return -EINVAL;
+ }
+
+ ctx_p->cpp.slot = cc_slot_to_cpp_key(hki.hw_key1);
+ if (ctx_p->flow_mode == S_DIN_to_AES)
+ ctx_p->cpp.alg = CC_CPP_AES;
+ else /* Must be SM4, due to sethkey registration */
+ ctx_p->cpp.alg = CC_CPP_SM4;
+ ctx_p->key_type = CC_POLICY_PROTECTED_KEY;
+ dev_dbg(dev, "policy protected key alg: %d slot: %d.\n",
+ ctx_p->cpp.alg, ctx_p->cpp.slot);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported protected key (%d)\n", hki.hw_key1);
return -EINVAL;
}
- if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
- ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
- ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
- if (hki.hw_key1 == hki.hw_key2) {
- dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
- hki.hw_key1, hki.hw_key2);
- return -EINVAL;
- }
- ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
- if (ctx_p->hw.key2_slot == END_OF_KEYS) {
- dev_err(dev, "Unsupported hw key2 number (%d)\n",
- hki.hw_key2);
- return -EINVAL;
- }
- }
-
- ctx_p->keylen = keylen;
- ctx_p->hw_key = true;
- dev_dbg(dev, "cc_is_hw_key ret 0");
-
return 0;
}
@@ -305,7 +382,6 @@
struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
- u32 tmp[DES3_EDE_EXPKEY_WORDS];
struct cc_crypto_alg *cc_alg =
container_of(tfm->__crt_alg, struct cc_crypto_alg,
skcipher_alg.base);
@@ -323,7 +399,7 @@
return -EINVAL;
}
- ctx_p->hw_key = false;
+ ctx_p->key_type = CC_UNPROTECTED_KEY;
/*
* Verify DES weak keys
@@ -331,14 +407,9 @@
* HW does the expansion on its own.
*/
if (ctx_p->flow_mode == S_DIN_to_DES) {
- if (keylen == DES3_EDE_KEY_SIZE &&
- __des3_ede_setkey(tmp, &tfm->crt_flags, key,
- DES3_EDE_KEY_SIZE)) {
- dev_dbg(dev, "weak 3DES key");
- return -EINVAL;
- } else if (!des_ekey(tmp, key) &&
- (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ if ((keylen == DES3_EDE_KEY_SIZE &&
+ verify_skcipher_des3_key(sktfm, key)) ||
+ verify_skcipher_des_key(sktfm, key)) {
dev_dbg(dev, "weak DES key");
return -EINVAL;
}
@@ -382,7 +453,77 @@
return 0;
}
-static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+static int cc_out_setup_mode(struct cc_cipher_ctx *ctx_p)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ return S_AES_to_DOUT;
+ case S_DIN_to_DES:
+ return S_DES_to_DOUT;
+ case S_DIN_to_SM4:
+ return S_SM4_to_DOUT;
+ default:
+ return ctx_p->flow_mode;
+ }
+}
+
+static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = cc_out_setup_mode(ctx_p);
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+
+ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY)
+ return;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ /* Read next IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_dout_dlli(&desc[*seq_size], iv_dma_addr, ivsize, NS_BIT, 1);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ if (cipher_mode == DRV_CIPHER_CTR ||
+ cipher_mode == DRV_CIPHER_OFB) {
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+ } else {
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE0);
+ }
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ (*seq_size)++;
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ /* IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_dout_dlli(&desc[*seq_size], iv_dma_addr, CC_AES_BLOCK_SIZE,
+ NS_BIT, 1);
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+static void cc_setup_state_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
unsigned int ivsize, unsigned int nbytes,
struct cc_hw_desc desc[],
@@ -406,11 +547,13 @@
du_size = cc_alg->data_unit;
switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
case DRV_CIPHER_CBC:
case DRV_CIPHER_CBC_CTS:
case DRV_CIPHER_CTR:
case DRV_CIPHER_OFB:
- /* Load cipher state */
+ /* Load IV */
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
NS_BIT);
@@ -424,57 +567,15 @@
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
}
(*seq_size)++;
- /*FALLTHROUGH*/
- case DRV_CIPHER_ECB:
- /* Load key */
- hw_desc_init(&desc[*seq_size]);
- set_cipher_mode(&desc[*seq_size], cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- if (flow_mode == S_DIN_to_AES) {
- if (cc_is_hw_key(tfm)) {
- set_hw_crypto_key(&desc[*seq_size],
- ctx_p->hw.key1_slot);
- } else {
- set_din_type(&desc[*seq_size], DMA_DLLI,
- key_dma_addr, ((key_len == 24) ?
- AES_MAX_KEY_SIZE :
- key_len), NS_BIT);
- }
- set_key_size_aes(&desc[*seq_size], key_len);
- } else {
- /*des*/
- set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
- key_len, NS_BIT);
- set_key_size_des(&desc[*seq_size], key_len);
- }
- set_flow_mode(&desc[*seq_size], flow_mode);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
- (*seq_size)++;
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
- /* Load AES key */
- hw_desc_init(&desc[*seq_size]);
- set_cipher_mode(&desc[*seq_size], cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- if (cc_is_hw_key(tfm)) {
- set_hw_crypto_key(&desc[*seq_size],
- ctx_p->hw.key1_slot);
- } else {
- set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
- (key_len / 2), NS_BIT);
- }
- set_key_size_aes(&desc[*seq_size], (key_len / 2));
- set_flow_mode(&desc[*seq_size], flow_mode);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
- (*seq_size)++;
-
/* load XEX key */
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
- if (cc_is_hw_key(tfm)) {
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
set_hw_crypto_key(&desc[*seq_size],
ctx_p->hw.key2_slot);
} else {
@@ -488,7 +589,7 @@
set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
(*seq_size)++;
- /* Set state */
+ /* Load IV */
hw_desc_init(&desc[*seq_size]);
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -504,45 +605,113 @@
}
}
-static void cc_setup_cipher_data(struct crypto_tfm *tfm,
- struct cipher_req_ctx *req_ctx,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- void *areq, struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static int cc_out_flow_mode(struct cc_cipher_ctx *ctx_p)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ return DIN_AES_DOUT;
+ case S_DIN_to_DES:
+ return DIN_DES_DOUT;
+ case S_DIN_to_SM4:
+ return DIN_SM4_DOUT;
+ default:
+ return ctx_p->flow_mode;
+ }
+}
+
+static void cc_setup_key_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int nbytes, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
- unsigned int flow_mode = ctx_p->flow_mode;
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ unsigned int din_size;
- switch (ctx_p->flow_mode) {
- case S_DIN_to_AES:
- flow_mode = DIN_AES_DOUT;
- break;
- case S_DIN_to_DES:
- flow_mode = DIN_DES_DOUT;
- break;
- default:
- dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
- return;
- }
- /* Process */
- if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
- dev_dbg(dev, " data params addr %pad length 0x%X\n",
- &sg_dma_address(src), nbytes);
- dev_dbg(dev, " data params addr %pad length 0x%X\n",
- &sg_dma_address(dst), nbytes);
+ switch (cipher_mode) {
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ case DRV_CIPHER_ECB:
+ /* Load key */
hw_desc_init(&desc[*seq_size]);
- set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
- nbytes, NS_BIT);
- set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
- nbytes, NS_BIT, (!areq ? 0 : 1));
- if (areq)
- set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_key_type(tfm) == CC_POLICY_PROTECTED_KEY) {
+ /* We use the AES key size coding for all CPP algs */
+ set_key_size_aes(&desc[*seq_size], key_len);
+ set_cpp_crypto_key(&desc[*seq_size], ctx_p->cpp.slot);
+ flow_mode = cc_out_flow_mode(ctx_p);
+ } else {
+ if (flow_mode == S_DIN_to_AES) {
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+ /* CC_UNPROTECTED_KEY
+ * Invalid keys are filtered out in
+ * sethkey()
+ */
+ din_size = (key_len == 24) ?
+ AES_MAX_KEY_SIZE : key_len;
+
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, din_size,
+ NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], key_len);
+ } else {
+ /*des*/
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, key_len, NS_BIT);
+ set_key_size_des(&desc[*seq_size], key_len);
+ }
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ }
set_flow_mode(&desc[*seq_size], flow_mode);
(*seq_size)++;
- } else {
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ /* Load AES key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+ set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+ (key_len / 2), NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
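For a feel of the key split used in the XTS/ESSIV/bitlocker branch above: the API hands over a single buffer holding two concatenated AES keys, so each HW load descriptor takes key_len / 2 of it (data key first, then the XEX/tweak key). A minimal userspace sketch with illustrative sizes only:

#include <stdio.h>

int main(void)
{
	/* xts(aes) key sizes: 2 x AES-128 and 2 x AES-256 */
	unsigned int keylens[] = { 32, 64 };
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("xts key %2u bytes -> data key %2u + XEX key %2u\n",
		       keylens[i], keylens[i] / 2, keylens[i] / 2);
	return 0;
}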
+static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, void *areq,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
/* bypass */
dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
&req_ctx->mlli_params.mlli_dma_addr,
@@ -557,7 +726,38 @@
req_ctx->mlli_params.mlli_len);
set_flow_mode(&desc[*seq_size], BYPASS);
(*seq_size)++;
+ }
+}
+
+static void cc_setup_flow_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ unsigned int flow_mode = cc_out_flow_mode(ctx_p);
+ bool last_desc = (ctx_p->key_type == CC_POLICY_PROTECTED_KEY ||
+ ctx_p->cipher_mode == DRV_CIPHER_ECB);
+
+ /* Process */
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(src), nbytes);
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(dst), nbytes);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+ nbytes, NS_BIT);
+ set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+ nbytes, NS_BIT, (!last_desc ? 0 : 1));
+ if (last_desc)
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ (*seq_size)++;
+ } else {
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_MLLI,
ctx_p->drvdata->mlli_sram_addr,
@@ -569,7 +769,7 @@
set_dout_mlli(&desc[*seq_size],
ctx_p->drvdata->mlli_sram_addr,
req_ctx->in_mlli_nents, NS_BIT,
- (!areq ? 0 : 1));
+ (!last_desc ? 0 : 1));
} else {
dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
(unsigned int)ctx_p->drvdata->mlli_sram_addr,
@@ -580,9 +780,9 @@
(LLI_ENTRY_BYTE_SIZE *
req_ctx->in_mlli_nents)),
req_ctx->out_mlli_nents, NS_BIT,
- (!areq ? 0 : 1));
+ (!last_desc ? 0 : 1));
}
- if (areq)
+ if (last_desc)
set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
set_flow_mode(&desc[*seq_size], flow_mode);
@@ -590,38 +790,6 @@
}
}
-/*
- * Update a CTR-AES 128 bit counter
- */
-static void cc_update_ctr(u8 *ctr, unsigned int increment)
-{
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
- IS_ALIGNED((unsigned long)ctr, 8)) {
-
- __be64 *high_be = (__be64 *)ctr;
- __be64 *low_be = high_be + 1;
- u64 orig_low = __be64_to_cpu(*low_be);
- u64 new_low = orig_low + (u64)increment;
-
- *low_be = __cpu_to_be64(new_low);
-
- if (new_low < orig_low)
- *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
- } else {
- u8 *pos = (ctr + AES_BLOCK_SIZE);
- u8 val;
- unsigned int size;
-
- for (; increment; increment--)
- for (size = AES_BLOCK_SIZE; size; size--) {
- val = *--pos + 1;
- *pos = val;
- if (val)
- break;
- }
- }
-}
-
static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
struct skcipher_request *req = (struct skcipher_request *)cc_req;
@@ -629,43 +797,15 @@
struct scatterlist *src = req->src;
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
- struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
- unsigned int len;
- switch (ctx_p->cipher_mode) {
- case DRV_CIPHER_CBC:
- /*
- * The crypto API expects us to set the req->iv to the last
- * ciphertext block. For encrypt, simply copy from the result.
- * For decrypt, we must copy from a saved buffer since this
- * could be an in-place decryption operation and the src is
- * lost by this point.
- */
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- memcpy(req->iv, req_ctx->backup_info, ivsize);
- kzfree(req_ctx->backup_info);
- } else if (!err) {
- len = req->cryptlen - ivsize;
- scatterwalk_map_and_copy(req->iv, req->dst, len,
- ivsize, 0);
- }
- break;
-
- case DRV_CIPHER_CTR:
- /* Compute the counter of the last block */
- len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
- cc_update_ctr((u8 *)req->iv, len);
- break;
-
- default:
- break;
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ memcpy(req->iv, req_ctx->iv, ivsize);
+ kzfree(req_ctx->iv);
}
- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
- kzfree(req_ctx->iv);
-
skcipher_request_complete(req, err);
}
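The -EINPROGRESS early-outs above implement the crypto API backlog contract: a backlogged request completes twice, first with -EINPROGRESS when it leaves the backlog, then again with the final status. A minimal sketch of a callback honoring that contract (demo_ctx and demo_complete are hypothetical names, not part of this driver):

#include <linux/crypto.h>
#include <linux/completion.h>

struct demo_ctx {
	struct completion done;
	int status;
};

static void demo_complete(struct crypto_async_request *req, int err)
{
	struct demo_ctx *ctx = req->data;

	/* Backlog notification: the request merely left the backlog.
	 * Do not unmap or free anything yet; a second call follows.
	 */
	if (err == -EINPROGRESS)
		return;

	ctx->status = err;	/* final status */
	complete(&ctx->done);
}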
@@ -720,6 +860,13 @@
cc_req.user_cb = (void *)cc_cipher_complete;
cc_req.user_arg = (void *)req;
+ /* Setup CPP operation details */
+ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) {
+ cc_req.cpp.is_cpp = true;
+ cc_req.cpp.alg = ctx_p->cpp.alg;
+ cc_req.cpp.slot = ctx_p->cpp.slot;
+ }
+
/* Setup request context */
req_ctx->gen_ctx.op_type = direction;
@@ -734,11 +881,16 @@
/* STAT_PHASE_2: Create sequence */
- /* Setup processing */
- cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Setup IV and XEX key used */
+ cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Setup MLLI line, if needed */
+ cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
+ /* Setup key */
+ cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
/* Data processing */
- cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
- &seq_len);
+ cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
+ /* Read next IV */
+ cc_setup_readiv_desc(tfm, req_ctx, ivsize, desc, &seq_len);
/* STAT_PHASE_3: Lock HW and push sequence */
@@ -753,7 +905,6 @@
exit_process:
if (rc != -EINPROGRESS && rc != -EBUSY) {
- kzfree(req_ctx->backup_info);
kzfree(req_ctx->iv);
}
@@ -771,30 +922,10 @@
static int cc_cipher_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
- struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
- gfp_t flags = cc_gfp_flags(&req->base);
- unsigned int len;
memset(req_ctx, 0, sizeof(*req_ctx));
- if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
-
- /* Allocate and save the last IV sized bytes of the source,
- * which will be lost in case of in-place decryption.
- */
- req_ctx->backup_info = kzalloc(ivsize, flags);
- if (!req_ctx->backup_info)
- return -ENOMEM;
-
- len = req->cryptlen - ivsize;
- scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
- ivsize, 0);
- }
-
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
@@ -803,7 +934,7 @@
{
.name = "xts(paes)",
.driver_name = "xts-paes-ccree",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -815,11 +946,13 @@
.cipher_mode = DRV_CIPHER_XTS,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts512(paes)",
.driver_name = "xts-paes-du512-ccree",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -832,11 +965,13 @@
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts4096(paes)",
.driver_name = "xts-paes-du4096-ccree",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -849,6 +984,8 @@
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv(paes)",
@@ -865,6 +1002,8 @@
.cipher_mode = DRV_CIPHER_ESSIV,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv512(paes)",
@@ -882,6 +1021,8 @@
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv4096(paes)",
@@ -899,6 +1040,8 @@
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker(paes)",
@@ -915,6 +1058,8 @@
.cipher_mode = DRV_CIPHER_BITLOCKER,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker512(paes)",
@@ -932,6 +1077,8 @@
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker4096(paes)",
@@ -949,6 +1096,8 @@
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ecb(paes)",
@@ -965,6 +1114,8 @@
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "cbc(paes)",
@@ -981,6 +1132,8 @@
.cipher_mode = DRV_CIPHER_CBC,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ofb(paes)",
@@ -997,6 +1150,8 @@
.cipher_mode = DRV_CIPHER_OFB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "cts(cbc(paes))",
@@ -1013,6 +1168,8 @@
.cipher_mode = DRV_CIPHER_CBC_CTS,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ctr(paes)",
@@ -1029,11 +1186,13 @@
.cipher_mode = DRV_CIPHER_CTR,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts(aes)",
.driver_name = "xts-aes-ccree",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1045,11 +1204,12 @@
.cipher_mode = DRV_CIPHER_XTS,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "xts512(aes)",
.driver_name = "xts-aes-du512-ccree",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1062,11 +1222,12 @@
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "xts4096(aes)",
.driver_name = "xts-aes-du4096-ccree",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1079,6 +1240,7 @@
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv(aes)",
@@ -1095,6 +1257,7 @@
.cipher_mode = DRV_CIPHER_ESSIV,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv512(aes)",
@@ -1112,6 +1275,7 @@
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv4096(aes)",
@@ -1129,6 +1293,7 @@
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker(aes)",
@@ -1145,6 +1310,7 @@
.cipher_mode = DRV_CIPHER_BITLOCKER,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker512(aes)",
@@ -1162,6 +1328,7 @@
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker4096(aes)",
@@ -1179,6 +1346,7 @@
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "ecb(aes)",
@@ -1195,6 +1363,7 @@
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "cbc(aes)",
@@ -1211,6 +1380,7 @@
.cipher_mode = DRV_CIPHER_CBC,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ofb(aes)",
@@ -1227,6 +1397,7 @@
.cipher_mode = DRV_CIPHER_OFB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "cts(cbc(aes))",
@@ -1243,6 +1414,7 @@
.cipher_mode = DRV_CIPHER_CBC_CTS,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ctr(aes)",
@@ -1259,6 +1431,7 @@
.cipher_mode = DRV_CIPHER_CTR,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "cbc(des3_ede)",
@@ -1275,6 +1448,7 @@
.cipher_mode = DRV_CIPHER_CBC,
.flow_mode = S_DIN_to_DES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ecb(des3_ede)",
@@ -1291,6 +1465,7 @@
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_DES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "cbc(des)",
@@ -1307,6 +1482,7 @@
.cipher_mode = DRV_CIPHER_CBC,
.flow_mode = S_DIN_to_DES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ecb(des)",
@@ -1323,6 +1499,94 @@
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_DES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "cbc(sm4)",
+ .driver_name = "cbc-sm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ },
+ {
+ .name = "ecb(sm4)",
+ .driver_name = "ecb-sm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ },
+ {
+ .name = "ctr(sm4)",
+ .driver_name = "ctr-sm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ },
+ {
+ .name = "cbc(psm4)",
+ .driver_name = "cbc-psm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ .sec_func = true,
+ },
+ {
+ .name = "ctr(psm4)",
+ .driver_name = "ctr-psm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ .sec_func = true,
},
};
@@ -1398,7 +1662,9 @@
dev_dbg(dev, "Number of algorithms = %zu\n",
ARRAY_SIZE(skcipher_algs));
for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
- if (skcipher_algs[alg].min_hw_rev > drvdata->hw_rev)
+ if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) ||
+ !(drvdata->std_bodies & skcipher_algs[alg].std_body) ||
+ (drvdata->sec_disabled && skcipher_algs[alg].sec_func))
continue;
dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 4dbc0a1..da3a387 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_cipher.h
* ARM CryptoCell Cipher Crypto API
@@ -20,7 +20,6 @@
u32 in_mlli_nents;
u32 out_nents;
u32 out_mlli_nents;
- u8 *backup_info; /*store iv for generated IV flow*/
u8 *iv;
struct mlli_params mlli_params;
};
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
index e032544..ccf960a 100644
--- a/drivers/crypto/ccree/cc_crypto_ctx.h
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef _CC_CRYPTO_CTX_H_
#define _CC_CRYPTO_CTX_H_
@@ -55,6 +55,14 @@
#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
+#define CC_CPP_NUM_SLOTS 8
+#define CC_CPP_NUM_ALGS 2
+
+enum cc_cpp_alg {
+ CC_CPP_SM4 = 1,
+ CC_CPP_AES = 0
+};
+
enum drv_engine_type {
DRV_ENGINE_NULL = 0,
DRV_ENGINE_AES = 1,
@@ -115,7 +123,8 @@
DRV_HASH_CBC_MAC = 6,
DRV_HASH_XCBC_MAC = 7,
DRV_HASH_CMAC = 8,
- DRV_HASH_MODE_NUM = 9,
+ DRV_HASH_SM3 = 9,
+ DRV_HASH_MODE_NUM = 10,
DRV_HASH_RESERVE32B = S32_MAX
};
@@ -127,6 +136,7 @@
DRV_HASH_HW_SHA512 = 4,
DRV_HASH_HW_SHA384 = 12,
DRV_HASH_HW_GHASH = 6,
+ DRV_HASH_HW_SM3 = 14,
DRV_HASH_HW_RESERVE32B = S32_MAX
};
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 5ca184e..5669997 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/debugfs.h>
@@ -25,9 +25,24 @@
*/
static struct dentry *cc_debugfs_dir;
-static struct debugfs_reg32 debug_regs[] = {
+static struct debugfs_reg32 ver_sig_regs[] = {
{ .name = "SIGNATURE" }, /* Must be 0th */
{ .name = "VERSION" }, /* Must be 1st */
+};
+
+static struct debugfs_reg32 pid_cid_regs[] = {
+ CC_DEBUG_REG(PERIPHERAL_ID_0),
+ CC_DEBUG_REG(PERIPHERAL_ID_1),
+ CC_DEBUG_REG(PERIPHERAL_ID_2),
+ CC_DEBUG_REG(PERIPHERAL_ID_3),
+ CC_DEBUG_REG(PERIPHERAL_ID_4),
+ CC_DEBUG_REG(COMPONENT_ID_0),
+ CC_DEBUG_REG(COMPONENT_ID_1),
+ CC_DEBUG_REG(COMPONENT_ID_2),
+ CC_DEBUG_REG(COMPONENT_ID_3),
+};
+
+static struct debugfs_reg32 debug_regs[] = {
CC_DEBUG_REG(HOST_IRR),
CC_DEBUG_REG(HOST_POWER_DOWN_EN),
CC_DEBUG_REG(AXIM_MON_ERR),
@@ -39,11 +54,9 @@
CC_DEBUG_REG(AXIM_MON_COMP),
};
-int __init cc_debugfs_global_init(void)
+void __init cc_debugfs_global_init(void)
{
cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
-
- return !cc_debugfs_dir;
}
void __exit cc_debugfs_global_fini(void)
@@ -55,11 +68,7 @@
{
struct device *dev = drvdata_to_dev(drvdata);
struct cc_debugfs_ctx *ctx;
- struct debugfs_regset32 *regset;
- struct dentry *file;
-
- debug_regs[0].offset = drvdata->sig_offset;
- debug_regs[1].offset = drvdata->ver_offset;
+ struct debugfs_regset32 *regset, *verset;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -74,25 +83,30 @@
regset->base = drvdata->cc_base;
ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
- if (!ctx->dir)
- return -ENFILE;
- file = debugfs_create_regset32("regs", 0400, ctx->dir, regset);
- if (!file) {
- debugfs_remove(ctx->dir);
- return -ENFILE;
+ debugfs_create_regset32("regs", 0400, ctx->dir, regset);
+ debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);
+
+ verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL);
+ /* Failing here is not important enough to fail the module load */
+ if (!verset)
+ goto out;
+
+ if (drvdata->hw_rev <= CC_HW_REV_712) {
+ ver_sig_regs[0].offset = drvdata->sig_offset;
+ ver_sig_regs[1].offset = drvdata->ver_offset;
+ verset->regs = ver_sig_regs;
+ verset->nregs = ARRAY_SIZE(ver_sig_regs);
+ } else {
+ verset->regs = pid_cid_regs;
+ verset->nregs = ARRAY_SIZE(pid_cid_regs);
}
+ verset->base = drvdata->cc_base;
- file = debugfs_create_bool("coherent", 0400, ctx->dir,
- &drvdata->coherent);
+ debugfs_create_regset32("version", 0400, ctx->dir, verset);
- if (!file) {
- debugfs_remove_recursive(ctx->dir);
- return -ENFILE;
- }
-
+out:
drvdata->debugfs = ctx;
-
return 0;
}
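The reworked cc_debugfs_init() above follows the debugfs convention that creation helpers need no error checking, since a debugfs failure should never fail a driver load. A minimal standalone module in the same style (all demo_* names are hypothetical):

#include <linux/debugfs.h>
#include <linux/module.h>

static struct dentry *demo_dir;
static bool demo_flag;

static int __init demo_init(void)
{
	/* Creation results are deliberately not checked. */
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_bool("flag", 0400, demo_dir, &demo_flag);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");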
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
index 5b5320e..664ff40 100644
--- a/drivers/crypto/ccree/cc_debugfs.h
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_DEBUGFS_H__
#define __CC_DEBUGFS_H__
#ifdef CONFIG_DEBUG_FS
-int cc_debugfs_global_init(void);
+void cc_debugfs_global_init(void);
void cc_debugfs_global_fini(void);
int cc_debugfs_init(struct cc_drvdata *drvdata);
@@ -13,11 +13,7 @@
#else
-static inline int cc_debugfs_global_init(void)
-{
- return 0;
-}
-
+static inline void cc_debugfs_global_init(void) {}
static inline void cc_debugfs_global_fini(void) {}
static inline int cc_debugfs_init(struct cc_drvdata *drvdata)
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 1ff229c..8b8eee5 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -22,7 +22,6 @@
#include "cc_cipher.h"
#include "cc_aead.h"
#include "cc_hash.h"
-#include "cc_ivgen.h"
#include "cc_sram_mgr.h"
#include "cc_pm.h"
#include "cc_fips.h"
@@ -30,32 +29,68 @@
bool cc_dump_desc;
module_param_named(dump_desc, cc_dump_desc, bool, 0600);
MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");
-
bool cc_dump_bytes;
module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");
+static bool cc_sec_disable;
+module_param_named(sec_disable, cc_sec_disable, bool, 0600);
+MODULE_PARM_DESC(cc_sec_disable, "Disable security functions");
+
struct cc_hw_data {
char *name;
enum cc_hw_rev rev;
u32 sig;
+ u32 cidr_0123;
+ u32 pidr_0124;
+ int std_bodies;
+};
+
+#define CC_NUM_IDRS 4
+#define CC_HW_RESET_LOOP_COUNT 10
+
+/* Note: PIDR3 holds CMOD/Rev, so it is ignored for HW identification purposes */
+static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
+ CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1),
+ CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4)
+};
+
+static const u32 cidr_0123_offsets[CC_NUM_IDRS] = {
+ CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1),
+ CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3)
};
/* Hardware revisions defs. */
+/* The 703 is an OSCCA-only variant of the 713 */
+static const struct cc_hw_data cc703_hw = {
+ .name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+ .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA
+};
+
+static const struct cc_hw_data cc713_hw = {
+ .name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+ .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL
+};
+
static const struct cc_hw_data cc712_hw = {
- .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U
+ .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U,
+ .std_bodies = CC_STD_ALL
};
static const struct cc_hw_data cc710_hw = {
- .name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U
+ .name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U,
+ .std_bodies = CC_STD_ALL
};
static const struct cc_hw_data cc630p_hw = {
- .name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U
+ .name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U,
+ .std_bodies = CC_STD_ALL
};
static const struct of_device_id arm_ccree_dev_of_match[] = {
+ { .compatible = "arm,cryptocell-703-ree", .data = &cc703_hw },
+ { .compatible = "arm,cryptocell-713-ree", .data = &cc713_hw },
{ .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw },
{ .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw },
{ .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw },
@@ -63,6 +98,20 @@
};
MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
+static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets)
+{
+ int i;
+ union {
+ u8 regs[CC_NUM_IDRS];
+ __le32 val;
+ } idr;
+
+ for (i = 0; i < CC_NUM_IDRS; ++i)
+ idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]);
+
+ return le32_to_cpu(idr.val);
+}
+
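As a sanity check on cc_read_idr(): each ID register carries one significant byte in its low bits, and packing the four reads little-endian reproduces values like the ARM Primecell component ID 0xB105F00D expected by the cc703/cc713 tables. A userspace sketch with illustrative register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Low byte of each COMPONENT_ID_0..3 read (illustrative) */
	uint8_t cid[4] = { 0x0D, 0xF0, 0x05, 0xB1 };
	uint32_t val = (uint32_t)cid[0] | (uint32_t)cid[1] << 8 |
		       (uint32_t)cid[2] << 16 | (uint32_t)cid[3] << 24;

	printf("CIDR0123 = 0x%08X\n", val);	/* 0xB105F00D */
	return 0;
}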
void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
char prefix[64];
@@ -84,14 +133,17 @@
u32 imr;
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
+ /* If the driver is suspended, return; probably a shared interrupt */
+ if (cc_pm_is_dev_suspended(dev))
+ return IRQ_NONE;
/* read the interrupt status */
irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
dev_dbg(dev, "Got IRR=0x%08X\n", irr);
- if (irr == 0) { /* Probably shared interrupt line */
- dev_err(dev, "Got interrupt with empty IRR\n");
+
+ if (irr == 0) /* Probably shared interrupt line */
return IRQ_NONE;
- }
+
imr = cc_ioread(drvdata, CC_REG(HOST_IMR));
/* clear interrupt - must be before processing events */
@@ -99,12 +151,12 @@
drvdata->irq = irr;
/* Completion interrupt - most probable */
- if (irr & CC_COMP_IRQ_MASK) {
- /* Mask AXI completion interrupt - will be unmasked in
- * Deferred service handler
+ if (irr & drvdata->comp_mask) {
+ /* Mask all completion interrupts - will be unmasked in
+ * deferred service handler
*/
- cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
- irr &= ~CC_COMP_IRQ_MASK;
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask);
+ irr &= ~drvdata->comp_mask;
complete_request(drvdata);
}
#ifdef CONFIG_CRYPTO_FIPS
@@ -139,16 +191,44 @@
return IRQ_HANDLED;
}
+bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata)
+{
+ unsigned int val;
+ unsigned int i;
+
+ /* 712/710/630 have no reset completion indication; always return true */
+ if (drvdata->hw_rev <= CC_HW_REV_712)
+ return true;
+
+ for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
+ /* in cc7x3, NVM_IS_IDLE indicates that the CC reset has
+ * completed and the device is fully functional
+ */
+ val = cc_ioread(drvdata, CC_REG(NVM_IS_IDLE));
+ if (val & CC_NVM_IS_IDLE_MASK) {
+ /* hw indicates reset completed */
+ return true;
+ }
+ /* allow other processes to be scheduled on this CPU */
+ schedule();
+ }
+ /* reset not completed */
+ return false;
+}
+
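cc_wait_for_reset_completion() is a plain bounded poll; the same shape in a self-contained userspace sketch, with fake_ioread() as a stand-in for cc_ioread():

#include <stdbool.h>
#include <stdio.h>

#define NVM_IS_IDLE_MASK	0x1u
#define RESET_LOOP_COUNT	10

/* Stand-in for cc_ioread(); reports idle on the third poll. */
static unsigned int fake_ioread(void)
{
	static unsigned int calls;

	return (++calls >= 3) ? NVM_IS_IDLE_MASK : 0;
}

static bool wait_for_reset(void)
{
	unsigned int i;

	for (i = 0; i < RESET_LOOP_COUNT; i++)
		if (fake_ioread() & NVM_IS_IDLE_MASK)
			return true;	/* reset completed */
	return false;			/* gave up after N polls */
}

int main(void)
{
	printf("reset %s\n", wait_for_reset() ? "completed" : "timed out");
	return 0;
}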
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
{
unsigned int val, cache_params;
struct device *dev = drvdata_to_dev(drvdata);
- /* Unmask all AXI interrupt sources AXI_CFG1 register */
- val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
- cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
- dev_dbg(dev, "AXIM_CFG=0x%08X\n",
- cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+ /* Unmask all AXI interrupt sources in the AXI_CFG1 register */
+ /* AXI interrupt config is obsolete starting at cc7x3 */
+ if (drvdata->hw_rev <= CC_HW_REV_712) {
+ val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
+ cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
+ dev_dbg(dev, "AXIM_CFG=0x%08X\n",
+ cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+ }
/* Clear all pending interrupts */
val = cc_ioread(drvdata, CC_REG(HOST_IRR));
@@ -156,7 +236,7 @@
cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
/* Unmask relevant interrupt cause */
- val = CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK;
+ val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK;
if (drvdata->hw_rev >= CC_HW_REV_712)
val |= CC_GPR0_IRQ_MASK;
@@ -186,7 +266,7 @@
struct cc_drvdata *new_drvdata;
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
- u32 signature_val;
+ u32 val, hw_rev_pidr, sig_cidr;
u64 dma_mask;
const struct cc_hw_data *hw_rev;
const struct of_device_id *dev_id;
@@ -204,19 +284,20 @@
hw_rev = (struct cc_hw_data *)dev_id->data;
new_drvdata->hw_rev_name = hw_rev->name;
new_drvdata->hw_rev = hw_rev->rev;
+ new_drvdata->std_bodies = hw_rev->std_bodies;
if (hw_rev->rev >= CC_HW_REV_712) {
- new_drvdata->hash_len_sz = HASH_LEN_SIZE_712;
new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712);
new_drvdata->ver_offset = CC_REG(HOST_VERSION_712);
} else {
- new_drvdata->hash_len_sz = HASH_LEN_SIZE_630;
new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630);
new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
}
+ new_drvdata->comp_mask = CC_COMP_IRQ_MASK;
+
platform_set_drvdata(plat_dev, new_drvdata);
new_drvdata->plat_dev = plat_dev;
@@ -257,19 +338,8 @@
/* Then IRQ */
new_drvdata->irq = platform_get_irq(plat_dev, 0);
- if (new_drvdata->irq < 0) {
- dev_err(dev, "Failed getting IRQ resource\n");
+ if (new_drvdata->irq < 0)
return new_drvdata->irq;
- }
-
- rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
- IRQF_SHARED, "ccree", new_drvdata);
- if (rc) {
- dev_err(dev, "Could not register to interrupt %d\n",
- new_drvdata->irq);
- return rc;
- }
- dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
init_completion(&new_drvdata->hw_queue_avail);
@@ -297,20 +367,89 @@
return rc;
}
- /* Verify correct mapping */
- signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
- if (signature_val != hw_rev->sig) {
- dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
- signature_val, hw_rev->sig);
- rc = -EINVAL;
- goto post_clk_err;
+ new_drvdata->sec_disabled = cc_sec_disable;
+
+ /* wait for Cryptocell reset completion */
+ if (!cc_wait_for_reset_completion(new_drvdata)) {
+ dev_err(dev, "Cryptocell reset not completed");
}
- dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
+
+ if (hw_rev->rev <= CC_HW_REV_712) {
+ /* Verify correct mapping */
+ val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
+ if (val != hw_rev->sig) {
+ dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
+ val, hw_rev->sig);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ sig_cidr = val;
+ hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
+ } else {
+ /* Verify correct mapping */
+ val = cc_read_idr(new_drvdata, pidr_0124_offsets);
+ if (val != hw_rev->pidr_0124) {
+ dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
+ val, hw_rev->pidr_0124);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ hw_rev_pidr = val;
+
+ val = cc_read_idr(new_drvdata, cidr_0123_offsets);
+ if (val != hw_rev->cidr_0123) {
+ dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
+ val, hw_rev->cidr_0123);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ sig_cidr = val;
+
+ /* Check HW engine configuration */
+ val = cc_ioread(new_drvdata, CC_REG(HOST_REMOVE_INPUT_PINS));
+ switch (val) {
+ case CC_PINS_FULL:
+ /* This is fine */
+ break;
+ case CC_PINS_SLIM:
+ if (new_drvdata->std_bodies & CC_STD_NIST) {
+ dev_warn(dev, "703 mode forced due to HW configuration.\n");
+ new_drvdata->std_bodies = CC_STD_OSCCA;
+ }
+ break;
+ default:
+ dev_err(dev, "Unsupported engines configuration.\n");
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+
+ /* Check security disable state */
+ val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
+ val &= CC_SECURITY_DISABLED_MASK;
+ new_drvdata->sec_disabled |= !!val;
+
+ if (!new_drvdata->sec_disabled) {
+ new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK;
+ if (new_drvdata->std_bodies & CC_STD_NIST)
+ new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK;
+ }
+ }
+
+ if (new_drvdata->sec_disabled)
+ dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");
/* Display HW versions */
- dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
- hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset),
- DRV_MODULE_VERSION);
+ dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
+ hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
+ /* register the driver isr function */
+ rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
+ IRQF_SHARED, "ccree", new_drvdata);
+ if (rc) {
+ dev_err(dev, "Could not register to interrupt %d\n",
+ new_drvdata->irq);
+ goto post_clk_err;
+ }
+ dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
@@ -361,17 +500,11 @@
goto post_buf_mgr_err;
}
- rc = cc_ivgen_init(new_drvdata);
- if (rc) {
- dev_err(dev, "cc_ivgen_init failed\n");
- goto post_power_mgr_err;
- }
-
/* Allocate crypto algs */
rc = cc_cipher_alloc(new_drvdata);
if (rc) {
dev_err(dev, "cc_cipher_alloc failed\n");
- goto post_ivgen_err;
+ goto post_buf_mgr_err;
}
/* hash must be allocated before aead since hash exports APIs */
@@ -387,6 +520,9 @@
goto post_hash_err;
}
+ /* All set, we can allow autosuspend */
+ cc_pm_go(new_drvdata);
+
/* If we got here and FIPS mode is enabled
* it means all FIPS test passed, so let TEE
* know we're good.
@@ -399,10 +535,6 @@
cc_hash_free(new_drvdata);
post_cipher_err:
cc_cipher_free(new_drvdata);
-post_ivgen_err:
- cc_ivgen_fini(new_drvdata);
-post_power_mgr_err:
- cc_pm_fini(new_drvdata);
post_buf_mgr_err:
cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
@@ -434,7 +566,6 @@
cc_aead_free(drvdata);
cc_hash_free(drvdata);
cc_cipher_free(drvdata);
- cc_ivgen_fini(drvdata);
cc_pm_fini(drvdata);
cc_buffer_mgr_fini(drvdata);
cc_req_mgr_fini(drvdata);
@@ -461,6 +592,14 @@
return 0;
}
+unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
+{
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ return HASH_LEN_SIZE_712;
+ else
+ return HASH_LEN_SIZE_630;
+}
+
void cc_clk_off(struct cc_drvdata *drvdata)
{
struct clk *clk = drvdata->clk;
@@ -514,13 +653,8 @@
static int __init ccree_init(void)
{
- int ret;
-
cc_hash_global_init();
-
- ret = cc_debugfs_global_init();
- if (ret)
- return ret;
+ cc_debugfs_global_init();
return platform_driver_register(&ccree_driver);
}
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index d608a4f..ab31d4a 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_driver.h
* ARM CryptoCell Linux Crypto Driver
@@ -36,16 +36,26 @@
extern bool cc_dump_desc;
extern bool cc_dump_bytes;
-#define DRV_MODULE_VERSION "4.0"
+#define DRV_MODULE_VERSION "5.0"
enum cc_hw_rev {
CC_HW_REV_630 = 630,
CC_HW_REV_710 = 710,
- CC_HW_REV_712 = 712
+ CC_HW_REV_712 = 712,
+ CC_HW_REV_713 = 713
+};
+
+enum cc_std_body {
+ CC_STD_NIST = 0x1,
+ CC_STD_OSCCA = 0x2,
+ CC_STD_ALL = 0x3
};
#define CC_COHERENT_CACHE_PARAMS 0xEEE
+#define CC_PINS_FULL 0x0
+#define CC_PINS_SLIM 0x9F
+
/* Maximum DMA mask supported by IP */
#define DMA_BIT_MASK_LEN 48
@@ -58,10 +68,34 @@
#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)
+
+#define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)
+
#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+#define CC_CPP_AES_ABORT_MASK ( \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))
+
+#define CC_CPP_SM4_ABORT_MASK ( \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))
+
/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
@@ -74,7 +108,6 @@
#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080
-#define MAX_ICV_NENTS_SUPPORTED 2
/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
@@ -83,20 +116,18 @@
* field in the HW descriptor. The DMA engine +8 that value.
*/
+struct cc_cpp_req {
+ bool is_cpp;
+ enum cc_cpp_alg alg;
+ u8 slot;
+};
+
#define CC_MAX_IVGEN_DMA_ADDRESSES 3
struct cc_crypto_req {
void (*user_cb)(struct device *dev, void *req, int err);
void *user_arg;
- dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
- /* For the first 'ivgen_dma_addr_len' addresses of this array,
- * generated IV would be placed in it by send_request().
- * Same generated IV for all addresses!
- */
- /* Amount of 'ivgen_dma_addr' elements to be filled. */
- unsigned int ivgen_dma_addr_len;
- /* The generated IV size required, 8/16 B allowed. */
- unsigned int ivgen_size;
struct completion seq_compl; /* request completion */
+ struct cc_cpp_req cpp;
};
/**
@@ -104,13 +135,11 @@
* @cc_base: virt address of the CC registers
* @irq: device IRQ number
* @irq_mask: Interrupt mask shadow (1 for masked interrupts)
- * @fw_ver: SeP loaded firmware version
*/
struct cc_drvdata {
void __iomem *cc_base;
int irq;
u32 irq_mask;
- u32 fw_ver;
struct completion hw_queue_avail; /* wait for HW queue availability */
struct platform_device *plat_dev;
cc_sram_addr_t mlli_sram_addr;
@@ -120,17 +149,18 @@
void *aead_handle;
void *request_mgr_handle;
void *fips_handle;
- void *ivgen_handle;
void *sram_mgr_handle;
void *debugfs;
struct clk *clk;
bool coherent;
char *hw_rev_name;
enum cc_hw_rev hw_rev;
- u32 hash_len_sz;
u32 axim_mon_offset;
u32 sig_offset;
u32 ver_offset;
+ int std_bodies;
+ bool sec_disabled;
+ u32 comp_mask;
};
struct cc_crypto_alg {
@@ -156,12 +186,15 @@
int flow_mode; /* Note: currently, refers to the cipher mode only. */
int auth_mode;
u32 min_hw_rev;
+ enum cc_std_body std_body;
+ bool sec_func;
unsigned int data_unit;
struct cc_drvdata *drvdata;
};
struct async_gen_req_ctx {
dma_addr_t iv_dma_addr;
+ u8 *iv;
enum drv_crypto_direction op_type;
};
@@ -178,10 +211,12 @@
__dump_byte_array(name, the_array, size);
}
+bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata);
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
void fini_cc_regs(struct cc_drvdata *drvdata);
int cc_clk_on(struct cc_drvdata *drvdata);
void cc_clk_off(struct cc_drvdata *drvdata);
+unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);
static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
{
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index b4d0a6d..4c8bce3 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/fips.h>
+#include <linux/notifier.h>
#include "cc_driver.h"
#include "cc_fips.h"
@@ -11,6 +12,8 @@
struct cc_fips_handle {
struct tasklet_struct tasklet;
+ struct notifier_block nb;
+ struct cc_drvdata *drvdata;
};
/* The function called once at driver entry point to check
@@ -21,7 +24,13 @@
u32 reg;
reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
- return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
+ /* Did the TEE report status? */
+ if (reg & CC_FIPS_SYNC_TEE_STATUS)
+ /* Yes. Is it OK? */
+ return (reg & CC_FIPS_SYNC_MODULE_OK);
+
+ /* No. It's either not in use or will be reported later */
+ return true;
}
/*
@@ -40,6 +49,21 @@
cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
}
+/* Push REE side FIPS test failure to TEE side */
+static int cc_ree_fips_failure(struct notifier_block *nb, unsigned long unused1,
+ void *unused2)
+{
+ struct cc_fips_handle *fips_h =
+ container_of(nb, struct cc_fips_handle, nb);
+ struct cc_drvdata *drvdata = fips_h->drvdata;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ cc_set_ree_fips_status(drvdata, false);
+ dev_info(dev, "Notifying TEE of FIPS test failure...\n");
+
+ return NOTIFY_OK;
+}
+
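cc_ree_fips_failure() subscribes to the generic fips_fail_notif_chain; the producing side lives in crypto/fips.c and, roughly (an abridged sketch of the 5.4-era code, not part of this patch), looks like:

#include <linux/fips.h>
#include <linux/notifier.h>

ATOMIC_NOTIFIER_HEAD(fips_fail_notif_chain);
EXPORT_SYMBOL_GPL(fips_fail_notif_chain);

void fips_fail_notify(void)
{
	/* Broadcast a REE self-test failure to all registered listeners */
	if (fips_enabled)
		atomic_notifier_call_chain(&fips_fail_notif_chain, 0, NULL);
}
EXPORT_SYMBOL_GPL(fips_fail_notify);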
void cc_fips_fini(struct cc_drvdata *drvdata)
{
struct cc_fips_handle *fips_h = drvdata->fips_handle;
@@ -47,10 +71,10 @@
if (drvdata->hw_rev < CC_HW_REV_712 || !fips_h)
return;
+ atomic_notifier_chain_unregister(&fips_fail_notif_chain, &fips_h->nb);
+
/* Kill tasklet */
tasklet_kill(&fips_h->tasklet);
-
- kfree(fips_h);
drvdata->fips_handle = NULL;
}
@@ -72,20 +96,28 @@
dev_err(dev, "TEE reported error!\n");
}
+/*
+ * This function checks if a CryptoCell TEE FIPS error occurred
+ * and, in such a case, triggers a system error
+ */
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata)
+{
+ struct device *dev = drvdata_to_dev(p_drvdata);
+
+ if (!cc_get_tee_fips_status(p_drvdata))
+ tee_fips_error(dev);
+}
+
/* Deferred service handler, run as interrupt-fired tasklet */
static void fips_dsr(unsigned long devarg)
{
struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
- struct device *dev = drvdata_to_dev(drvdata);
- u32 irq, state, val;
+ u32 irq, val;
irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
if (irq) {
- state = cc_ioread(drvdata, CC_REG(GPR_HOST));
-
- if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
- tee_fips_error(dev);
+ cc_tee_handle_fips_error(drvdata);
}
/* after verifying that there is nothing to do,
@@ -104,7 +136,7 @@
if (p_drvdata->hw_rev < CC_HW_REV_712)
return 0;
- fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
+ fips_h = devm_kzalloc(dev, sizeof(*fips_h), GFP_KERNEL);
if (!fips_h)
return -ENOMEM;
@@ -112,9 +144,11 @@
dev_dbg(dev, "Initializing fips tasklet\n");
tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
+ fips_h->drvdata = p_drvdata;
+ fips_h->nb.notifier_call = cc_ree_fips_failure;
+ atomic_notifier_chain_register(&fips_fail_notif_chain, &fips_h->nb);
- if (!cc_get_tee_fips_status(p_drvdata))
- tee_fips_error(dev);
+ cc_tee_handle_fips_error(p_drvdata);
return 0;
}
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
index 645e096..fc33eeb 100644
--- a/drivers/crypto/ccree/cc_fips.h
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_FIPS_H__
#define __CC_FIPS_H__
@@ -18,6 +18,7 @@
void cc_fips_fini(struct cc_drvdata *drvdata);
void fips_handler(struct cc_drvdata *drvdata);
void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata);
#else /* CONFIG_CRYPTO_FIPS */
@@ -30,6 +31,7 @@
static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
bool ok) {}
static inline void fips_handler(struct cc_drvdata *drvdata) {}
+static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {}
#endif /* CONFIG_CRYPTO_FIPS */
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index b931330..bc71bdf 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
+#include <crypto/sm3.h>
#include <crypto/internal/hash.h>
#include "cc_driver.h"
@@ -16,6 +17,7 @@
#define CC_MAX_HASH_SEQ_LEN 12
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
+#define CC_SM3_HASH_LEN_SIZE 8
struct cc_hash_handle {
cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
@@ -23,26 +25,29 @@
struct list_head hash_list;
};
-static const u32 digest_len_init[] = {
+static const u32 cc_digest_len_init[] = {
0x00000040, 0x00000000, 0x00000000, 0x00000000 };
-static const u32 md5_init[] = {
+static const u32 cc_md5_init[] = {
SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
-static const u32 sha1_init[] = {
+static const u32 cc_sha1_init[] = {
SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
-static const u32 sha224_init[] = {
+static const u32 cc_sha224_init[] = {
SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
-static const u32 sha256_init[] = {
+static const u32 cc_sha256_init[] = {
SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
-static const u32 digest_len_sha512_init[] = {
+static const u32 cc_digest_len_sha512_init[] = {
0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static u64 sha384_init[] = {
+static u64 cc_sha384_init[] = {
SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
-static u64 sha512_init[] = {
+static u64 cc_sha512_init[] = {
SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+static const u32 cc_sm3_init[] = {
+ SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
+ SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
unsigned int *seq_size);
@@ -64,6 +69,7 @@
struct hash_key_req_ctx {
u32 keylen;
dma_addr_t key_dma_addr;
+ u8 *key;
};
/* hash per-session context */
@@ -82,6 +88,7 @@
int hash_mode;
int hw_mode;
int inter_digestsize;
+ unsigned int hash_len;
struct completion setkey_comp;
bool is_hmac;
};
@@ -137,11 +144,12 @@
if (ctx->hash_mode == DRV_HASH_SHA512 ||
ctx->hash_mode == DRV_HASH_SHA384)
memcpy(state->digest_bytes_len,
- digest_len_sha512_init,
- ctx->drvdata->hash_len_sz);
+ cc_digest_len_sha512_init,
+ ctx->hash_len);
else
- memcpy(state->digest_bytes_len, digest_len_init,
- ctx->drvdata->hash_len_sz);
+ memcpy(state->digest_bytes_len,
+ cc_digest_len_init,
+ ctx->hash_len);
}
if (ctx->hash_mode != DRV_HASH_NULL) {
@@ -274,9 +282,13 @@
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, err);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
}
static void cc_digest_complete(struct device *dev, void *cc_req, int err)
@@ -289,10 +301,14 @@
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_result(dev, state, digestsize, req->result);
- cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, err);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
}
static void cc_hash_complete(struct device *dev, void *cc_req, int err)
@@ -305,10 +321,14 @@
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_result(dev, state, digestsize, req->result);
- cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, err);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
}
static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
@@ -321,7 +341,7 @@
/* Get final MAC result */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
/* TODO */
set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
NS_BIT, 1);
@@ -367,7 +387,7 @@
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_din_sram(&desc[idx],
cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -440,7 +460,7 @@
* digest
*/
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
if (is_hmac) {
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT);
@@ -454,14 +474,14 @@
/* Load the hash current length */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
if (is_hmac) {
set_din_type(&desc[idx], DMA_DLLI,
state->digest_bytes_len_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT);
+ ctx->hash_len, NS_BIT);
} else {
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
if (nbytes)
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
else
@@ -478,7 +498,7 @@
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT, 0);
+ ctx->hash_len, NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
set_cipher_do(&desc[idx], DO_PAD);
@@ -504,7 +524,7 @@
{
/* Restore hash digest */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
@@ -513,10 +533,10 @@
/* Restore hash current length */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT);
+ ctx->hash_len, NS_BIT);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -576,7 +596,7 @@
/* store the hash digest result in context */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
@@ -585,9 +605,9 @@
/* store current hash length in context */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT, 1);
+ ctx->hash_len, NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
@@ -649,9 +669,9 @@
/* Pad the hash */
hw_desc_init(&desc[idx]);
set_cipher_do(&desc[idx], DO_PAD);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT, 0);
+ ctx->hash_len, NS_BIT, 0);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
idx++;
@@ -724,13 +744,20 @@
ctx->key_params.keylen = keylen;
ctx->key_params.key_dma_addr = 0;
ctx->is_hmac = true;
+ ctx->key_params.key = NULL;
if (keylen) {
+ ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+ if (!ctx->key_params.key)
+ return -ENOMEM;
+
ctx->key_params.key_dma_addr =
- dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ dma_map_single(dev, (void *)ctx->key_params.key, keylen,
+ DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
- key, keylen);
+ ctx->key_params.key, keylen);
+ kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -749,7 +776,7 @@
/* Load the hash current length*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -831,7 +858,7 @@
/* Load the hash current length*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -881,6 +908,9 @@
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
}
+
+ kzfree(ctx->key_params.key);
+
return rc;
}
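/*
 * Editor's sketch (not part of the patch): the setkey changes above
 * all apply the same DMA-safety pattern. The caller's key buffer may
 * live on the stack or in vmalloc space, so it cannot be DMA-mapped
 * directly; it is duplicated, mapped, and the copy is wiped with
 * kzfree() on every exit path. program_hw_key() is hypothetical.
 */
static int example_setkey(struct device *dev, const u8 *key,
			  unsigned int keylen)
{
	u8 *copy;
	dma_addr_t dma;
	int rc;

	copy = kmemdup(key, keylen, GFP_KERNEL);	/* DMA-able copy */
	if (!copy)
		return -ENOMEM;

	dma = dma_map_single(dev, copy, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		kzfree(copy);
		return -ENOMEM;
	}

	rc = program_hw_key(dev, dma, keylen);	/* hypothetical */

	dma_unmap_single(dev, dma, keylen, DMA_TO_DEVICE);
	kzfree(copy);	/* zeroes the key material before freeing */
	return rc;
}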
@@ -907,11 +937,16 @@
ctx->key_params.keylen = keylen;
+ ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+ if (!ctx->key_params.key)
+ return -ENOMEM;
+
ctx->key_params.key_dma_addr =
- dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
+ kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -963,6 +998,8 @@
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+ kzfree(ctx->key_params.key);
+
return rc;
}
@@ -1069,6 +1106,16 @@
return -ENOMEM;
}
+static int cc_get_hash_len(struct crypto_tfm *tfm)
+{
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
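+ /* SM3 uses a fixed hash-length field size; other modes take the HW-revision default */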
+ if (ctx->hash_mode == DRV_HASH_SM3)
+ return CC_SM3_HASH_LEN_SIZE;
+ else
+ return cc_get_default_hash_len(ctx->drvdata);
+}
+
static int cc_cra_init(struct crypto_tfm *tfm)
{
struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1086,7 +1133,7 @@
ctx->hw_mode = cc_alg->hw_mode;
ctx->inter_digestsize = cc_alg->inter_digestsize;
ctx->drvdata = cc_alg->drvdata;
-
+ ctx->hash_len = cc_get_hash_len(tfm);
return cc_alloc_ctx(ctx);
}
@@ -1465,8 +1512,8 @@
memcpy(out, state->digest_buff, ctx->inter_digestsize);
out += ctx->inter_digestsize;
- memcpy(out, state->digest_bytes_len, ctx->drvdata->hash_len_sz);
- out += ctx->drvdata->hash_len_sz;
+ memcpy(out, state->digest_bytes_len, ctx->hash_len);
+ out += ctx->hash_len;
memcpy(out, &curr_buff_cnt, sizeof(u32));
out += sizeof(u32);
@@ -1494,8 +1541,8 @@
memcpy(state->digest_buff, in, ctx->inter_digestsize);
in += ctx->inter_digestsize;
- memcpy(state->digest_bytes_len, in, ctx->drvdata->hash_len_sz);
- in += ctx->drvdata->hash_len_sz;
+ memcpy(state->digest_bytes_len, in, ctx->hash_len);
+ in += ctx->hash_len;
/* Sanity check the data as much as possible */
memcpy(&tmp, in, sizeof(u32));
@@ -1515,6 +1562,7 @@
char mac_name[CRYPTO_MAX_ALG_NAME];
char mac_driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
+ bool is_mac;
bool synchronize;
struct ahash_alg template_ahash;
int hash_mode;
@@ -1522,6 +1570,7 @@
int inter_digestsize;
struct cc_drvdata *drvdata;
u32 min_hw_rev;
+ enum cc_std_body std_body;
};
#define CC_STATE_SIZE(_x) \
@@ -1536,6 +1585,7 @@
.mac_name = "hmac(sha1)",
.mac_driver_name = "hmac-sha1-ccree",
.blocksize = SHA1_BLOCK_SIZE,
+ .is_mac = true,
.synchronize = false,
.template_ahash = {
.init = cc_hash_init,
@@ -1555,6 +1605,7 @@
.hw_mode = DRV_HASH_HW_SHA1,
.inter_digestsize = SHA1_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "sha256",
@@ -1562,6 +1613,7 @@
.mac_name = "hmac(sha256)",
.mac_driver_name = "hmac-sha256-ccree",
.blocksize = SHA256_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1580,6 +1632,7 @@
.hw_mode = DRV_HASH_HW_SHA256,
.inter_digestsize = SHA256_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "sha224",
@@ -1587,6 +1640,7 @@
.mac_name = "hmac(sha224)",
.mac_driver_name = "hmac-sha224-ccree",
.blocksize = SHA224_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1598,13 +1652,14 @@
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
- .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
+ .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA224,
.hw_mode = DRV_HASH_HW_SHA256,
.inter_digestsize = SHA256_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "sha384",
@@ -1612,6 +1667,7 @@
.mac_name = "hmac(sha384)",
.mac_driver_name = "hmac-sha384-ccree",
.blocksize = SHA384_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1623,13 +1679,14 @@
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
- .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
+ .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA384,
.hw_mode = DRV_HASH_HW_SHA512,
.inter_digestsize = SHA512_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "sha512",
@@ -1637,6 +1694,7 @@
.mac_name = "hmac(sha512)",
.mac_driver_name = "hmac-sha512-ccree",
.blocksize = SHA512_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1655,6 +1713,7 @@
.hw_mode = DRV_HASH_HW_SHA512,
.inter_digestsize = SHA512_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "md5",
@@ -1662,6 +1721,7 @@
.mac_name = "hmac(md5)",
.mac_driver_name = "hmac-md5-ccree",
.blocksize = MD5_HMAC_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1680,11 +1740,38 @@
.hw_mode = DRV_HASH_HW_MD5,
.inter_digestsize = MD5_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "sm3",
+ .driver_name = "sm3-ccree",
+ .blocksize = SM3_BLOCK_SIZE,
+ .is_mac = false,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SM3,
+ .hw_mode = DRV_HASH_HW_SM3,
+ .inter_digestsize = SM3_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
},
{
.mac_name = "xcbc(aes)",
.mac_driver_name = "xcbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_mac_update,
@@ -1703,11 +1790,13 @@
.hw_mode = DRV_CIPHER_XCBC_MAC,
.inter_digestsize = AES_BLOCK_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.mac_name = "cmac(aes)",
.mac_driver_name = "cmac-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_mac_update,
@@ -1726,6 +1815,7 @@
.hw_mode = DRV_CIPHER_CMAC,
.inter_digestsize = AES_BLOCK_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
};
@@ -1780,29 +1870,30 @@
unsigned int larval_seq_len = 0;
struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
+ bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
int rc = 0;
/* Copy-to-sram digest-len */
- cc_set_sram_desc(digest_len_init, sram_buff_ofs,
- ARRAY_SIZE(digest_len_init), larval_seq,
+ cc_set_sram_desc(cc_digest_len_init, sram_buff_ofs,
+ ARRAY_SIZE(cc_digest_len_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(digest_len_init);
+ sram_buff_ofs += sizeof(cc_digest_len_init);
larval_seq_len = 0;
if (large_sha_supported) {
/* Copy-to-sram digest-len for sha384/512 */
- cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
- ARRAY_SIZE(digest_len_sha512_init),
+ cc_set_sram_desc(cc_digest_len_sha512_init, sram_buff_ofs,
+ ARRAY_SIZE(cc_digest_len_sha512_init),
larval_seq, &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(digest_len_sha512_init);
+ sram_buff_ofs += sizeof(cc_digest_len_sha512_init);
larval_seq_len = 0;
}
@@ -1810,53 +1901,64 @@
hash_handle->larval_digest_sram_addr = sram_buff_ofs;
/* Copy-to-sram initial SHA* digests */
- cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
+ cc_set_sram_desc(cc_md5_init, sram_buff_ofs, ARRAY_SIZE(cc_md5_init),
larval_seq, &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(md5_init);
+ sram_buff_ofs += sizeof(cc_md5_init);
larval_seq_len = 0;
- cc_set_sram_desc(sha1_init, sram_buff_ofs,
- ARRAY_SIZE(sha1_init), larval_seq,
+ cc_set_sram_desc(cc_sha1_init, sram_buff_ofs,
+ ARRAY_SIZE(cc_sha1_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(sha1_init);
+ sram_buff_ofs += sizeof(cc_sha1_init);
larval_seq_len = 0;
- cc_set_sram_desc(sha224_init, sram_buff_ofs,
- ARRAY_SIZE(sha224_init), larval_seq,
+ cc_set_sram_desc(cc_sha224_init, sram_buff_ofs,
+ ARRAY_SIZE(cc_sha224_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(sha224_init);
+ sram_buff_ofs += sizeof(cc_sha224_init);
larval_seq_len = 0;
- cc_set_sram_desc(sha256_init, sram_buff_ofs,
- ARRAY_SIZE(sha256_init), larval_seq,
+ cc_set_sram_desc(cc_sha256_init, sram_buff_ofs,
+ ARRAY_SIZE(cc_sha256_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(sha256_init);
+ sram_buff_ofs += sizeof(cc_sha256_init);
larval_seq_len = 0;
- if (large_sha_supported) {
- cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
- (ARRAY_SIZE(sha384_init) * 2), larval_seq,
+ if (sm3_supported) {
+ cc_set_sram_desc(cc_sm3_init, sram_buff_ofs,
+ ARRAY_SIZE(cc_sm3_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
goto init_digest_const_err;
- sram_buff_ofs += sizeof(sha384_init);
+ sram_buff_ofs += sizeof(cc_sm3_init);
+ larval_seq_len = 0;
+ }
+
+ if (large_sha_supported) {
+ cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs,
+ (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(cc_sha384_init);
larval_seq_len = 0;
- cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
- (ARRAY_SIZE(sha512_init) * 2), larval_seq,
+ cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs,
+ (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (rc)
@@ -1885,8 +1987,8 @@
*/
void __init cc_hash_global_init(void)
{
- cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
- cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
+ cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2));
+ cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2));
}
int cc_hash_alloc(struct cc_drvdata *drvdata)
@@ -1905,15 +2007,18 @@
INIT_LIST_HEAD(&hash_handle->hash_list);
drvdata->hash_handle = hash_handle;
- sram_size_to_alloc = sizeof(digest_len_init) +
- sizeof(md5_init) +
- sizeof(sha1_init) +
- sizeof(sha224_init) +
- sizeof(sha256_init);
+ sram_size_to_alloc = sizeof(cc_digest_len_init) +
+ sizeof(cc_md5_init) +
+ sizeof(cc_sha1_init) +
+ sizeof(cc_sha224_init) +
+ sizeof(cc_sha256_init);
+
+ if (drvdata->hw_rev >= CC_HW_REV_713)
+ sram_size_to_alloc += sizeof(cc_sm3_init);
if (drvdata->hw_rev >= CC_HW_REV_712)
- sram_size_to_alloc += sizeof(digest_len_sha512_init) +
- sizeof(sha384_init) + sizeof(sha512_init);
+ sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
+ sizeof(cc_sha384_init) + sizeof(cc_sha512_init);
sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
if (sram_buff == NULL_SRAM_ADDR) {
@@ -1937,30 +2042,33 @@
struct cc_hash_alg *t_alg;
int hw_mode = driver_hash[alg].hw_mode;
- /* We either support both HASH and MAC or none */
- if (driver_hash[alg].min_hw_rev > drvdata->hw_rev)
+ /* Check that the HW revision and variants are suitable */
+ if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
+ !(drvdata->std_bodies & driver_hash[alg].std_body))
continue;
- /* register hmac version */
- t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
- if (IS_ERR(t_alg)) {
- rc = PTR_ERR(t_alg);
- dev_err(dev, "%s alg allocation failed\n",
- driver_hash[alg].driver_name);
- goto fail;
- }
- t_alg->drvdata = drvdata;
+ if (driver_hash[alg].is_mac) {
+ /* register hmac version */
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+ t_alg->drvdata = drvdata;
- rc = crypto_register_ahash(&t_alg->ahash_alg);
- if (rc) {
- dev_err(dev, "%s alg registration failed\n",
- driver_hash[alg].driver_name);
- kfree(t_alg);
- goto fail;
- } else {
- list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+ rc = crypto_register_ahash(&t_alg->ahash_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
+ kfree(t_alg);
+ goto fail;
+ } else {
+ list_add_tail(&t_alg->entry,
+ &hash_handle->hash_list);
+ }
}
-
if (hw_mode == DRV_CIPHER_XCBC_MAC ||
hw_mode == DRV_CIPHER_CMAC)
continue;
@@ -2027,7 +2135,7 @@
XCBC_MAC_K1_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], S_DIN_to_AES);
@@ -2151,20 +2259,22 @@
{
switch (mode) {
case DRV_HASH_MD5:
- return md5_init;
+ return cc_md5_init;
case DRV_HASH_SHA1:
- return sha1_init;
+ return cc_sha1_init;
case DRV_HASH_SHA224:
- return sha224_init;
+ return cc_sha224_init;
case DRV_HASH_SHA256:
- return sha256_init;
+ return cc_sha256_init;
case DRV_HASH_SHA384:
- return sha384_init;
+ return cc_sha384_init;
case DRV_HASH_SHA512:
- return sha512_init;
+ return cc_sha512_init;
+ case DRV_HASH_SM3:
+ return cc_sm3_init;
default:
dev_err(dev, "Invalid hash mode (%d)\n", mode);
- return md5_init;
+ return cc_md5_init;
}
}
@@ -2182,6 +2292,8 @@
struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
struct device *dev = drvdata_to_dev(_drvdata);
+ bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
+ cc_sram_addr_t addr;
switch (mode) {
case DRV_HASH_NULL:
@@ -2190,29 +2302,41 @@
return (hash_handle->larval_digest_sram_addr);
case DRV_HASH_SHA1:
return (hash_handle->larval_digest_sram_addr +
- sizeof(md5_init));
+ sizeof(cc_md5_init));
case DRV_HASH_SHA224:
return (hash_handle->larval_digest_sram_addr +
- sizeof(md5_init) +
- sizeof(sha1_init));
+ sizeof(cc_md5_init) +
+ sizeof(cc_sha1_init));
case DRV_HASH_SHA256:
return (hash_handle->larval_digest_sram_addr +
- sizeof(md5_init) +
- sizeof(sha1_init) +
- sizeof(sha224_init));
+ sizeof(cc_md5_init) +
+ sizeof(cc_sha1_init) +
+ sizeof(cc_sha224_init));
+ case DRV_HASH_SM3:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(cc_md5_init) +
+ sizeof(cc_sha1_init) +
+ sizeof(cc_sha224_init) +
+ sizeof(cc_sha256_init));
case DRV_HASH_SHA384:
- return (hash_handle->larval_digest_sram_addr +
- sizeof(md5_init) +
- sizeof(sha1_init) +
- sizeof(sha224_init) +
- sizeof(sha256_init));
+ addr = (hash_handle->larval_digest_sram_addr +
+ sizeof(cc_md5_init) +
+ sizeof(cc_sha1_init) +
+ sizeof(cc_sha224_init) +
+ sizeof(cc_sha256_init));
+ if (sm3_supported)
+ addr += sizeof(cc_sm3_init);
+ return addr;
case DRV_HASH_SHA512:
- return (hash_handle->larval_digest_sram_addr +
- sizeof(md5_init) +
- sizeof(sha1_init) +
- sizeof(sha224_init) +
- sizeof(sha256_init) +
- sizeof(sha384_init));
+ addr = (hash_handle->larval_digest_sram_addr +
+ sizeof(cc_md5_init) +
+ sizeof(cc_sha1_init) +
+ sizeof(cc_sha224_init) +
+ sizeof(cc_sha256_init) +
+ sizeof(cc_sha384_init));
+ if (sm3_supported)
+ addr += sizeof(cc_sm3_init);
+ return addr;
default:
dev_err(dev, "Invalid hash mode (%d)\n", mode);
}
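/*
 * Editor's note (illustrative; sizes assumed from the standard digest
 * state widths): larval digests are packed back-to-back in SRAM in
 * load order, so each case above returns the base address plus the
 * sizes of everything loaded before it, e.g. for SHA-384:
 */
static inline unsigned int example_sha384_larval_ofs(bool sm3_supported)
{
	/* MD5 (16) + SHA-1 (20) + SHA-224 (32) + SHA-256 (32) */
	unsigned int ofs = 16 + 20 + 32 + 32;

	if (sm3_supported)
		ofs += 32;	/* SM3 state precedes the large SHA states */
	return ofs;		/* 132 with SM3, 100 without */
}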
@@ -2237,7 +2361,7 @@
#if (CC_DEV_SHA_MAX > 256)
case DRV_HASH_SHA384:
case DRV_HASH_SHA512:
- return digest_len_addr + sizeof(digest_len_init);
+ return digest_len_addr + sizeof(cc_digest_len_init);
#endif
default:
return digest_len_addr; /*to avoid kernel crash*/
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
index 2e5bf8b..0d6dc61 100644
--- a/drivers/crypto/ccree/cc_hash.h
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_hash.h
* ARM CryptoCell Hash Crypto API
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
index 616b2e1..efe3e1d 100644
--- a/drivers/crypto/ccree/cc_host_regs.h
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
#ifndef __CC_HOST_H__
#define __CC_HOST_H__
@@ -7,33 +7,102 @@
// --------------------------------------
// BLOCK: HOST_P
// --------------------------------------
+
+
+/* IRR */
#define CC_HOST_IRR_REG_OFFSET 0xA00UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT 0x3UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT 0x4UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT 0x5UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT 0x6UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT 0x7UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT 0x9UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT 0xAUL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT 0xCUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT 0xDUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT 0xEUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT 0xFUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT 0x10UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT 0x11UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT 0x12UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT 0x14UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
#define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET 0xA10UL
#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE 0xCUL
-#define CC_HOST_IMR_REG_OFFSET 0xA04UL
-#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
-#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
+
+/* IMR */
+#define CC_HOST_IMR_REG_OFFSET 0x0A04UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT 0x3UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT 0x4UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT 0x5UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT 0x6UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT 0x7UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT 0x9UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT 0xAUL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT 0xCUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT 0xDUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT 0xEUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT 0xFUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT 0x10UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT 0x11UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT 0x12UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT 0x14UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
+
+/* ICR */
#define CC_HOST_ICR_REG_OFFSET 0xA08UL
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
@@ -45,6 +114,12 @@
#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_NVM_IS_IDLE_REG_OFFSET 0x0A10UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL
+#define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL
+#define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL
#define CC_HOST_SIGNATURE_712_REG_OFFSET 0xA24UL
#define CC_HOST_SIGNATURE_630_REG_OFFSET 0xAC8UL
#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
@@ -131,6 +206,66 @@
#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REG_OFFSET 0x0A7CUL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SHIFT 0x0UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SHIFT 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SHIFT 0x2UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SHIFT 0x3UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SHIFT 0x4UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SHIFT 0x5UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SHIFT 0x6UL
+#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SIZE 0x1UL
+#define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SHIFT 0x7UL
+#define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SIZE 0x1UL
+// --------------------------------------
+// BLOCK: ID_REGISTERS
+// --------------------------------------
+#define CC_PERIPHERAL_ID_4_REG_OFFSET 0x0FD0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SIZE 0x4UL
+#define CC_PIDRESERVED0_REG_OFFSET 0x0FD4UL
+#define CC_PIDRESERVED1_REG_OFFSET 0x0FD8UL
+#define CC_PIDRESERVED2_REG_OFFSET 0x0FDCUL
+#define CC_PERIPHERAL_ID_0_REG_OFFSET 0x0FE0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SIZE 0x8UL
+#define CC_PERIPHERAL_ID_1_REG_OFFSET 0x0FE4UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_2_REG_OFFSET 0x0FE8UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SIZE 0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SHIFT 0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SIZE 0x1UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_3_REG_OFFSET 0x0FECUL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_0_REG_OFFSET 0x0FF0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SIZE 0x8UL
+#define CC_COMPONENT_ID_1_REG_OFFSET 0x0FF4UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SHIFT 0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_2_REG_OFFSET 0x0FF8UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SIZE 0x8UL
+#define CC_COMPONENT_ID_3_REG_OFFSET 0x0FFCUL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SIZE 0x8UL
// --------------------------------------
// BLOCK: HOST_SRAM
// --------------------------------------
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index a091ae5..9f4db99 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_HW_QUEUE_DEFS_H__
#define __CC_HW_QUEUE_DEFS_H__
@@ -28,11 +28,13 @@
GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
#define WORD0_VALUE CC_GENMASK(0, VALUE)
+#define WORD0_CPP_CIPHER_MODE CC_GENMASK(0, CPP_CIPHER_MODE)
#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
+#define WORD1_LOCK_QUEUE CC_GENMASK(1, LOCK_QUEUE)
#define WORD2_VALUE CC_GENMASK(2, VALUE)
#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
@@ -42,6 +44,7 @@
#define WORD3_QUEUE_LAST_IND CC_GENMASK(3, QUEUE_LAST_IND)
#define WORD4_ACK_NEEDED CC_GENMASK(4, ACK_NEEDED)
#define WORD4_AES_SEL_N_HASH CC_GENMASK(4, AES_SEL_N_HASH)
+#define WORD4_AES_XOR_CRYPTO_KEY CC_GENMASK(4, AES_XOR_CRYPTO_KEY)
#define WORD4_BYTES_SWAP CC_GENMASK(4, BYTES_SWAP)
#define WORD4_CIPHER_CONF0 CC_GENMASK(4, CIPHER_CONF0)
#define WORD4_CIPHER_CONF1 CC_GENMASK(4, CIPHER_CONF1)
@@ -107,6 +110,7 @@
AES_to_AES_to_HASH_and_DOUT = 13,
AES_to_AES_to_HASH = 14,
AES_to_HASH_and_AES = 15,
+ DIN_SM4_DOUT = 16,
DIN_AES_AESMAC = 17,
HASH_to_DOUT = 18,
/* setup flows */
@@ -114,9 +118,11 @@
S_DIN_to_AES2 = 33,
S_DIN_to_DES = 34,
S_DIN_to_RC4 = 35,
+ S_DIN_to_SM4 = 36,
S_DIN_to_HASH = 37,
S_AES_to_DOUT = 38,
S_AES2_to_DOUT = 39,
+ S_SM4_to_DOUT = 40,
S_RC4_to_DOUT = 41,
S_DES_to_DOUT = 42,
S_HASH_to_DOUT = 43,
@@ -172,6 +178,15 @@
END_OF_KEYS = S32_MAX,
};
+#define CC_NUM_HW_KEY_SLOTS 4
+#define CC_FIRST_HW_KEY_SLOT 0
+#define CC_LAST_HW_KEY_SLOT (CC_FIRST_HW_KEY_SLOT + CC_NUM_HW_KEY_SLOTS - 1)
+
+#define CC_NUM_CPP_KEY_SLOTS 8
+#define CC_FIRST_CPP_KEY_SLOT 16
+#define CC_LAST_CPP_KEY_SLOT (CC_FIRST_CPP_KEY_SLOT + \
+ CC_NUM_CPP_KEY_SLOTS - 1)
+
enum cc_hw_aes_key_size {
AES_128_KEY = 0,
AES_192_KEY = 1,
@@ -185,6 +200,9 @@
HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
};
+#define CC_CPP_DIN_ADDR 0xFF00FF00UL
+#define CC_CPP_DIN_SIZE 0xFF00FFUL
+
/*****************************/
/* Descriptor packing macros */
/*****************************/
@@ -245,6 +263,25 @@
}
/*
+ * Setup the special CPP descriptor
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @slot: protected key slot number
+ */
+static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
+{
+ pdesc->word[0] |= CC_CPP_DIN_ADDR;
+
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, CC_CPP_DIN_SIZE);
+ pdesc->word[1] |= FIELD_PREP(WORD1_LOCK_QUEUE, 1);
+
+ pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, slot);
+}
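/*
 * Editor's sketch (illustrative, not driver code): a CPP descriptor is
 * built like any other, except that set_cpp_crypto_key() fills the DIN
 * address/size words with the reserved CPP values and locks the queue
 * for the protected-key operation.
 */
static inline void example_build_cpp_desc(struct cc_hw_desc *desc, u8 slot)
{
	hw_desc_init(desc);
	set_cpp_crypto_key(desc, slot);	/* slot: CPP key slot index */
}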
+
+/*
* Set the DIN field of a HW descriptors to SRAM mode.
* Note: No need to check SRAM alignment since host requests do not use SRAM and
* adaptor will enforce alignment check.
@@ -394,6 +431,16 @@
}
/*
+ * Set the AES XOR crypto key; in some scenarios this selects the SM3 engine
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_aes_xor_crypto_key(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_AES_XOR_CRYPTO_KEY, 1);
+}
+
+/*
* Set the DOUT field of a HW descriptors to SRAM mode
* Note: No need to check SRAM alignment since host requests do not use SRAM and
* adaptor will enforce alignment check.
@@ -449,20 +496,34 @@
* @pdesc: pointer HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
-static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
- enum drv_cipher_mode mode)
+static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
}
/*
+ * Set the cipher mode for hash algorithms.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @cipher_mode: Any one of the modes defined in [CC7x-DESC]
+ * @hash_mode: specifies which hash is being handled
+ */
+static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc,
+ enum drv_cipher_mode cipher_mode,
+ enum drv_hash_mode hash_mode)
+{
+ set_cipher_mode(pdesc, cipher_mode);
+ if (hash_mode == DRV_HASH_SM3)
+ set_aes_xor_crypto_key(pdesc);
+}
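/*
 * Editor's sketch: this helper is why the cc_hash.c hunks above swap
 * set_cipher_mode() for set_hash_cipher_mode() on descriptors that
 * touch hash state -- on SM3, the AES_XOR_CRYPTO_KEY bit is what
 * routes the operation to the SM3 engine. A typical caller, using
 * only helpers defined in this header:
 */
static inline void example_load_hash_len(struct cc_hw_desc *desc,
					 dma_addr_t len_dma, u32 hash_len,
					 int hw_mode,
					 enum drv_hash_mode hash_mode)
{
	hw_desc_init(desc);
	set_hash_cipher_mode(desc, hw_mode, hash_mode);
	set_din_type(desc, DMA_DLLI, len_dma, hash_len, NS_BIT);
	set_flow_mode(desc, S_DIN_to_HASH);
	set_setup_mode(desc, SETUP_LOAD_KEY0);
}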
+
+/*
* Set the cipher configuration fields.
*
* @pdesc: pointer HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
-static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
- enum drv_crypto_direction mode)
+static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
}
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
deleted file mode 100644
index 7694583..0000000
--- a/drivers/crypto/ccree/cc_ivgen.c
+++ /dev/null
@@ -1,279 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-
-#include <crypto/ctr.h>
-#include "cc_driver.h"
-#include "cc_ivgen.h"
-#include "cc_request_mgr.h"
-#include "cc_sram_mgr.h"
-#include "cc_buffer_mgr.h"
-
-/* The max. size of pool *MUST* be <= SRAM total size */
-#define CC_IVPOOL_SIZE 1024
-/* The first 32B fraction of pool are dedicated to the
- * next encryption "key" & "IV" for pool regeneration
- */
-#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
-#define CC_IVPOOL_GEN_SEQ_LEN 4
-
-/**
- * struct cc_ivgen_ctx -IV pool generation context
- * @pool: the start address of the iv-pool resides in internal RAM
- * @ctr_key_dma: address of pool's encryption key material in internal RAM
- * @ctr_iv_dma: address of pool's counter iv in internal RAM
- * @next_iv_ofs: the offset to the next available IV in pool
- * @pool_meta: virt. address of the initial enc. key/IV
- * @pool_meta_dma: phys. address of the initial enc. key/IV
- */
-struct cc_ivgen_ctx {
- cc_sram_addr_t pool;
- cc_sram_addr_t ctr_key;
- cc_sram_addr_t ctr_iv;
- u32 next_iv_ofs;
- u8 *pool_meta;
- dma_addr_t pool_meta_dma;
-};
-
-/*!
- * Generates CC_IVPOOL_SIZE of random bytes by
- * encrypting 0's using AES128-CTR.
- *
- * \param ivgen iv-pool context
- * \param iv_seq IN/OUT array to the descriptors sequence
- * \param iv_seq_len IN/OUT pointer to the sequence length
- */
-static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
- struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
-{
- unsigned int idx = *iv_seq_len;
-
- if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
- /* The sequence will be longer than allowed */
- return -EINVAL;
- }
- /* Setup key */
- hw_desc_init(&iv_seq[idx]);
- set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
- set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
- set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
- set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
- set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
- idx++;
-
- /* Setup cipher state */
- hw_desc_init(&iv_seq[idx]);
- set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
- set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
- set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
- set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
- set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
- set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
- idx++;
-
- /* Perform dummy encrypt to skip first block */
- hw_desc_init(&iv_seq[idx]);
- set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
- set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
- set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
- idx++;
-
- /* Generate IV pool */
- hw_desc_init(&iv_seq[idx]);
- set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
- set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
- set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
- idx++;
-
- *iv_seq_len = idx; /* Update sequence length */
-
- /* queue ordering assures pool readiness */
- ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;
-
- return 0;
-}
-
-/*!
- * Generates the initial pool in SRAM.
- * This function should be invoked when resuming driver.
- *
- * \param drvdata
- *
- * \return int Zero for success, negative value otherwise.
- */
-int cc_init_iv_sram(struct cc_drvdata *drvdata)
-{
- struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
- struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
- unsigned int iv_seq_len = 0;
- int rc;
-
- /* Generate initial enc. key/iv */
- get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);
-
- /* The first 32B reserved for the enc. Key/IV */
- ivgen_ctx->ctr_key = ivgen_ctx->pool;
- ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;
-
- /* Copy initial enc. key and IV to SRAM at a single descriptor */
- hw_desc_init(&iv_seq[iv_seq_len]);
- set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
- CC_IVPOOL_META_SIZE, NS_BIT);
- set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
- CC_IVPOOL_META_SIZE);
- set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
- iv_seq_len++;
-
- /* Generate initial pool */
- rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
- if (rc)
- return rc;
-
- /* Fire-and-forget */
- return send_request_init(drvdata, iv_seq, iv_seq_len);
-}
-
-/*!
- * Free iv-pool and ivgen context.
- *
- * \param drvdata
- */
-void cc_ivgen_fini(struct cc_drvdata *drvdata)
-{
- struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
- struct device *device = &drvdata->plat_dev->dev;
-
- if (!ivgen_ctx)
- return;
-
- if (ivgen_ctx->pool_meta) {
- memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
- dma_free_coherent(device, CC_IVPOOL_META_SIZE,
- ivgen_ctx->pool_meta,
- ivgen_ctx->pool_meta_dma);
- }
-
- ivgen_ctx->pool = NULL_SRAM_ADDR;
-
- /* release "this" context */
- kfree(ivgen_ctx);
-}
-
-/*!
- * Allocates iv-pool and maps resources.
- * This function generates the first IV pool.
- *
- * \param drvdata Driver's private context
- *
- * \return int Zero for success, negative value otherwise.
- */
-int cc_ivgen_init(struct cc_drvdata *drvdata)
-{
- struct cc_ivgen_ctx *ivgen_ctx;
- struct device *device = &drvdata->plat_dev->dev;
- int rc;
-
- /* Allocate "this" context */
- ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
- if (!ivgen_ctx)
- return -ENOMEM;
-
- /* Allocate pool's header for initial enc. key/IV */
- ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
- &ivgen_ctx->pool_meta_dma,
- GFP_KERNEL);
- if (!ivgen_ctx->pool_meta) {
- dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
- CC_IVPOOL_META_SIZE);
- rc = -ENOMEM;
- goto out;
- }
- /* Allocate IV pool in SRAM */
- ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
- if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
- dev_err(device, "SRAM pool exhausted\n");
- rc = -ENOMEM;
- goto out;
- }
-
- drvdata->ivgen_handle = ivgen_ctx;
-
- return cc_init_iv_sram(drvdata);
-
-out:
- cc_ivgen_fini(drvdata);
- return rc;
-}
-
-/*!
- * Acquires 16 Bytes IV from the iv-pool
- *
- * \param drvdata Driver private context
- * \param iv_out_dma Array of physical IV out addresses
- * \param iv_out_dma_len Length of iv_out_dma array (additional elements
- * of iv_out_dma array are ignore)
- * \param iv_out_size May be 8 or 16 bytes long
- * \param iv_seq IN/OUT array to the descriptors sequence
- * \param iv_seq_len IN/OUT pointer to the sequence length
- *
- * \return int Zero for success, negative value otherwise.
- */
-int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
- unsigned int iv_out_dma_len, unsigned int iv_out_size,
- struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
-{
- struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
- unsigned int idx = *iv_seq_len;
- struct device *dev = drvdata_to_dev(drvdata);
- unsigned int t;
-
- if (iv_out_size != CC_AES_IV_SIZE &&
- iv_out_size != CTR_RFC3686_IV_SIZE) {
- return -EINVAL;
- }
- if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
- /* The sequence will be longer than allowed */
- return -EINVAL;
- }
-
- /* check that number of generated IV is limited to max dma address
- * iv buffer size
- */
- if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
- /* The sequence will be longer than allowed */
- return -EINVAL;
- }
-
- for (t = 0; t < iv_out_dma_len; t++) {
- /* Acquire IV from pool */
- hw_desc_init(&iv_seq[idx]);
- set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
- ivgen_ctx->next_iv_ofs),
- iv_out_size);
- set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
- NS_BIT, 0);
- set_flow_mode(&iv_seq[idx], BYPASS);
- idx++;
- }
-
- /* Bypass operation is proceeded by crypto sequence, hence must
- * assure bypass-write-transaction by a memory barrier
- */
- hw_desc_init(&iv_seq[idx]);
- set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
- set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
- idx++;
-
- *iv_seq_len = idx; /* update seq length */
-
- /* Update iv index */
- ivgen_ctx->next_iv_ofs += iv_out_size;
-
- if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
- dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
- /* pool is drained -regenerate it! */
- return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
- }
-
- return 0;
-}
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h
deleted file mode 100644
index b6ac169..0000000
--- a/drivers/crypto/ccree/cc_ivgen.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
-
-#ifndef __CC_IVGEN_H__
-#define __CC_IVGEN_H__
-
-#include "cc_hw_queue_defs.h"
-
-#define CC_IVPOOL_SEQ_LEN 8
-
-/*!
- * Allocates iv-pool and maps resources.
- * This function generates the first IV pool.
- *
- * \param drvdata Driver's private context
- *
- * \return int Zero for success, negative value otherwise.
- */
-int cc_ivgen_init(struct cc_drvdata *drvdata);
-
-/*!
- * Free iv-pool and ivgen context.
- *
- * \param drvdata
- */
-void cc_ivgen_fini(struct cc_drvdata *drvdata);
-
-/*!
- * Generates the initial pool in SRAM.
- * This function should be invoked when resuming DX driver.
- *
- * \param drvdata
- *
- * \return int Zero for success, negative value otherwise.
- */
-int cc_init_iv_sram(struct cc_drvdata *drvdata);
-
-/*!
- * Acquires 16 Bytes IV from the iv-pool
- *
- * \param drvdata Driver private context
- * \param iv_out_dma Array of physical IV out addresses
- * \param iv_out_dma_len Length of iv_out_dma array (additional elements of
- * iv_out_dma array are ignore)
- * \param iv_out_size May be 8 or 16 bytes long
- * \param iv_seq IN/OUT array to the descriptors sequence
- * \param iv_seq_len IN/OUT pointer to the sequence length
- *
- * \return int Zero for success, negative value otherwise.
- */
-int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
- unsigned int iv_out_dma_len, unsigned int iv_out_size,
- struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len);
-
-#endif /*__CC_IVGEN_H__*/
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
index 8d7262a..582bae4 100644
--- a/drivers/crypto/ccree/cc_kernel_regs.h
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_CRYS_KERNEL_H__
#define __CC_CRYS_KERNEL_H__
diff --git a/drivers/crypto/ccree/cc_lli_defs.h b/drivers/crypto/ccree/cc_lli_defs.h
index 64b15ac..f891ab8 100644
--- a/drivers/crypto/ccree/cc_lli_defs.h
+++ b/drivers/crypto/ccree/cc_lli_defs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef _CC_LLI_DEFS_H_
#define _CC_LLI_DEFS_H_
@@ -14,7 +14,7 @@
#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF
#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
-#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
+#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 8
#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index d990f47..dbc508f 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -8,9 +8,9 @@
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
#include "cc_sram_mgr.h"
-#include "cc_ivgen.h"
#include "cc_hash.h"
#include "cc_pm.h"
+#include "cc_fips.h"
#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00
@@ -25,13 +25,13 @@
int rc;
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
rc = cc_suspend_req_queue(drvdata);
if (rc) {
dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
return rc;
}
fini_cc_regs(drvdata);
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
return 0;
}
@@ -42,19 +42,26 @@
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
-
+ /* Enable the device source clock */
rc = cc_clk_on(drvdata);
if (rc) {
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
+ /* wait for Cryptocell reset completion */
+ if (!cc_wait_for_reset_completion(drvdata)) {
+ dev_err(dev, "Cryptocell reset not completed");
+ return -EBUSY;
+ }
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
rc = init_cc_regs(drvdata, false);
if (rc) {
dev_err(dev, "init_cc_regs (%x)\n", rc);
return rc;
}
+ /* check if a TEE FIPS error occurred during power down */
+ cc_tee_handle_fips_error(drvdata);
rc = cc_resume_req_queue(drvdata);
if (rc) {
@@ -65,7 +72,6 @@
/* must be after the queue resuming as it uses the HW queue*/
cc_init_hash_sram(drvdata);
- cc_init_iv_sram(drvdata);
return 0;
}
@@ -98,22 +104,27 @@
return rc;
}
+bool cc_pm_is_dev_suspended(struct device *dev)
+{
+ /* check device state using runtime api */
+ return pm_runtime_suspended(dev);
+}
+
int cc_pm_init(struct cc_drvdata *drvdata)
{
- int rc = 0;
struct device *dev = drvdata_to_dev(drvdata);
/* must be before the enabling to avoid redundant suspending */
pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
/* activate the PM module */
- rc = pm_runtime_set_active(dev);
- if (rc)
- return rc;
- /* enable the PM module*/
- pm_runtime_enable(dev);
+ return pm_runtime_set_active(dev);
+}
- return rc;
+/* enable the PM module */
+void cc_pm_go(struct cc_drvdata *drvdata)
+{
+ pm_runtime_enable(drvdata_to_dev(drvdata));
}
void cc_pm_fini(struct cc_drvdata *drvdata)
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 020a540..a7d98a5 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_pm.h
*/
@@ -16,11 +16,13 @@
extern const struct dev_pm_ops ccree_pm;
int cc_pm_init(struct cc_drvdata *drvdata);
+void cc_pm_go(struct cc_drvdata *drvdata);
void cc_pm_fini(struct cc_drvdata *drvdata);
int cc_pm_suspend(struct device *dev);
int cc_pm_resume(struct device *dev);
int cc_pm_get(struct device *dev);
int cc_pm_put_suspend(struct device *dev);
+bool cc_pm_is_dev_suspended(struct device *dev);
#else
@@ -29,6 +31,8 @@
return 0;
}
+static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
+
static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
static inline int cc_pm_suspend(struct device *dev)
@@ -51,6 +55,12 @@
return 0;
}
+static inline bool cc_pm_is_dev_suspended(struct device *dev)
+{
+ /* if PM is not supported, the device is never suspended */
+ return false;
+}
+
#endif
#endif /*__POWER_MGR_H__*/
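/*
 * Editor's sketch (assumed probe flow, error handling elided): the
 * cc_pm_init()/cc_pm_go() split above lets the driver mark the device
 * active early but arm runtime PM only after it is fully initialized,
 * so a half-set-up device can never be runtime suspended.
 */
static int example_probe(struct cc_drvdata *drvdata)
{
	int rc;

	rc = cc_pm_init(drvdata);	/* set active, autosuspend delay */
	if (rc)
		return rc;

	/* ... allocate queues, SRAM pools, register algorithms ... */

	cc_pm_go(drvdata);	/* only now enable runtime suspend */
	return 0;
}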
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 83a8aaa..a947d5a 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
+#include <linux/nospec.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
-#include "cc_ivgen.h"
#include "cc_pm.h"
#define CC_MAX_POLL_ITER 10
@@ -52,11 +52,38 @@
bool notif;
};
+static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
+ { BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
+ { BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
+};
+
static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif
+static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
+{
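+ /* Spectre-v1 hardening: clamp both indices before the table lookup */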
+ alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
+ slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
+
+ return cc_cpp_int_masks[alg][slot];
+}
+
void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
@@ -253,36 +280,12 @@
static int cc_do_send_request(struct cc_drvdata *drvdata,
struct cc_crypto_req *cc_req,
struct cc_hw_desc *desc, unsigned int len,
- bool add_comp, bool ivgen)
+ bool add_comp)
{
struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
unsigned int used_sw_slots;
- unsigned int iv_seq_len = 0;
unsigned int total_seq_len = len; /*initial sequence length*/
- struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
struct device *dev = drvdata_to_dev(drvdata);
- int rc;
-
- if (ivgen) {
- dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
- cc_req->ivgen_dma_addr_len,
- &cc_req->ivgen_dma_addr[0],
- &cc_req->ivgen_dma_addr[1],
- &cc_req->ivgen_dma_addr[2],
- cc_req->ivgen_size);
-
- /* Acquire IV from pool */
- rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
- cc_req->ivgen_dma_addr_len,
- cc_req->ivgen_size, iv_seq, &iv_seq_len);
-
- if (rc) {
- dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
- return rc;
- }
-
- total_seq_len += iv_seq_len;
- }
used_sw_slots = ((req_mgr_h->req_queue_head -
req_mgr_h->req_queue_tail) &
@@ -306,8 +309,6 @@
wmb();
/* STAT_PHASE_4: Push sequence */
- if (ivgen)
- enqueue_seq(drvdata, iv_seq, iv_seq_len);
enqueue_seq(drvdata, desc, len);
@@ -336,10 +337,12 @@
struct cc_bl_item *bli)
{
struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
spin_lock_bh(&mgr->bl_lock);
list_add_tail(&bli->list, &mgr->backlog);
++mgr->bl_len;
+ dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
spin_unlock_bh(&mgr->bl_lock);
tasklet_schedule(&mgr->comptask);
}
@@ -349,9 +352,7 @@
struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
struct cc_bl_item *bli;
struct cc_crypto_req *creq;
- struct crypto_async_request *req;
- bool ivgen;
- unsigned int total_len;
+ void *req;
struct device *dev = drvdata_to_dev(drvdata);
int rc;
@@ -359,26 +360,26 @@
while (mgr->bl_len) {
bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+ dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
+
spin_unlock(&mgr->bl_lock);
+
creq = &bli->creq;
- req = (struct crypto_async_request *)creq->user_arg;
+ req = creq->user_arg;
/*
* Notify the request we're moving out of the backlog
* but only if we haven't done so already.
*/
if (!bli->notif) {
- req->complete(req, -EINPROGRESS);
+ creq->user_cb(dev, req, -EINPROGRESS);
bli->notif = true;
}
- ivgen = !!creq->ivgen_dma_addr_len;
- total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
-
spin_lock(&mgr->hw_lock);
- rc = cc_queues_status(drvdata, mgr, total_len);
+ rc = cc_queues_status(drvdata, mgr, bli->len);
if (rc) {
/*
* There is still not room in the FIFO for
@@ -390,7 +391,7 @@
}
rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
- bli->len, false, ivgen);
+ bli->len, false);
spin_unlock(&mgr->hw_lock);
@@ -414,8 +415,6 @@
{
int rc;
struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
- bool ivgen = !!cc_req->ivgen_dma_addr_len;
- unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
struct device *dev = drvdata_to_dev(drvdata);
bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
gfp_t flags = cc_gfp_flags(req);
@@ -428,7 +427,7 @@
}
spin_lock_bh(&mgr->hw_lock);
- rc = cc_queues_status(drvdata, mgr, total_len);
+ rc = cc_queues_status(drvdata, mgr, len);
#ifdef CC_DEBUG_FORCE_BACKLOG
if (backlog_ok)
@@ -453,8 +452,7 @@
}
if (!rc)
- rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
- ivgen);
+ rc = cc_do_send_request(drvdata, cc_req, desc, len, false);
spin_unlock_bh(&mgr->hw_lock);
return rc;
@@ -494,7 +492,7 @@
reinit_completion(&drvdata->hw_queue_avail);
}
- rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
+ rc = cc_do_send_request(drvdata, cc_req, desc, len, true);
spin_unlock_bh(&mgr->hw_lock);
if (rc != -EINPROGRESS) {
@@ -579,6 +577,8 @@
drvdata->request_mgr_handle;
unsigned int *tail = &request_mgr_handle->req_queue_tail;
unsigned int *head = &request_mgr_handle->req_queue_head;
+ int rc;
+ u32 mask;
while (request_mgr_handle->axi_completed) {
request_mgr_handle->axi_completed--;
@@ -596,8 +596,22 @@
cc_req = &request_mgr_handle->req_queue[*tail];
+ if (cc_req->cpp.is_cpp) {
+
+ dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
+ cc_req->cpp.slot, cc_req->cpp.alg);
+ mask = cc_cpp_int_mask(cc_req->cpp.alg,
+ cc_req->cpp.slot);
+ rc = (drvdata->irq & mask ? -EPERM : 0);
+ dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
+ drvdata->irq, rc);
+ } else {
+ dev_dbg(dev, "None CPP request completion\n");
+ rc = 0;
+ }
+
if (cc_req->user_cb)
- cc_req->user_cb(dev, cc_req->user_arg, 0);
+ cc_req->user_cb(dev, cc_req->user_arg, rc);
*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
dev_dbg(dev, "Request completed. axi_completed=%d\n",
@@ -618,47 +632,50 @@
struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
-
+ struct device *dev = drvdata_to_dev(drvdata);
u32 irq;
- irq = (drvdata->irq & CC_COMP_IRQ_MASK);
+ dev_dbg(dev, "Completion handler called!\n");
+ irq = (drvdata->irq & drvdata->comp_mask);
- if (irq & CC_COMP_IRQ_MASK) {
- /* To avoid the interrupt from firing as we unmask it,
- * we clear it now
- */
- cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
+ /* To prevent the interrupt from firing as we unmask it,
+ * we clear it now
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
- /* Avoid race with above clear: Test completion counter
- * once more
- */
- request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
+ /* Avoid race with above clear: Test completion counter once more */
- while (request_mgr_handle->axi_completed) {
- do {
- proc_completions(drvdata);
- /* At this point (after proc_completions()),
- * request_mgr_handle->axi_completed is 0.
- */
- request_mgr_handle->axi_completed =
- cc_axi_comp_count(drvdata);
- } while (request_mgr_handle->axi_completed > 0);
+ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
- cc_iowrite(drvdata, CC_REG(HOST_ICR),
- CC_COMP_IRQ_MASK);
+ dev_dbg(dev, "AXI completion after updated: %d\n",
+ request_mgr_handle->axi_completed);
+ while (request_mgr_handle->axi_completed) {
+ do {
+ drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
+ irq = (drvdata->irq & drvdata->comp_mask);
+ proc_completions(drvdata);
+
+ /* At this point (after proc_completions()),
+ * request_mgr_handle->axi_completed is 0.
+ */
request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
- }
+ cc_axi_comp_count(drvdata);
+ } while (request_mgr_handle->axi_completed > 0);
+
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
+
+ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
}
+
/* after verifying that there is nothing to do,
* unmask AXI completion interrupt
*/
cc_iowrite(drvdata, CC_REG(HOST_IMR),
- cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+ cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
cc_proc_backlog(drvdata);
+ dev_dbg(dev, "Comp. handler done.\n");
}
/*
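The reworked handler above follows the classic "ack, drain, re-check, unmask" shape: clear the cause bits first so a new event re-latches during the drain, process completions until the counter stays at zero, and only then unmask. A compact sketch of that ordering over simulated registers (the stubs and variables below are placeholders; the driver uses `cc_ioread()`/`cc_iowrite()` on real MMIO):

```c
#include <stdint.h>

/* Simulated device registers */
static volatile uint32_t reg_irr;  /* pending causes  (HOST_IRR) */
static volatile uint32_t reg_icr;  /* write-to-clear  (HOST_ICR) */
static volatile uint32_t reg_imr;  /* 1 = masked      (HOST_IMR) */

static unsigned int axi_comp_count(void) { return 0; } /* stub */
static void proc_completions(void) { }                 /* stub */

static void comp_handler(uint32_t comp_mask)
{
	unsigned int completed;

	/* 1. Ack first, so an event arriving mid-drain re-latches and
	 *    is caught by the re-check below.
	 */
	reg_icr = reg_irr & comp_mask;

	/* 2. Drain until the completion counter stays at zero. */
	completed = axi_comp_count();
	while (completed) {
		do {
			proc_completions();
			completed = axi_comp_count();
		} while (completed);
		reg_icr = comp_mask;          /* ack anything new */
		completed = axi_comp_count(); /* final re-check   */
	}

	/* 3. Only now unmask the completion interrupt. */
	reg_imr &= ~comp_mask;
}

int main(void)
{
	comp_handler(0x4);
	return 0;
}
```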
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index 573cb97..f46cf76 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_request_mgr.h
* Request Manager
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
index c8c276f..62c885e 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.c
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include "cc_driver.h"
#include "cc_sram_mgr.h"
@@ -19,8 +19,7 @@
*/
void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
{
- /* Free "this" context */
- kfree(drvdata->sram_mgr_handle);
+ /* Nothing needed */
}
/**
@@ -48,7 +47,7 @@
}
/* Allocate "this" context */
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
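The switch to devm_kzalloc() above ties the allocation's lifetime to the underlying struct device, which is why cc_sram_mgr_fini() no longer frees anything: devres releases the buffer automatically when the driver detaches. A kernel-style sketch of the pattern (generic names; this compiles only inside a kernel tree):

```c
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_ctx {
	int state;
};

/* devm_kzalloc() registers the buffer on the device's devres list,
 * so it is freed automatically on unbind; no matching kfree() and
 * no fini-time cleanup are required.
 */
static int my_init(struct device *dev)
{
	struct my_ctx *ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	dev_set_drvdata(dev, ctx);
	return 0;
}
```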
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
index d48649f..1d14de9 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.h
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_SRAM_MGR_H__
#define __CC_SRAM_MGR_H__