Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
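
Within drivers/crypto/bcm, this update converts the Broadcom SPU driver from
the legacy ablkcipher interface to the skcipher API (req->nbytes becomes
req->cryptlen, req->info becomes req->iv), drops RC4/ARC4 support (SPU2 never
supported it), removes the deprecated CRYPTO_TFM_RES_* flag handling, adds
CRYPTO_ALG_ALLOCATES_MEMORY to the advertised algorithm flags, and switches
the debugfs statistics output from snprintf() to scnprintf().

For readers unfamiliar with that migration, below is a minimal sketch of the
skcipher registration shape the driver adopts. It is illustrative only, not
code from this patch: all demo_* names are hypothetical, the cipher work is
stubbed out, and it compiles only in a kernel tree.

/*
 * Minimal skcipher driver sketch (illustrative, hypothetical demo_* names).
 * Mirrors the pattern used by spu_register_skcipher() in the diff below.
 */
#include <linux/module.h>
#include <crypto/internal/skcipher.h>

struct demo_ctx {
	u8 key[32];
	unsigned int keylen;
};

static int demo_init_tfm(struct crypto_skcipher *tfm)
{
	/* Replaces the old tfm->crt_ablkcipher.reqsize assignment */
	crypto_skcipher_set_reqsize(tfm, 0);
	return 0;
}

static int demo_setkey(struct crypto_skcipher *tfm, const u8 *key,
		       unsigned int keylen)
{
	/* Per-tfm context comes from crypto_skcipher_ctx(), not
	 * crypto_ablkcipher_ctx() */
	struct demo_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen > sizeof(ctx->key))
		return -EINVAL;
	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}

static int demo_encrypt(struct skcipher_request *req)
{
	/* req->cryptlen and req->iv replace ablkcipher's nbytes/info;
	 * a real driver would process req->src into req->dst here */
	return 0;
}

static int demo_decrypt(struct skcipher_request *req)
{
	return 0;
}

static struct skcipher_alg demo_alg = {
	.base.cra_name		= "ecb(demo)",
	.base.cra_driver_name	= "ecb-demo-sketch",
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct demo_ctx),
	.base.cra_priority	= 100,
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_ALLOCATES_MEMORY |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
	.min_keysize		= 16,
	.max_keysize		= 32,
	.ivsize			= 0,
	.init			= demo_init_tfm,
	.setkey			= demo_setkey,
	.encrypt		= demo_encrypt,
	.decrypt		= demo_decrypt,
};

static int __init demo_mod_init(void)
{
	return crypto_register_skcipher(&demo_alg);
}

static void __exit demo_mod_exit(void)
{
	crypto_unregister_skcipher(&demo_alg);
}

module_init(demo_mod_init);
module_exit(demo_mod_exit);
MODULE_LICENSE("GPL");

In the diff that follows, this same shape appears in spu_register_skcipher(),
which wires up skcipher_setkey()/skcipher_encrypt()/skcipher_decrypt() and
carries per-request state in struct iproc_reqctx_s via
crypto_skcipher_set_reqsize().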
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 98b8483..1cb310a 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -110,8 +110,8 @@
}
/**
- * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used to
- * receive a SPU response message for an ablkcipher request. Includes buffers to
+ * spu_skcipher_rx_sg_create() - Build up the scatterlist of buffers used to
+ * receive a SPU response message for an skcipher request. Includes buffers to
* catch SPU message headers and the response data.
* @mssg: mailbox message containing the receive sg
* @rctx: crypto request context
@@ -130,7 +130,7 @@
* < 0 if an error
*/
static int
-spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
+spu_skcipher_rx_sg_create(struct brcm_message *mssg,
struct iproc_reqctx_s *rctx,
u8 rx_frag_num,
unsigned int chunksize, u32 stat_pad_len)
@@ -165,10 +165,6 @@
return -EFAULT;
}
- if (ctx->cipher.alg == CIPHER_ALG_RC4)
- /* Add buffer to catch 260-byte SUPDT field for RC4 */
- sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);
-
if (stat_pad_len)
sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
@@ -179,8 +175,8 @@
}
/**
- * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used to
- * send a SPU request message for an ablkcipher request. Includes SPU message
+ * spu_skcipher_tx_sg_create() - Build up the scatterlist of buffers used to
+ * send a SPU request message for an skcipher request. Includes SPU message
* headers and the request data.
* @mssg: mailbox message containing the transmit sg
* @rctx: crypto request context
@@ -198,7 +194,7 @@
* < 0 if an error
*/
static int
-spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
+spu_skcipher_tx_sg_create(struct brcm_message *mssg,
struct iproc_reqctx_s *rctx,
u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
{
@@ -283,7 +279,7 @@
}
/**
- * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
+ * handle_skcipher_req() - Submit as much of a block cipher request as fits in
* a single SPU request message, starting at the current position in the request
* data.
* @rctx: Crypto request context
@@ -300,24 +296,23 @@
* asynchronously
* Any other value indicates an error
*/
-static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
+static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
{
struct spu_hw *spu = &iproc_priv.spu;
struct crypto_async_request *areq = rctx->parent;
- struct ablkcipher_request *req =
- container_of(areq, struct ablkcipher_request, base);
+ struct skcipher_request *req =
+ container_of(areq, struct skcipher_request, base);
struct iproc_ctx_s *ctx = rctx->ctx;
struct spu_cipher_parms cipher_parms;
- int err = 0;
- unsigned int chunksize = 0; /* Num bytes of request to submit */
- int remaining = 0; /* Bytes of request still to process */
+ int err;
+ unsigned int chunksize; /* Num bytes of request to submit */
+ int remaining; /* Bytes of request still to process */
int chunk_start; /* Beginning of data for current SPU msg */
/* IV or ctr value to use in this SPU msg */
u8 local_iv_ctr[MAX_IV_SIZE];
u32 stat_pad_len; /* num bytes to align status field */
u32 pad_len; /* total length of all padding */
- bool update_key = false;
struct brcm_message *mssg; /* mailbox message */
/* number of entries in src and dst sg in mailbox message. */
@@ -391,28 +386,6 @@
}
}
- if (ctx->cipher.alg == CIPHER_ALG_RC4) {
- rx_frag_num++;
- if (chunk_start) {
- /*
- * for non-first RC4 chunks, use SUPDT from previous
- * response as key for this chunk.
- */
- cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
- update_key = true;
- cipher_parms.type = CIPHER_TYPE_UPDT;
- } else if (!rctx->is_encrypt) {
- /*
- * First RC4 chunk. For decrypt, key in pre-built msg
- * header may have been changed if encrypt required
- * multiple chunks. So revert the key to the
- * ctx->enckey value.
- */
- update_key = true;
- cipher_parms.type = CIPHER_TYPE_INIT;
- }
- }
-
if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
flow_log("max_payload infinite\n");
else
@@ -425,14 +398,9 @@
memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
sizeof(rctx->msg_buf.bcm_spu_req_hdr));
- /*
- * Pass SUPDT field as key. Key field in finish() call is only used
- * when update_key has been set above for RC4. Will be ignored in
- * all other cases.
- */
spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
ctx->spu_req_hdr_len, !(rctx->is_encrypt),
- &cipher_parms, update_key, chunksize);
+ &cipher_parms, chunksize);
atomic64_add(chunksize, &iproc_priv.bytes_out);
@@ -468,7 +436,7 @@
spu->spu_xts_tweak_in_payload())
rx_frag_num++; /* extra sg to insert tweak */
- err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
+ err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
stat_pad_len);
if (err)
return err;
@@ -482,7 +450,7 @@
spu->spu_xts_tweak_in_payload())
tx_frag_num++; /* extra sg to insert tweak */
- err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
+ err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
pad_len);
if (err)
return err;
@@ -495,16 +463,16 @@
}
/**
- * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the
+ * handle_skcipher_resp() - Process a block cipher SPU response. Updates the
* total received count for the request and updates global stats.
* @rctx: Crypto request context
*/
-static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
+static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
{
struct spu_hw *spu = &iproc_priv.spu;
#ifdef DEBUG
struct crypto_async_request *areq = rctx->parent;
- struct ablkcipher_request *req = ablkcipher_request_cast(areq);
+ struct skcipher_request *req = skcipher_request_cast(areq);
#endif
struct iproc_ctx_s *ctx = rctx->ctx;
u32 payload_len;
@@ -527,9 +495,6 @@
__func__, rctx->total_received, payload_len);
dump_sg(req->dst, rctx->total_received, payload_len);
- if (ctx->cipher.alg == CIPHER_ALG_RC4)
- packet_dump(" supdt ", rctx->msg_buf.c.supdt_tweak,
- SPU_SUPDT_LEN);
rctx->total_received += payload_len;
if (rctx->total_received == rctx->total_todo) {
@@ -698,7 +663,7 @@
/* number of bytes still to be hashed in this req */
unsigned int nbytes_to_hash = 0;
- int err = 0;
+ int err;
unsigned int chunksize = 0; /* length of hash carry + new data */
/*
* length of new data, not from hash carry, to be submitted in
@@ -1664,7 +1629,7 @@
struct spu_hw *spu = &iproc_priv.spu;
struct brcm_message *mssg = msg;
struct iproc_reqctx_s *rctx;
- int err = 0;
+ int err;
rctx = mssg->ctx;
if (unlikely(!rctx)) {
@@ -1685,8 +1650,8 @@
/* Process the SPU response message */
switch (rctx->ctx->alg->type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- handle_ablkcipher_resp(rctx);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ handle_skcipher_resp(rctx);
break;
case CRYPTO_ALG_TYPE_AHASH:
handle_ahash_resp(rctx);
@@ -1708,8 +1673,8 @@
spu_chunk_cleanup(rctx);
switch (rctx->ctx->alg->type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = handle_ablkcipher_req(rctx);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = handle_skcipher_req(rctx);
break;
case CRYPTO_ALG_TYPE_AHASH:
err = handle_ahash_req(rctx);
@@ -1739,7 +1704,7 @@
/* ==================== Kernel Cryptographic API ==================== */
/**
- * ablkcipher_enqueue() - Handle ablkcipher encrypt or decrypt request.
+ * skcipher_enqueue() - Handle skcipher encrypt or decrypt request.
* @req: Crypto API request
* @encrypt: true if encrypting; false if decrypting
*
@@ -1747,11 +1712,11 @@
* asynchronously
* < 0 if an error
*/
-static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
+static int skcipher_enqueue(struct skcipher_request *req, bool encrypt)
{
- struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
+ struct iproc_reqctx_s *rctx = skcipher_request_ctx(req);
struct iproc_ctx_s *ctx =
- crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
int err;
flow_log("%s() enc:%u\n", __func__, encrypt);
@@ -1761,7 +1726,7 @@
rctx->parent = &req->base;
rctx->is_encrypt = encrypt;
rctx->bd_suppress = false;
- rctx->total_todo = req->nbytes;
+ rctx->total_todo = req->cryptlen;
rctx->src_sent = 0;
rctx->total_sent = 0;
rctx->total_received = 0;
@@ -1782,15 +1747,15 @@
ctx->cipher.mode == CIPHER_MODE_GCM ||
ctx->cipher.mode == CIPHER_MODE_CCM) {
rctx->iv_ctr_len =
- crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
- memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
+ crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
+ memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len);
} else {
rctx->iv_ctr_len = 0;
}
/* Choose a SPU to process this request */
rctx->chan_idx = select_channel();
- err = handle_ablkcipher_req(rctx);
+ err = handle_skcipher_req(rctx);
if (err != -EINPROGRESS)
/* synchronous result */
spu_chunk_cleanup(rctx);
@@ -1798,13 +1763,13 @@
return err;
}
-static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des_key(cipher, key);
+ err = verify_skcipher_des_key(cipher, key);
if (err)
return err;
@@ -1812,13 +1777,13 @@
return 0;
}
-static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int threedes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
int err;
- err = verify_ablkcipher_des3_key(cipher, key);
+ err = verify_skcipher_des3_key(cipher, key);
if (err)
return err;
@@ -1826,10 +1791,10 @@
return 0;
}
-static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
if (ctx->cipher.mode == CIPHER_MODE_XTS)
/* XTS includes two keys of equal length */
@@ -1846,7 +1811,6 @@
ctx->cipher_type = CIPHER_TYPE_AES256;
break;
default:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
@@ -1854,36 +1818,16 @@
return 0;
}
-static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
- unsigned int keylen)
-{
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
- int i;
-
- ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
-
- ctx->enckey[0] = 0x00; /* 0x00 */
- ctx->enckey[1] = 0x00; /* i */
- ctx->enckey[2] = 0x00; /* 0x00 */
- ctx->enckey[3] = 0x00; /* j */
- for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
- ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];
-
- ctx->cipher_type = CIPHER_TYPE_INIT;
-
- return 0;
-}
-
-static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
struct spu_hw *spu = &iproc_priv.spu;
- struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
+ struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
struct spu_cipher_parms cipher_parms;
u32 alloc_len = 0;
int err;
- flow_log("ablkcipher_setkey() keylen: %d\n", keylen);
+ flow_log("skcipher_setkey() keylen: %d\n", keylen);
flow_dump(" key: ", key, keylen);
switch (ctx->cipher.alg) {
@@ -1896,9 +1840,6 @@
case CIPHER_ALG_AES:
err = aes_setkey(cipher, key, keylen);
break;
- case CIPHER_ALG_RC4:
- err = rc4_setkey(cipher, key, keylen);
- break;
default:
pr_err("%s() Error: unknown cipher alg\n", __func__);
err = -EINVAL;
@@ -1906,11 +1847,9 @@
if (err)
return err;
- /* RC4 already populated ctx->enkey */
- if (ctx->cipher.alg != CIPHER_ALG_RC4) {
- memcpy(ctx->enckey, key, keylen);
- ctx->enckeylen = keylen;
- }
+ memcpy(ctx->enckey, key, keylen);
+ ctx->enckeylen = keylen;
+
/* SPU needs XTS keys in the reverse order the crypto API presents */
if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
(ctx->cipher.mode == CIPHER_MODE_XTS)) {
@@ -1926,7 +1865,7 @@
alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
cipher_parms.iv_buf = NULL;
- cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
+ cipher_parms.iv_len = crypto_skcipher_ivsize(cipher);
flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
cipher_parms.alg = ctx->cipher.alg;
@@ -1950,17 +1889,17 @@
return 0;
}
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
{
- flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);
+ flow_log("skcipher_encrypt() nbytes:%u\n", req->cryptlen);
- return ablkcipher_enqueue(req, true);
+ return skcipher_enqueue(req, true);
}
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
{
- flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
- return ablkcipher_enqueue(req, false);
+ flow_log("skcipher_decrypt() nbytes:%u\n", req->cryptlen);
+ return skcipher_enqueue(req, false);
}
static int ahash_enqueue(struct ahash_request *req)
@@ -1968,7 +1907,7 @@
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
- int err = 0;
+ int err;
const char *alg_name;
flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
@@ -2300,7 +2239,7 @@
static int ahash_digest(struct ahash_request *req)
{
- int err = 0;
+ int err;
flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
@@ -2873,9 +2812,6 @@
goto badkey;
}
break;
- case CIPHER_ALG_RC4:
- ctx->cipher_type = CIPHER_TYPE_INIT;
- break;
default:
pr_err("%s() Error: Unknown cipher alg\n", __func__);
return -EINVAL;
@@ -2894,13 +2830,8 @@
ctx->fallback_cipher->base.crt_flags |=
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
- if (ret) {
+ if (ret)
flow_log(" fallback setkey() returned:%d\n", ret);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- (ctx->fallback_cipher->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
}
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
@@ -2916,7 +2847,6 @@
ctx->authkeylen = 0;
ctx->digestsize = 0;
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -2968,13 +2898,8 @@
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
ret = crypto_aead_setkey(ctx->fallback_cipher, key,
keylen + ctx->salt_len);
- if (ret) {
+ if (ret)
flow_log(" fallback setkey() returned:%d\n", ret);
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- (ctx->fallback_cipher->base.crt_flags &
- CRYPTO_TFM_RES_MASK);
- }
}
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
@@ -2993,7 +2918,6 @@
ctx->authkeylen = 0;
ctx->digestsize = 0;
- crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -3259,7 +3183,9 @@
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = AES_BLOCK_SIZE,
@@ -3282,7 +3208,9 @@
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = AES_BLOCK_SIZE,
@@ -3305,7 +3233,9 @@
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = AES_BLOCK_SIZE,
@@ -3328,7 +3258,9 @@
.cra_name = "authenc(hmac(md5),cbc(des))",
.cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3351,7 +3283,9 @@
.cra_name = "authenc(hmac(sha1),cbc(des))",
.cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3374,7 +3308,9 @@
.cra_name = "authenc(hmac(sha224),cbc(des))",
.cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3397,7 +3333,9 @@
.cra_name = "authenc(hmac(sha256),cbc(des))",
.cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3420,7 +3358,9 @@
.cra_name = "authenc(hmac(sha384),cbc(des))",
.cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3443,7 +3383,9 @@
.cra_name = "authenc(hmac(sha512),cbc(des))",
.cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3466,7 +3408,9 @@
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3489,7 +3433,9 @@
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3512,7 +3458,9 @@
.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3535,7 +3483,9 @@
.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3558,7 +3508,9 @@
.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3581,7 +3533,9 @@
.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3598,39 +3552,16 @@
.auth_first = 0,
},
-/* ABLKCIPHER algorithms. */
+/* SKCIPHER algorithms. */
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(arc4)",
- .cra_driver_name = "ecb-arc4-iproc",
- .cra_blocksize = ARC4_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = ARC4_MIN_KEY_SIZE,
- .max_keysize = ARC4_MAX_KEY_SIZE,
- .ivsize = 0,
- }
- },
- .cipher_info = {
- .alg = CIPHER_ALG_RC4,
- .mode = CIPHER_MODE_NONE,
- },
- .auth_info = {
- .alg = HASH_ALG_NONE,
- .mode = HASH_MODE_NONE,
- },
- },
- {
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ofb(des)",
- .cra_driver_name = "ofb-des-iproc",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ofb(des)",
+ .base.cra_driver_name = "ofb-des-iproc",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_DES,
@@ -3642,16 +3573,14 @@
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-iproc",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-iproc",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_DES,
@@ -3663,16 +3592,14 @@
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-iproc",
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-iproc",
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_DES,
@@ -3684,16 +3611,14 @@
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ofb(des3_ede)",
- .cra_driver_name = "ofb-des3-iproc",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ofb(des3_ede)",
+ .base.cra_driver_name = "ofb-des3-iproc",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_3DES,
@@ -3705,16 +3630,14 @@
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-iproc",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-iproc",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_3DES,
@@ -3726,16 +3649,14 @@
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-iproc",
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-iproc",
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_3DES,
@@ -3747,16 +3668,14 @@
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "ofb-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ofb(aes)",
+ .base.cra_driver_name = "ofb-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3768,16 +3687,14 @@
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3789,16 +3706,14 @@
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = 0,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3810,16 +3725,14 @@
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3831,16 +3744,14 @@
},
},
{
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .alg.crypto = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-iproc",
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ablkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- }
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-iproc",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
},
.cipher_info = {
.alg = CIPHER_ALG_AES,
@@ -3861,7 +3772,8 @@
.cra_name = "md5",
.cra_driver_name = "md5-iproc",
.cra_blocksize = MD5_BLOCK_WORDS * 4,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.cipher_info = {
@@ -4295,16 +4207,17 @@
return 0;
}
-static int ablkcipher_cra_init(struct crypto_tfm *tfm)
+static int skcipher_init_tfm(struct crypto_skcipher *skcipher)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+ struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
struct iproc_alg_s *cipher_alg;
flow_log("%s()\n", __func__);
- tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s);
+ crypto_skcipher_set_reqsize(skcipher, sizeof(struct iproc_reqctx_s));
- cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto);
+ cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher);
return generic_cra_init(tfm, cipher_alg);
}
@@ -4376,6 +4289,11 @@
atomic_dec(&iproc_priv.session_count);
}
+static void skcipher_exit_tfm(struct crypto_skcipher *tfm)
+{
+ generic_cra_exit(crypto_skcipher_tfm(tfm));
+}
+
static void aead_cra_exit(struct crypto_aead *aead)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
@@ -4480,7 +4398,7 @@
for (i = 0; i < iproc_priv.spu.num_chan; i++) {
iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
if (IS_ERR(iproc_priv.mbox[i])) {
- err = (int)PTR_ERR(iproc_priv.mbox[i]);
+ err = PTR_ERR(iproc_priv.mbox[i]);
dev_err(dev,
"Mbox channel %d request failed with err %d",
i, err);
@@ -4537,37 +4455,30 @@
atomic_set(&iproc_priv.bad_icv, 0);
}
-static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
+static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
{
- struct spu_hw *spu = &iproc_priv.spu;
- struct crypto_alg *crypto = &driver_alg->alg.crypto;
+ struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
int err;
- /* SPU2 does not support RC4 */
- if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
- (spu->spu_type == SPU_TYPE_SPU2))
- return 0;
+ crypto->base.cra_module = THIS_MODULE;
+ crypto->base.cra_priority = cipher_pri;
+ crypto->base.cra_alignmask = 0;
+ crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
+ crypto->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
- crypto->cra_module = THIS_MODULE;
- crypto->cra_priority = cipher_pri;
- crypto->cra_alignmask = 0;
- crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
+ crypto->init = skcipher_init_tfm;
+ crypto->exit = skcipher_exit_tfm;
+ crypto->setkey = skcipher_setkey;
+ crypto->encrypt = skcipher_encrypt;
+ crypto->decrypt = skcipher_decrypt;
- crypto->cra_init = ablkcipher_cra_init;
- crypto->cra_exit = generic_cra_exit;
- crypto->cra_type = &crypto_ablkcipher_type;
- crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
-
- crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
- crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
- crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;
-
- err = crypto_register_alg(crypto);
+ err = crypto_register_skcipher(crypto);
/* Mark alg as having been registered, if successful */
if (err == 0)
driver_alg->registered = true;
- pr_debug(" registered ablkcipher %s\n", crypto->cra_driver_name);
+ pr_debug(" registered skcipher %s\n", crypto->base.cra_driver_name);
return err;
}
@@ -4594,7 +4505,8 @@
hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
hash->halg.base.cra_init = ahash_cra_init;
hash->halg.base.cra_exit = generic_cra_exit;
- hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY;
hash->halg.statesize = sizeof(struct spu_hash_export_s);
if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
@@ -4638,7 +4550,7 @@
aead->base.cra_alignmask = 0;
aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
- aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
+ aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
/* setkey set in alg initialization */
aead->setauthsize = aead_setauthsize;
aead->encrypt = aead_encrypt;
@@ -4662,8 +4574,8 @@
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
switch (driver_algs[i].type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = spu_register_ablkcipher(&driver_algs[i]);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ err = spu_register_skcipher(&driver_algs[i]);
break;
case CRYPTO_ALG_TYPE_AHASH:
err = spu_register_ahash(&driver_algs[i]);
@@ -4693,8 +4605,8 @@
if (!driver_algs[j].registered)
continue;
switch (driver_algs[j].type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- crypto_unregister_alg(&driver_algs[j].alg.crypto);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&driver_algs[j].alg.skcipher);
driver_algs[j].registered = false;
break;
case CRYPTO_ALG_TYPE_AHASH:
@@ -4764,21 +4676,20 @@
matched_spu_type = of_device_get_match_data(dev);
if (!matched_spu_type) {
- dev_err(&pdev->dev, "Failed to match device\n");
+ dev_err(dev, "Failed to match device\n");
return -ENODEV;
}
spu->spu_type = matched_spu_type->type;
spu->spu_subtype = matched_spu_type->subtype;
- i = 0;
for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
if (IS_ERR(spu->reg_vbase[i])) {
err = PTR_ERR(spu->reg_vbase[i]);
- dev_err(&pdev->dev, "Failed to map registers: %d\n",
+ dev_err(dev, "Failed to map registers: %d\n",
err);
spu->reg_vbase[i] = NULL;
return err;
@@ -4794,7 +4705,7 @@
{
struct device *dev = &pdev->dev;
struct spu_hw *spu = &iproc_priv.spu;
- int err = 0;
+ int err;
iproc_priv.pdev = pdev;
platform_set_drvdata(iproc_priv.pdev,
@@ -4804,7 +4715,7 @@
if (err < 0)
goto failure;
- err = spu_mb_init(&pdev->dev);
+ err = spu_mb_init(dev);
if (err < 0)
goto failure;
@@ -4813,7 +4724,7 @@
else if (spu->spu_type == SPU_TYPE_SPU2)
iproc_priv.bcm_hdr_len = 0;
- spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);
+ spu_functions_register(dev, spu->spu_type, spu->spu_subtype);
spu_counters_init();
@@ -4850,10 +4761,10 @@
continue;
switch (driver_algs[i].type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- crypto_unregister_alg(&driver_algs[i].alg.crypto);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
dev_dbg(dev, " unregistered cipher %s\n",
- driver_algs[i].alg.crypto.cra_driver_name);
+ driver_algs[i].alg.skcipher.base.cra_driver_name);
driver_algs[i].registered = false;
break;
case CRYPTO_ALG_TYPE_AHASH:
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 01feed2..892823e 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -1,3 +1,4 @@
+
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2016 Broadcom
@@ -11,6 +12,7 @@
#include <linux/mailbox_client.h>
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/arc4.h>
#include <crypto/gcm.h>
@@ -102,7 +104,7 @@
struct iproc_alg_s {
u32 type;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg hash;
struct aead_alg aead;
} alg;
@@ -149,7 +151,7 @@
u8 rx_stat[ALIGN(SPU_RX_STATUS_LEN, SPU_MSG_ALIGN)];
union {
- /* Buffers only used for ablkcipher */
+ /* Buffers only used for skcipher */
struct {
/*
* Field used for either SUPDT when RC4 is used
@@ -214,7 +216,7 @@
/*
* Buffer to hold SPU message header template. Template is created at
- * setkey time for ablkcipher requests, since most of the fields in the
+ * setkey time for skcipher requests, since most of the fields in the
* header are known at that time. At request time, just fill in a few
* missing pieces related to length of data in the request and IVs, etc.
*/
@@ -256,7 +258,7 @@
/* total todo, rx'd, and sent for this request */
unsigned int total_todo;
- unsigned int total_received; /* only valid for ablkcipher */
+ unsigned int total_received; /* only valid for skcipher */
unsigned int total_sent;
/*
@@ -386,7 +388,6 @@
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size);
void (*spu_request_pad)(u8 *pad_start, u32 gcm_padding,
u32 hash_pad_len, enum hash_alg auth_alg,
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index e7562e9..fe126f9 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -222,10 +222,6 @@
cipher_key_len = 24;
name = "3DES";
break;
- case CIPHER_ALG_RC4:
- cipher_key_len = 260;
- name = "ARC4";
- break;
case CIPHER_ALG_AES:
switch (cipher_type) {
case CIPHER_TYPE_AES128:
@@ -919,21 +915,16 @@
* @spu_req_hdr_len: Length in bytes of the SPU request header
* @isInbound: 0 encrypt, 1 decrypt
* @cipher_parms: Parameters describing cipher operation to be performed
- * @update_key: If true, rewrite the cipher key in SCTX
* @data_size: Length of the data in the BD field
*
* Assumes much of the header was already filled in at setkey() time in
* spum_cipher_req_init().
- * spum_cipher_req_init() fills in the encryption key. For RC4, when submitting
- * a request for a non-first chunk, we use the 260-byte SUPDT field from the
- * previous response as the key. update_key is true for this case. Unused in all
- * other cases.
+ * spum_cipher_req_init() fills in the encryption key.
*/
void spum_cipher_req_finish(u8 *spu_hdr,
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size)
{
struct SPUHEADER *spuh;
@@ -948,11 +939,6 @@
flow_log(" in: %u\n", is_inbound);
flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
cipher_parms->type);
- if (update_key) {
- flow_log(" cipher key len: %u\n", cipher_parms->key_len);
- flow_dump(" key: ", cipher_parms->key_buf,
- cipher_parms->key_len);
- }
/*
* In XTS mode, API puts "i" parameter (block tweak) in IV. For
@@ -981,13 +967,6 @@
else
cipher_bits &= ~CIPHER_INBOUND;
- /* update encryption key for RC4 on non-first chunk */
- if (update_key) {
- spuh->sa.cipher_flags |=
- cipher_parms->type << CIPHER_TYPE_SHIFT;
- memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len);
- }
-
if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len)
/* cipher iv provided so put it in here */
memcpy(bdesc_ptr - cipher_parms->iv_len, cipher_parms->iv_buf,
diff --git a/drivers/crypto/bcm/spu.h b/drivers/crypto/bcm/spu.h
index b247bc5..dd13238 100644
--- a/drivers/crypto/bcm/spu.h
+++ b/drivers/crypto/bcm/spu.h
@@ -251,7 +251,6 @@
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size);
void spum_request_pad(u8 *pad_start,
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 2add510..c860ffb 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -542,7 +542,7 @@
/**
* spu2_fmd_init() - At setkey time, initialize the fixed meta data for
- * subsequent ablkcipher requests for this context.
+ * subsequent skcipher requests for this context.
* @spu2_cipher_type: Cipher algorithm
* @spu2_mode: Cipher mode
* @cipher_key_len: Length of cipher key, in bytes
@@ -1107,13 +1107,13 @@
}
/**
- * spu_cipher_req_init() - Build an ablkcipher SPU2 request message header,
+ * spu_cipher_req_init() - Build an skcipher SPU2 request message header,
* including FMD and OMD.
* @spu_hdr: Location of start of SPU request (FMD field)
* @cipher_parms: Parameters describing cipher request
*
* Called at setkey time to initialize a msg header that can be reused for all
- * subsequent ablkcipher requests. Construct the message starting at spu_hdr.
+ * subsequent skcipher requests. Construct the message starting at spu_hdr.
* Caller should allocate this buffer in DMA-able memory at least
* SPU_HEADER_ALLOC_LEN bytes long.
*
@@ -1170,21 +1170,16 @@
* @spu_req_hdr_len: Length in bytes of the SPU request header
* @isInbound: 0 encrypt, 1 decrypt
* @cipher_parms: Parameters describing cipher operation to be performed
- * @update_key: If true, rewrite the cipher key in SCTX
* @data_size: Length of the data in the BD field
*
* Assumes much of the header was already filled in at setkey() time in
* spu_cipher_req_init().
- * spu_cipher_req_init() fills in the encryption key. For RC4, when submitting a
- * request for a non-first chunk, we use the 260-byte SUPDT field from the
- * previous response as the key. update_key is true for this case. Unused in all
- * other cases.
+ * spu_cipher_req_init() fills in the encryption key.
*/
void spu2_cipher_req_finish(u8 *spu_hdr,
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size)
{
struct SPU2_FMD *fmd;
@@ -1196,11 +1191,6 @@
flow_log(" in: %u\n", is_inbound);
flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
cipher_parms->type);
- if (update_key) {
- flow_log(" cipher key len: %u\n", cipher_parms->key_len);
- flow_dump(" key: ", cipher_parms->key_buf,
- cipher_parms->key_len);
- }
flow_log(" iv len: %d\n", cipher_parms->iv_len);
flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
flow_log(" data_size: %u\n", data_size);
diff --git a/drivers/crypto/bcm/spu2.h b/drivers/crypto/bcm/spu2.h
index 03af6c3..6e666bf 100644
--- a/drivers/crypto/bcm/spu2.h
+++ b/drivers/crypto/bcm/spu2.h
@@ -200,7 +200,6 @@
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size);
void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len,
enum hash_alg auth_alg, enum hash_mode auth_mode,
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index 7227dbf..77aeedb 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -366,88 +366,88 @@
ipriv = filp->private_data;
out_offset = 0;
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Number of SPUs.........%u\n",
ipriv->spu.num_spu);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Current sessions.......%u\n",
atomic_read(&ipriv->session_count));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Session count..........%u\n",
atomic_read(&ipriv->stream_count));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Cipher setkey..........%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_CIPHER]));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Cipher Ops.............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_CIPHER]));
for (alg = 0; alg < CIPHER_ALG_LAST; alg++) {
for (mode = 0; mode < CIPHER_MODE_LAST; mode++) {
op_cnt = atomic_read(&ipriv->cipher_cnt[alg][mode]);
if (op_cnt) {
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
spu_alg_name(alg, mode), op_cnt);
}
}
}
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Hash Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_HASH]));
for (alg = 0; alg < HASH_ALG_LAST; alg++) {
op_cnt = atomic_read(&ipriv->hash_cnt[alg]);
if (op_cnt) {
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
hash_alg_name[alg], op_cnt);
}
}
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"HMAC setkey............%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_HMAC]));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"HMAC Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_HMAC]));
for (alg = 0; alg < HASH_ALG_LAST; alg++) {
op_cnt = atomic_read(&ipriv->hmac_cnt[alg]);
if (op_cnt) {
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
hash_alg_name[alg], op_cnt);
}
}
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"AEAD setkey............%u\n",
atomic_read(&ipriv->setkey_cnt[SPU_OP_AEAD]));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"AEAD Ops...............%u\n",
atomic_read(&ipriv->op_counts[SPU_OP_AEAD]));
for (alg = 0; alg < AEAD_TYPE_LAST; alg++) {
op_cnt = atomic_read(&ipriv->aead_cnt[alg]);
if (op_cnt) {
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
" %-13s%11u\n",
aead_alg_name[alg], op_cnt);
}
}
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Bytes of req data......%llu\n",
(u64)atomic64_read(&ipriv->bytes_out));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Bytes of resp data.....%llu\n",
(u64)atomic64_read(&ipriv->bytes_in));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Mailbox full...........%u\n",
atomic_read(&ipriv->mb_no_spc));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Mailbox send failures..%u\n",
atomic_read(&ipriv->mb_send_fail));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
"Check ICV errors.......%u\n",
atomic_read(&ipriv->bad_icv));
if (ipriv->spu.spu_type == SPU_TYPE_SPUM)
@@ -455,7 +455,7 @@
spu_ofifo_ctrl = ioread32(ipriv->spu.reg_vbase[i] +
SPU_OFIFO_CTRL);
fifo_len = spu_ofifo_ctrl & SPU_FIFO_WATERMARK;
- out_offset += snprintf(buf + out_offset,
+ out_offset += scnprintf(buf + out_offset,
out_count - out_offset,
"SPU %d output FIFO high water.....%u\n",
i, fifo_len);