Update Linux to v5.4.2

This pulls in the v5.4.2 state of the Chelsio crypto driver
(drivers/crypto/chelsio/chcr_algo.c). The notable chcr changes are:

- Track inflight work requests (chcr_inc_wrcount()/chcr_dec_wrcount())
  so the device can be detached safely on PCI shutdown; once the device
  is in CHCR_DETACH state, new requests fail with -ENXIO or are routed
  to the software fallback.
- Switch the software fallback to the sync skcipher API and replace the
  aes-generic cipher tfm with the AES library
  (aes_expandkey()/aes_encrypt()).
- Pass dma_addr_t by value in the dsgl/ulptx walk helpers.
- Build AEAD work requests with the IV inline after the physical DSGL
  instead of adding separate AAD and IV scatterlist entries.
- Drop the removed shash_desc::flags assignments and the obsolete
  .geniv setting.

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 010bbf6..38ee38b 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -123,7 +123,7 @@
static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
- return ctx->dev->u_ctx;
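+ /* chcr_dev is now embedded in uld_ctx, so recover the containing
+ * structure directly instead of chasing a back-pointer.
+ */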
+ return container_of(ctx->dev, struct uld_ctx, dev);
}
static inline int is_ofld_imm(const struct sk_buff *skb)
@@ -198,18 +198,36 @@
*err = 0;
}
-static inline void chcr_handle_aead_resp(struct aead_request *req,
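+/* Inflight work-request accounting: take a reference only while the
+ * device is attached. Once the PCI shutdown path has moved the device
+ * to CHCR_DETACH, new work is refused and the remaining references are
+ * drained before lldi/padap go away.
+ */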
+static int chcr_inc_wrcount(struct chcr_dev *dev)
+{
+ if (dev->state == CHCR_DETACH)
+ return 1;
+ atomic_inc(&dev->inflight);
+ return 0;
+}
+
+static inline void chcr_dec_wrcount(struct chcr_dev *dev)
+{
+ atomic_dec(&dev->inflight);
+}
+
+static inline int chcr_handle_aead_resp(struct aead_request *req,
unsigned char *input,
int err)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_dev *dev = a_ctx(tfm)->dev;
chcr_aead_common_exit(req);
if (reqctx->verify == VERIFY_SW) {
chcr_verify_tag(req, input, &err);
reqctx->verify = VERIFY_HW;
}
+ chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
+
+ return err;
}
static void get_aes_decrypt_key(unsigned char *dec_key,
@@ -391,7 +409,7 @@
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
size_t size,
- dma_addr_t *addr)
+ dma_addr_t addr)
{
int j;
@@ -399,7 +417,7 @@
return;
j = walk->nents;
walk->to->len[j % 8] = htons(size);
- walk->to->addr[j % 8] = cpu_to_be64(*addr);
+ walk->to->addr[j % 8] = cpu_to_be64(addr);
j++;
if ((j % 8) == 0)
walk->to++;
@@ -473,16 +491,16 @@
static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
size_t size,
- dma_addr_t *addr)
+ dma_addr_t addr)
{
if (!size)
return;
if (walk->nents == 0) {
walk->sgl->len0 = cpu_to_be32(size);
- walk->sgl->addr0 = cpu_to_be64(*addr);
+ walk->sgl->addr0 = cpu_to_be64(addr);
} else {
- walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
+ walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
walk->pair_idx = !walk->pair_idx;
if (!walk->pair_idx)
@@ -673,7 +691,7 @@
return min(srclen, dstlen);
}
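+/* The software fallback is now a synchronous skcipher: the request can
+ * live on the stack (SYNC_SKCIPHER_REQUEST_ON_STACK) because completion
+ * is never asynchronous.
+ */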
-static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
+static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
u32 flags,
struct scatterlist *src,
struct scatterlist *dst,
@@ -683,9 +701,9 @@
{
int err;
- SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
- skcipher_request_set_tfm(subreq, cipher);
+ skcipher_request_set_sync_tfm(subreq, cipher);
skcipher_request_set_callback(subreq, flags, NULL, NULL);
skcipher_request_set_crypt(subreq, src, dst,
nbytes, iv);
@@ -717,7 +735,7 @@
htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
chcr_req->wreq.rx_chid_to_rx_q_id =
- FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
+ FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
!!lcb, ctx->tx_qidx);
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
@@ -773,7 +791,7 @@
}
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
+ FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -856,13 +874,14 @@
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
int err = 0;
- crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
+ crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
+ cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm->crt_flags |=
- crypto_skcipher_get_flags(ablkctx->sw_cipher) &
+ crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
CRYPTO_TFM_RES_MASK;
return err;
}
@@ -1004,22 +1023,21 @@
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- struct crypto_cipher *cipher;
+ struct crypto_aes_ctx aes;
int ret, i;
u8 *key;
unsigned int keylen;
int round = reqctx->last_req_len / AES_BLOCK_SIZE;
int round8 = round / 8;
- cipher = ablkctx->aes_generic;
memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
keylen = ablkctx->enckey_len / 2;
key = ablkctx->key + keylen;
- ret = crypto_cipher_setkey(cipher, key, keylen);
+ ret = aes_expandkey(&aes, key, keylen);
if (ret)
- goto out;
- crypto_cipher_encrypt_one(cipher, iv, iv);
+ return ret;
+ aes_encrypt(&aes, iv, iv);
for (i = 0; i < round8; i++)
gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
@@ -1027,9 +1045,10 @@
gf128mul_x_ble((le128 *)iv, (le128 *)iv);
if (!isfinal)
- crypto_cipher_decrypt_one(cipher, iv, iv);
-out:
- return ret;
+ aes_decrypt(&aes, iv, iv);
+
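+ /* The expanded key lives on the stack; wipe it before returning. */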
+ memzero_explicit(&aes, sizeof(aes));
+ return 0;
}
static int chcr_update_cipher_iv(struct ablkcipher_request *req,
@@ -1075,8 +1094,8 @@
int ret = 0;
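+ /* A partially filled final block still consumes one counter value,
+ * so round the processed byte count up, not down.
+ */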
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
- ctr_add_iv(iv, req->info, (reqctx->processed /
- AES_BLOCK_SIZE));
+ ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
+ AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
ret = chcr_update_tweak(req, iv, 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
@@ -1099,6 +1118,7 @@
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct cipher_wr_param wrparam;
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
int bytes;
if (err)
@@ -1160,6 +1180,7 @@
unmap:
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
+ chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
return err;
}
@@ -1186,7 +1207,10 @@
ablkctx->enckey_len, req->nbytes, ivsize);
goto error;
}
- chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+
+ err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+ if (err)
+ goto error;
if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
AES_MIN_KEY_SIZE +
sizeof(struct cpl_rx_phys_dsgl) +
@@ -1275,15 +1299,21 @@
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
int err, isfull = 0;
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ err = chcr_inc_wrcount(dev);
+ if (err)
+ return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
c_ctx(tfm)->tx_qidx))) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ err = -ENOSPC;
+ goto error;
+ }
}
err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
@@ -1294,15 +1324,23 @@
set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return isfull ? -EBUSY : -EINPROGRESS;
+error:
+ chcr_dec_wrcount(dev);
+ return err;
}
static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
int err, isfull = 0;
+ err = chcr_inc_wrcount(dev);
+ if (err)
+ return -ENXIO;
+
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
c_ctx(tfm)->tx_qidx))) {
isfull = 1;
@@ -1310,8 +1348,8 @@
return -ENOSPC;
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
- &skb, CHCR_DECRYPT_OP);
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ &skb, CHCR_DECRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
@@ -1323,7 +1361,6 @@
static int chcr_device_init(struct chcr_context *ctx)
{
struct uld_ctx *u_ctx = NULL;
- struct adapter *adap;
unsigned int id;
int txq_perchan, txq_idx, ntxq;
int err = 0, rxq_perchan, rxq_idx;
@@ -1332,19 +1369,17 @@
if (!ctx->dev) {
u_ctx = assign_chcr_device();
if (!u_ctx) {
+ err = -ENXIO;
pr_err("chcr device assignment fails\n");
goto out;
}
- ctx->dev = u_ctx->dev;
- adap = padap(ctx->dev);
- ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
- adap->vres.ncrypto_fc);
+ ctx->dev = &u_ctx->dev;
+ ntxq = u_ctx->lldi.ntxq;
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_chan_id = ctx->dev->tx_channel_id;
ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
- ctx->dev->rx_channel_id = 0;
spin_unlock(&ctx->dev->lock_chcr_dev);
rxq_idx = ctx->tx_chan_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
@@ -1369,23 +1404,13 @@
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
- if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
- /* To update tweak*/
- ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
- if (IS_ERR(ablkctx->aes_generic)) {
- pr_err("failed to allocate aes cipher for tweak\n");
- return PTR_ERR(ablkctx->aes_generic);
- }
- } else
- ablkctx->aes_generic = NULL;
-
tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
@@ -1399,8 +1424,8 @@
/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
* cannot be used as fallback in chcr_handle_cipher_response
*/
- ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->cra_name);
return PTR_ERR(ablkctx->sw_cipher);
@@ -1415,9 +1440,7 @@
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- crypto_free_skcipher(ablkctx->sw_cipher);
- if (ablkctx->aes_generic)
- crypto_free_cipher(ablkctx->aes_generic);
+ crypto_free_sync_skcipher(ablkctx->sw_cipher);
}
static int get_alg_config(struct algo_param *params,
@@ -1498,7 +1521,7 @@
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
+ FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1562,6 +1585,7 @@
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
struct uld_ctx *u_ctx = NULL;
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct sk_buff *skb;
u8 remainder = 0, bs;
unsigned int nbytes = req->nbytes;
@@ -1570,12 +1594,6 @@
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(h_ctx(rtfm));
- if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
- }
if (nbytes + req_ctx->reqlen >= bs) {
remainder = (nbytes + req_ctx->reqlen) % bs;
@@ -1586,10 +1604,27 @@
req_ctx->reqlen += nbytes;
return 0;
}
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
+ /* Detach state for CHCR means lldi or padap has been freed.
+ * Increasing the inflight count for the dev guarantees that
+ * lldi and padap remain valid.
+ */
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ h_ctx(rtfm)->tx_qidx))) {
+ isfull = 1;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ error = -ENOSPC;
+ goto err;
+ }
+ }
+
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
@@ -1629,6 +1664,8 @@
return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
return error;
}
@@ -1646,10 +1683,16 @@
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct hash_wr_param params;
struct sk_buff *skb;
struct uld_ctx *u_ctx = NULL;
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ int error = -EINVAL;
+
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
chcr_init_hctx_per_wr(req_ctx);
u_ctx = ULD_CTX(h_ctx(rtfm));
@@ -1686,19 +1729,25 @@
}
params.hash_size = crypto_ahash_digestsize(rtfm);
skb = create_hash_wr(req, &params);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto err;
+ }
req_ctx->reqlen = 0;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
+err:
+ chcr_dec_wrcount(dev);
+ return error;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
@@ -1707,17 +1756,24 @@
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(h_ctx(rtfm));
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
h_ctx(rtfm)->tx_qidx))) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ error = -ENOSPC;
+ goto err;
+ }
}
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1774,6 +1830,8 @@
return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
return error;
}
@@ -1781,6 +1839,7 @@
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
@@ -1789,19 +1848,26 @@
rtfm->init(req);
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
h_ctx(rtfm)->tx_qidx))) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ error = -ENOSPC;
+ goto err;
+ }
}
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1854,6 +1920,8 @@
return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
return error;
}
@@ -1925,6 +1993,7 @@
int digestsize, updated_digestsize;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+ struct chcr_dev *dev = h_ctx(tfm)->dev;
if (input == NULL)
goto out;
@@ -1967,6 +2036,7 @@
out:
+ chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
}
@@ -1983,14 +2053,13 @@
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- chcr_handle_aead_resp(aead_request_cast(req), input, err);
+ err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
break;
case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+ chcr_handle_cipher_resp(ablkcipher_request_cast(req),
input, err);
break;
-
case CRYPTO_ALG_TYPE_AHASH:
chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
}
@@ -2008,7 +2077,7 @@
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
chcr_init_hctx_per_wr(state);
- return 0;
+ return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
@@ -2042,7 +2111,6 @@
* ipad in hmacctx->ipad and opad in hmacctx->opad location
*/
shash->tfm = hmacctx->base_hash;
- shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
if (keylen > bs) {
err = crypto_shash_digest(shash, key, keylen,
hmacctx->ipad);
@@ -2215,10 +2283,7 @@
error = -ENOMEM;
goto err;
}
- reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
- CHCR_SRC_SG_SIZE, 0);
- reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
- CHCR_SRC_SG_SIZE, req->assoclen);
+
return 0;
err:
return error;
@@ -2249,7 +2314,7 @@
req->base.complete, req->base.data);
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
- aead_request_set_ad(subreq, req->assoclen);
+ aead_request_set_ad(subreq, req->assoclen);
return op_type ? crypto_aead_decrypt(subreq) :
crypto_aead_encrypt(subreq);
}
@@ -2268,10 +2333,10 @@
struct ulptx_sgl *ulptx;
unsigned int transhdr_len;
unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
- unsigned int kctx_len = 0, dnents;
- unsigned int assoclen = req->assoclen;
+ unsigned int kctx_len = 0, dnents, snents;
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL;
+ u8 *ivptr;
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@@ -2288,24 +2353,20 @@
if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
null = 1;
- assoclen = 0;
- reqctx->aad_nents = 0;
}
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
- (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
- req->assoclen);
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
+ (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
dnents += MIN_AUTH_SG; // For IV
-
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
- sizeof(chcr_req->key_ctx);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
- : (sgl_len(reqctx->src_nents + reqctx->aad_nents
- + MIN_GCM_SG) * 8);
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
+ : (sgl_len(snents) * 8);
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
@@ -2315,7 +2376,7 @@
chcr_aead_common_exit(req);
return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb) {
error = -ENOMEM;
goto err;
@@ -2331,16 +2392,16 @@
* to the hardware spec
*/
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
- assoclen + 1);
- chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
+ FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
+ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- assoclen ? 1 : 0, assoclen,
- assoclen + IV + 1,
+ null ? 0 : 1 + IV,
+ null ? 0 : IV + req->assoclen,
+ req->assoclen + IV + 1,
(temp & 0x1F0) >> 4);
chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
temp & 0xF,
- null ? 0 : assoclen + IV + 1,
+ null ? 0 : req->assoclen + IV + 1,
temp, temp);
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
@@ -2367,23 +2428,24 @@
memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
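+ /* WR layout after the key context: the physical DSGL for the
+ * destination, a 16-byte IV copied inline, then the ULPTX SGL
+ * for the source.
+ */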
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+ ulptx = (struct ulptx_sgl *)(ivptr + IV);
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
- memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
- memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+ memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
CTR_RFC3686_IV_SIZE);
- *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+ *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
} else {
- memcpy(reqctx->iv, req->iv, IV);
+ memcpy(ivptr, req->iv, IV);
}
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
atomic_inc(&adap->chcr_stats.cipher_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
transhdr_len, temp, 0);
reqctx->skb = skb;
@@ -2470,8 +2532,7 @@
}
void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen)
+ struct ulptx_sgl *ulptx)
{
struct ulptx_walk ulp_walk;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2484,28 +2545,20 @@
buf += reqctx->b0_len;
}
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- buf, assoclen, 0);
- buf += assoclen;
- memcpy(buf, reqctx->iv, IV);
- buf += IV;
- sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- buf, req->cryptlen, req->assoclen);
+ buf, req->cryptlen + req->assoclen, 0);
} else {
ulptx_walk_init(&ulp_walk, ulptx);
if (reqctx->b0_len)
ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
- &reqctx->b0_dma);
- ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
- ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
- ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
- req->assoclen);
+ reqctx->b0_dma);
+ ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
+ req->assoclen, 0);
ulptx_walk_end(&ulp_walk);
}
}
void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
unsigned short qid)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2516,12 +2569,10 @@
u32 temp;
dsgl_walk_init(&dsgl_walk, phys_cpl);
- if (reqctx->b0_len)
- dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
- dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
- dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
- temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
- dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
+ dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
+ temp = req->assoclen + req->cryptlen +
+ (reqctx->op ? -authsize : authsize);
+ dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}
@@ -2589,7 +2640,7 @@
ulptx_walk_init(&ulp_walk, ulptx);
if (param->bfr_len)
ulptx_walk_add_page(&ulp_walk, param->bfr_len,
- &reqctx->hctx_wr.dma_addr);
+ reqctx->hctx_wr.dma_addr);
ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
param->sg_len, reqctx->hctx_wr.src_ofst);
reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
@@ -2689,8 +2740,7 @@
return 0;
}
-static void generate_b0(struct aead_request *req,
- struct chcr_aead_ctx *aeadctx,
+static int generate_b0(struct aead_request *req, u8 *ivptr,
unsigned short op_type)
{
unsigned int l, lp, m;
@@ -2701,7 +2751,7 @@
m = crypto_aead_authsize(aead);
- memcpy(b0, reqctx->iv, 16);
+ memcpy(b0, ivptr, 16);
lp = b0[0];
l = lp + 1;
@@ -2715,6 +2765,8 @@
rc = set_msg_len(b0 + 16 - l,
(op_type == CHCR_DECRYPT_OP) ?
req->cryptlen - m : req->cryptlen, l);
+
+ return rc;
}
static inline int crypto_ccm_check_iv(const u8 *iv)
@@ -2727,29 +2779,31 @@
}
static int ccm_format_packet(struct aead_request *req,
- struct chcr_aead_ctx *aeadctx,
+ u8 *ivptr,
unsigned int sub_type,
unsigned short op_type,
unsigned int assoclen)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
int rc = 0;
if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
- reqctx->iv[0] = 3;
- memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
- memcpy(reqctx->iv + 4, req->iv, 8);
- memset(reqctx->iv + 12, 0, 4);
+ ivptr[0] = 3;
+ memcpy(ivptr + 1, &aeadctx->salt[0], 3);
+ memcpy(ivptr + 4, req->iv, 8);
+ memset(ivptr + 12, 0, 4);
} else {
- memcpy(reqctx->iv, req->iv, 16);
+ memcpy(ivptr, req->iv, 16);
}
if (assoclen)
*((unsigned short *)(reqctx->scratch_pad + 16)) =
htons(assoclen);
- generate_b0(req, aeadctx, op_type);
+ rc = generate_b0(req, ivptr, op_type);
/* zero the ctr value */
- memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+ memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
return rc;
}
@@ -2762,7 +2816,7 @@
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
- unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
+ unsigned int c_id = a_ctx(tfm)->tx_chan_id;
unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
@@ -2775,7 +2829,7 @@
((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
auth_offset = req->cryptlen ?
- (assoclen + IV + 1 + ccm_xtra) : 0;
+ (req->assoclen + IV + 1 + ccm_xtra) : 0;
if (op_type == CHCR_DECRYPT_OP) {
if (crypto_aead_authsize(tfm) != req->cryptlen)
tag_offset = crypto_aead_authsize(tfm);
@@ -2785,13 +2839,13 @@
sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
- 2, assoclen + 1 + ccm_xtra);
+ 2, 1);
sec_cpl->pldlen =
- htonl(assoclen + IV + req->cryptlen + ccm_xtra);
+ htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
/* For CCM there will be b0 always. So AAD start will be 1 always */
sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- 1, assoclen + ccm_xtra, assoclen
- + IV + 1 + ccm_xtra, 0);
+ 1 + IV, IV + assoclen + ccm_xtra,
+ req->assoclen + IV + 1 + ccm_xtra, 0);
sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
auth_offset, tag_offset,
@@ -2838,10 +2892,11 @@
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
unsigned int transhdr_len;
- unsigned int dst_size = 0, kctx_len, dnents, temp;
+ unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
unsigned int sub_type, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL;
+ u8 *ivptr;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(a_ctx(tfm)->dev);
@@ -2857,37 +2912,38 @@
error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
if (error)
goto err;
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
+ (reqctx->op ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
+ CHCR_DST_SG_SIZE, 0);
dnents += MIN_CCM_SG; // For IV and B0
dst_size = get_space_for_phys_dsgl(dnents);
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
+ snents += MIN_CCM_SG; //For B0
kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
reqctx->b0_len) <= SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
reqctx->b0_len, 16) :
- (sgl_len(reqctx->src_nents + reqctx->aad_nents +
- MIN_CCM_SG) * 8);
+ (sgl_len(snents) * 8);
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
- reqctx->b0_len, transhdr_len, reqctx->op)) {
+ reqctx->b0_len, transhdr_len, reqctx->op)) {
atomic_inc(&adap->chcr_stats.fallback);
chcr_aead_common_exit(req);
return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
@@ -2897,16 +2953,17 @@
aeadctx->key, aeadctx->enckey_len);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+ ulptx = (struct ulptx_sgl *)(ivptr + IV);
+ error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
if (error)
goto dstmap_fail;
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
atomic_inc(&adap->chcr_stats.aead_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
reqctx->b0_len) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
transhdr_len, temp, 0);
@@ -2931,10 +2988,11 @@
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
- unsigned int transhdr_len, dnents = 0;
+ unsigned int transhdr_len, dnents = 0, snents;
unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL;
+ u8 *ivptr;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(a_ctx(tfm)->dev);
@@ -2946,19 +3004,19 @@
error = chcr_aead_common_init(req);
if (error)
return ERR_PTR(error);
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
(reqctx->op ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
+ CHCR_DST_SG_SIZE, 0);
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
dnents += MIN_GCM_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
- (sgl_len(reqctx->src_nents +
- reqctx->aad_nents + MIN_GCM_SG) * 8);
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
+ (sgl_len(snents) * 8);
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
@@ -2968,7 +3026,7 @@
chcr_aead_common_exit(req);
return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb) {
error = -ENOMEM;
goto err;
@@ -2979,15 +3037,15 @@
//Offset of tag from end
temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
- a_ctx(tfm)->dev->rx_channel_id, 2,
- (assoclen + 1));
+ a_ctx(tfm)->tx_chan_id, 2, 1);
chcr_req->sec_cpl.pldlen =
- htonl(assoclen + IV + req->cryptlen);
+ htonl(req->assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- assoclen ? 1 : 0, assoclen,
- assoclen + IV + 1, 0);
+ assoclen ? 1 + IV : 0,
+ assoclen ? IV + assoclen : 0,
+ req->assoclen + IV + 1, 0);
chcr_req->sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
+ FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
temp, temp);
chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
@@ -3002,25 +3060,26 @@
memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
/* prepare a 16 byte iv */
/* S A L T | IV | 0x00000001 */
if (get_aead_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
- memcpy(reqctx->iv, aeadctx->salt, 4);
- memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
+ memcpy(ivptr, aeadctx->salt, 4);
+ memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
} else {
- memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
+ memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
}
- *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+ *((unsigned int *)(ivptr + 12)) = htonl(0x01);
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+ ulptx = (struct ulptx_sgl *)(ivptr + 16);
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
atomic_inc(&adap->chcr_stats.aead_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
transhdr_len, temp, reqctx->verify);
reqctx->skb = skb;
@@ -3118,12 +3177,12 @@
aeadctx->mayverify = VERIFY_HW;
break;
case ICV_12:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
- aeadctx->mayverify = VERIFY_HW;
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
break;
case ICV_14:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
- aeadctx->mayverify = VERIFY_HW;
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
break;
case ICV_16:
aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
@@ -3293,9 +3352,9 @@
{
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
- struct crypto_cipher *cipher;
unsigned int ck_size;
int ret = 0, key_ctx_size = 0;
+ struct crypto_aes_ctx aes;
aeadctx->enckey_len = 0;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
@@ -3338,23 +3397,15 @@
/* Calculate the H = CIPH(K, 0 repeated 16 times).
* It will go in key context
*/
- cipher = crypto_alloc_cipher("aes-generic", 0, 0);
- if (IS_ERR(cipher)) {
- aeadctx->enckey_len = 0;
- ret = -ENOMEM;
- goto out;
- }
-
- ret = crypto_cipher_setkey(cipher, key, keylen);
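+ /* Use the AES library directly; computing H needs no cipher tfm. */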
+ ret = aes_expandkey(&aes, key, keylen);
if (ret) {
aeadctx->enckey_len = 0;
- goto out1;
+ goto out;
}
memset(gctx->ghash_h, 0, AEAD_H_SIZE);
- crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
+ aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
+ memzero_explicit(&aes, sizeof(aes));
-out1:
- crypto_free_cipher(cipher);
out:
return ret;
}
@@ -3438,7 +3489,6 @@
SHASH_DESC_ON_STACK(shash, base_hash);
shash->tfm = base_hash;
- shash->flags = crypto_shash_get_flags(base_hash);
bs = crypto_shash_blocksize(base_hash);
align = KEYCTX_ALIGN_PAD(max_authsize);
o_ptr = actx->h_iopad + param.result_size + align;
@@ -3565,27 +3615,42 @@
create_wr_t create_wr_fn)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct uld_ctx *u_ctx;
struct sk_buff *skb;
int isfull = 0;
+ struct chcr_dev *cdev;
- if (!a_ctx(tfm)->dev) {
+ cdev = a_ctx(tfm)->dev;
+ if (!cdev) {
pr_err("chcr : %s : No crypto device.\n", __func__);
return -ENXIO;
}
+
+ if (chcr_inc_wrcount(cdev)) {
+ /* Detach state for CHCR means lldi or padap has been freed;
+ * the fallback statistics counter cannot be bumped here since
+ * it lives in the freed adapter.
+ */
+ return chcr_aead_fallback(req, reqctx->op);
+ }
+
u_ctx = ULD_CTX(a_ctx(tfm));
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
a_ctx(tfm)->tx_qidx)) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ chcr_dec_wrcount(cdev);
return -ENOSPC;
+ }
}
/* Form a WR from req */
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
- if (IS_ERR(skb) || !skb)
- return PTR_ERR(skb);
+ if (IS_ERR_OR_NULL(skb)) {
+ chcr_dec_wrcount(cdev);
+ return PTR_ERR_OR_ZERO(skb);
+ }
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
@@ -3722,7 +3787,6 @@
.setkey = chcr_aes_rfc3686_setkey,
.encrypt = chcr_aes_encrypt,
.decrypt = chcr_aes_decrypt,
- .geniv = "seqiv",
}
}
},
@@ -4178,7 +4242,6 @@
.setauthsize = chcr_authenc_null_setauthsize,
}
},
-
};
/*