// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

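/*
 * A minimal sketch (using the desc_constr.h helpers employed throughout
 * this file) of how a job descriptor of the above shape is put together:
 *
 *	len = desc_len(sh_desc);
 *	init_job_desc_shared(desc, sh_desc_dma, len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *
 * init_aead_job() and init_skcipher_job() below follow this pattern and
 * add the algorithm-specific IV/assoclen loads.
 */
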
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

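/*
 * Worked out with the usual kernel crypto sizes (AES_MAX_KEY_SIZE = 32,
 * CTR_RFC3686_NONCE_SIZE = 4, SHA512_DIGEST_SIZE = 64), this comes to
 * 32 + 4 + 2 * 64 = 164 bytes: the largest cipher key plus nonce, plus
 * room for an HMAC-SHA512 split key (ipad and opad halves).
 */
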
#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

#define CHACHAPOLY_DESC_JOB_IO_LEN	(AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};

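/*
 * For authenc algorithms, ctx->key holds both keys back to back - a
 * rough picture, following aead_setkey() and the rfc3686 nonce lookup
 * in aead_set_sh_desc():
 *
 *	ctx->key: [ auth split key, padded to adata.keylen_pad ]
 *	          [ encryption key, cdata.keylen bytes         ]
 *
 * where, for rfc3686, the last CTR_RFC3686_NONCE_SIZE bytes of the
 * encryption key material are the nonce.
 */
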
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
			ctx->adata.keylen_pad;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
				    ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
				    ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

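/*
 * The decision above is the pattern used by every *_set_sh_desc()
 * helper in this file: when the job descriptor I/O commands plus the
 * shared descriptor, with the key embedded as immediate data, still fit
 * in the 64-word descriptor buffer, the key is inlined (key_inline =
 * true, key_virt set); otherwise the descriptor references the key by
 * bus address (key_dma).
 */
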
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 ctx1_iv_off = 0;
	u32 *desc, *nonce = NULL;
	u32 inl_mask;
	unsigned int data_len[2];
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->cdata.keylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm>
	 * would result in invalid opcodes (last bytes of user key) in
	 * the resulting descriptor. Use DKP<ptr,imm> instead => both
	 * virtual and dma key addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
			       false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, alg->caam.geniv, is_rfc3686,
			       nonce, ctx1_iv_off, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
				  ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}

static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->cdata.key_virt = key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}

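/*
 * Key format for the length check above: a 32-byte ChaCha20 key,
 * followed by a salt whenever the advertised IV is shorter than
 * CHACHAPOLY_IV_SIZE (the IPsec rfc7539esp case):
 *
 *	key = { CHACHA_KEY_SIZE bytes | (CHACHAPOLY_IV_SIZE - ivsize) salt }
 */
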
static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);

	print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;
	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

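/*
 * A sketch of the key blob aead_setkey() receives - the generic
 * authenc() key layout parsed by crypto_authenc_extractkeys(): an
 * rtattr carrying the encryption key length, then the two keys:
 *
 *	{ rtattr(CRYPTO_AUTHENC_KEYA_PARAM): enckeylen } | authkey | enckey
 *
 * keys.authkey and keys.enckey point into the caller's buffer.
 */
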
static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		return err;

	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
	      aead_setkey(aead, key, keylen);

	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int err;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int err;

	err = aes_check_keylen(keylen - 4);
	if (err) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int err;

	err = aes_check_keylen(keylen - 4);
	if (err) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	return rfc4543_set_sh_desc(aead);
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
			     skcipher);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/* skcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

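/*
 * XTS keys are the concatenation of two equal-size AES keys (data key
 * and tweak key), hence the 2 * AES_{MIN,MAX}_KEY_SIZE check above:
 * 32 bytes for XTS-AES-128 or 64 bytes for XTS-AES-256.
 */
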
/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @mapped_src_nents: number of segments in input h/w link table
 * @mapped_dst_nents: number of segments in output h/w link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	int mapped_src_nents;
	int mapped_dst_nents;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @mapped_src_nents: number of segments in input h/w link table
 * @mapped_dst_nents: number of segments in output h/w link table
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     and IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	int mapped_src_nents;
	int mapped_dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

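/*
 * Rough memory layout of an extended descriptor - both flavors are
 * allocated as a single block (see aead_edesc_alloc() below):
 *
 *	+--------------+--------------+--------------------+------+
 *	| struct edesc | h/w job desc | sec4_sg link table | [IV] |
 *	+--------------+--------------+--------------------+------+
 *
 * edesc->sec4_sg points at the link-table region; for skciphers the IV
 * sits after the link table, as noted in the kernel-doc above.
 */
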
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
	int ecode = 0;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, ecode);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
	int ecode = 0;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, ecode);
}

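/*
 * In the completion callbacks above and below, a non-zero hardware
 * status word is translated by caam_jr_strstatus() into a negative
 * errno, which is what the crypto API ultimately sees via the request
 * completion call.
 */
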
static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				  void *context)
{
	struct skcipher_request *req = context;
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ecode = 0;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	skcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (ivsize && !ecode) {
		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
		       ivsize);
		print_hex_dump_debug("dstiv  @"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
				     edesc->src_nents > 1 ? 100 : ivsize, 1);
	}

	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	kfree(edesc);

	skcipher_request_complete(req, ecode);
}

static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				  void *context)
{
	struct skcipher_request *req = context;
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ecode = 0;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	skcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (ivsize && !ecode) {
		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
		       ivsize);

		print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
				     ivsize, 1);
	}

	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	kfree(edesc);

	skcipher_request_complete(req, ecode);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
						    0;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (!edesc->mapped_dst_nents) {
			dst_dma = 0;
			out_options = 0;
		} else if (edesc->mapped_dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
			out_options = 0;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);
}

static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}

static void init_chachapoly_job(struct aead_request *req,
				struct aead_edesc *edesc, bool all_contig,
				bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int assoclen = req->assoclen;
	u32 *desc = edesc->hw_desc;
	u32 ctx_iv_off = 4;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize != CHACHAPOLY_IV_SIZE) {
		/* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
		ctx_iv_off += 4;

		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		assoclen -= ivsize;
	}

	append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);

	/*
	 * For IPsec load the IV further in the same register.
	 * For RFC7539 simply load the 12 bytes nonce in a single operation
	 */
	append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
			   LDST_SRCDST_BYTE_CONTEXT |
			   ctx_iv_off << LDST_OFFSET_SHIFT);
}

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	/*
	 * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
	 * having DPOVRD as destination.
	 */
	if (ctrlpriv->era < 3)
		append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
	else
		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}

/*
 * Fill in skcipher job descriptor
 */
static void init_skcipher_job(struct skcipher_request *req,
			      struct skcipher_edesc *edesc,
			      const bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc = edesc->hw_desc;
	u32 *sh_desc;
	u32 in_options = 0, out_options = 0;
	dma_addr_t src_dma, dst_dma, ptr;
	int len, sec4_sg_index = 0;

	print_hex_dump_debug("presciv@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	dev_dbg(jrdev, "asked=%d, cryptlen=%d\n",
		(int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);

	caam_dump_sg("src    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     edesc->src_nents > 1 ? 100 : req->cryptlen, 1);

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (ivsize || edesc->mapped_src_nents > 1) {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
		in_options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
	}

	append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
		out_options = in_options;
	} else if (!ivsize && edesc->mapped_dst_nents == 1) {
		dst_dma = sg_dma_address(req->dst);
	} else {
		dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
			  sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}

	append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
}

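/*
 * A note on the in-place (req->src == req->dst) branch in
 * init_skcipher_job(): the output sequence reuses the input link table
 * shifted by one entry when an IV is present - assuming the allocator
 * (not shown in this excerpt) builds the input table as
 * [IV, source segments...], the output then starts at the data entries.
 */
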
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			return ERR_PTR(dst_nents);
		}
	} else {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			return ERR_PTR(src_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* Cover also the case of null (zero length) input data */
		if (src_nents) {
			mapped_src_nents = dma_map_sg(jrdev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(jrdev, "unable to map source\n");
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		/* Cover also the case of null (zero length) output data */
		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(jrdev, req->dst,
						      dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(jrdev, "unable to map destination\n");
				dma_unmap_sg(jrdev, req->src, src_nents,
					     DMA_TO_DEVICE);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	}

	/*
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = !(mapped_src_nents > 1);

	sec4_sg_index = 0;
	if (mapped_src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_len,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += mapped_src_nents;
	}
	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}

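/*
 * All request entry points below follow the same submission shape - a
 * minimal sketch (error handling elided), with gcm_encrypt() as the
 * model:
 *
 *	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
 *	init_gcm_job(req, edesc, all_contig, true);
 *	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, aead_encrypt_done, req);
 *	if (!ret)
 *		return -EINPROGRESS;	(completion runs the _done callback)
 *	aead_unmap(jrdev, edesc, req);
 *	kfree(edesc);
 */
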
static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);

	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int chachapoly_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret;

	edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
				 true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	desc = edesc->hw_desc;

	init_chachapoly_job(req, edesc, all_contig, true);
	print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int chachapoly_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret;

	edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
				 false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	desc = edesc->hw_desc;

	init_chachapoly_job(req, edesc, all_contig, false);
	print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001558static int ipsec_gcm_encrypt(struct aead_request *req)
1559{
David Brazdil0f672f62019-12-10 10:32:29 +00001560 return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001561}
1562
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);

	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

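/*
 * AES-GCM decryption; the hardware checks the authentication tag, and
 * any failure is reported through aead_decrypt_done().
 */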
static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, false);

	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

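/*
 * rfc4106/rfc4543 wrapper: same assoclen check as ipsec_gcm_encrypt(),
 * then defer to gcm_decrypt().
 */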
static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
}

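/*
 * Generic authenc decryption entry point; dumps the source S/G list
 * when debugging is enabled before building the job descriptor.
 */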
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	caam_dump_sg("dec src@" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     req->assoclen + req->cryptlen, 1);

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, false);

	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the extended descriptor for an skcipher request
 */
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
						   int desc_bytes)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (req->dst != req->src) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	if (!ivsize && mapped_src_nents == 1)
		sec4_sg_ents = 0; // no need for an input hw s/g table
	else
		sec4_sg_ents = mapped_src_nents + !!ivsize;
	dst_sg_idx = sec4_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (output S/G)
	 *	pad output S/G, if needed
	 * else if (input S/G) ...
	 *	pad input S/G, if needed
	 */
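	/*
	 * Worked example (assuming pad_sg_nents() rounds up to the next
	 * multiple of 4, per the 4-entries-at-a-time rule above): with
	 * ivsize != 0, mapped_src_nents = 2, mapped_dst_nents = 1 and
	 * src != dst, the input table is [IV, src0, src1] (3 entries,
	 * unpadded) and the output table [dst0, IV] is padded to 4, so
	 * sec4_sg_ents = 3 + pad_sg_nents(1 + 1) = 7.
	 */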
	if (ivsize || mapped_dst_nents > 1) {
		if (req->src == req->dst)
			sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
		else
			sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
						     !!ivsize);
	} else {
		sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
	}

	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);

	/*
	 * allocate space for base edesc and hw desc commands, link tables, IV
	 */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
						  desc_bytes);

	/* Make sure IV is located in a DMAable area */
	if (ivsize) {
		iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(jrdev, iv_dma)) {
			dev_err(jrdev, "unable to map IV\n");
			caam_unmap(jrdev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, 0, 0);
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}

		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
	}
	if (dst_sg_idx)
		sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
			      !!ivsize, 0);

	if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
		sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
			      dst_sg_idx, 0);

	if (ivsize)
		dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
				   mapped_dst_nents, iv_dma, ivsize, 0);

	if (ivsize || mapped_dst_nents > 1)
		sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
				    mapped_dst_nents);

	if (sec4_sg_bytes) {
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			caam_unmap(jrdev, req->src, req->dst, src_nents,
				   dst_nents, iv_dma, ivsize, 0, 0);
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->iv_dma = iv_dma;

	print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     sec4_sg_bytes, 1);

	return edesc;
}

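/*
 * Encrypt req->cryptlen bytes; a zero-length request is a no-op that
 * completes synchronously with success.
 */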
static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int ret = 0;

	if (!req->cryptlen)
		return 0;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_skcipher_job(req, edesc, true);

	print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		skcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

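/*
 * Decrypt counterpart of skcipher_encrypt(); only the direction flag
 * passed to init_skcipher_job() and the completion callback differ.
 */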
static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int ret = 0;

	if (!req->cryptlen)
		return 0;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_skcipher_job(req, edesc, false);
	desc = edesc->hw_desc;

	print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		skcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

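/*
 * Table of skcipher algorithm templates exported by this driver;
 * entries flagged as registered are torn down in caam_algapi_exit().
 */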
static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = ctr_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc3686_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ecb(des)",
				.cra_driver_name = "ecb-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ecb(aes)",
				.cra_driver_name = "ecb-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ecb(des3_ede)",
				.cra_driver_name = "ecb-des3-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ecb(arc4)",
				.cra_driver_name = "ecb-arc4-caam",
				.cra_blocksize = ARC4_BLOCK_SIZE,
			},
			.setkey = arc4_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = ARC4_MIN_KEY_SIZE,
			.max_keysize = ARC4_MAX_KEY_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB,
	},
};

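/*
 * AEAD algorithm templates: GCM variants first, then single-pass
 * authenc combinations of a cipher with an HMAC (each in plain,
 * echainiv or seqiv flavors), and ChaCha20-Poly1305 at the end.
 */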
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = GCM_RFC4543_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = gcm_encrypt,
			.decrypt = gcm_decrypt,
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(md5),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha1),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc7539(chacha20,poly1305)",
				.cra_driver_name = "rfc7539-chacha20-poly1305-"
						   "caam",
				.cra_blocksize = 1,
			},
			.setkey = chachapoly_setkey,
			.setauthsize = chachapoly_setauthsize,
			.encrypt = chachapoly_encrypt,
			.decrypt = chachapoly_decrypt,
			.ivsize = CHACHAPOLY_IV_SIZE,
			.maxauthsize = POLY1305_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
					   OP_ALG_AAI_AEAD,
			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
					   OP_ALG_AAI_AEAD,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc7539esp(chacha20,poly1305)",
				.cra_driver_name = "rfc7539esp-chacha20-"
						   "poly1305-caam",
				.cra_blocksize = 1,
			},
			.setkey = chachapoly_setkey,
			.setauthsize = chachapoly_setauthsize,
			.encrypt = chachapoly_encrypt,
			.decrypt = chachapoly_decrypt,
			.ivsize = 8,
			.maxauthsize = POLY1305_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
					   OP_ALG_AAI_AEAD,
			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
					   OP_ALG_AAI_AEAD,
			.nodkp = true,
		},
	},
};

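/*
 * Common per-transform setup: grab a job ring and DMA-map the block of
 * shared descriptors and key material held in struct caam_ctx. The
 * mapping is bidirectional when the shared descriptor derives the split
 * key in place (DKP, available on Era 6+ parts), since the device then
 * writes back into the context.
 */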
3408static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
3409 bool uses_dkp)
3410{
3411 dma_addr_t dma_addr;
3412 struct caam_drv_private *priv;
3413
3414 ctx->jrdev = caam_jr_alloc();
3415 if (IS_ERR(ctx->jrdev)) {
3416 pr_err("Job Ring Device allocation for transform failed\n");
3417 return PTR_ERR(ctx->jrdev);
3418 }
3419
3420 priv = dev_get_drvdata(ctx->jrdev->parent);
3421 if (priv->era >= 6 && uses_dkp)
3422 ctx->dir = DMA_BIDIRECTIONAL;
3423 else
3424 ctx->dir = DMA_TO_DEVICE;
3425
3426 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
3427 offsetof(struct caam_ctx,
3428 sh_desc_enc_dma),
3429 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
3430 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
3431 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
3432 caam_jr_free(ctx->jrdev);
3433 return -ENOMEM;
3434 }
3435
3436 ctx->sh_desc_enc_dma = dma_addr;
3437 ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
3438 sh_desc_dec);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003439 ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
3440
3441 /* copy descriptor header template value */
3442 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
3443 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
3444
3445 return 0;
3446}
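
/*
 * Note: when a setkey handler later rewrites one of the shared descriptors,
 * the corresponding slice of the mapping created above must be pushed back
 * to the device. A minimal sketch of that pattern (the real calls live in
 * the *_set_sh_desc()/setkey helpers earlier in this file):
 *
 *	dma_sync_single_for_device(ctx->jrdev, ctx->sh_desc_enc_dma,
 *				   desc_bytes(ctx->sh_desc_enc), ctx->dir);
 */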

static int caam_cra_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);

	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
				false);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		 container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
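
/*
 * AEADs flagged .nodkp (the chacha20/poly1305 templates above) never run
 * the split-key derivation, so their context stays DMA_TO_DEVICE even on
 * Era 6+ parts; everything else gets the bidirectional mapping needed by
 * DKP.
 */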

static void caam_exit_common(struct caam_ctx *ctx)
{
	/* size must mirror the dma_map_single_attrs() in caam_init_common() */
	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
			       offsetof(struct caam_ctx, sh_desc_enc_dma),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	caam_exit_common(crypto_skcipher_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

void caam_algapi_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}
}

static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init;
	alg->exit = caam_cra_exit;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
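
/*
 * CRYPTO_ALG_ASYNC advertises that requests may complete asynchronously
 * via callback; CRYPTO_ALG_KERN_DRIVER_ONLY marks the implementation as a
 * hardware driver with no direct userspace interface. A caller that needs
 * synchronous completion masks the former out at allocation time, e.g.
 * (illustrative only):
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
 *
 * which skips ASYNC implementations such as these in favor of a
 * synchronous one.
 */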

int caam_algapi_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int i = 0, err = 0;
	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
	u32 arc4_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false, gcm_support;

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks:
	 * Era < 10 parts pack this into the CHA ID/instantiation registers,
	 * while Era 10+ parts expose one version register per CHA (vreg.*).
	 */
	if (priv->era < 10) {
		u32 cha_vid, cha_inst, aes_rn;

		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
			   CHA_ID_LS_DES_SHIFT;
		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >>
			    CHA_ID_LS_ARC4_SHIFT;
		ccha_inst = 0;
		ptha_inst = 0;

		aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
			 CHA_ID_LS_AES_MASK;
		gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
	} else {
		u32 aesa, mdha;

		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
		mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;

		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
		aes_inst = aesa & CHA_VER_NUM_MASK;
		md_inst = mdha & CHA_VER_NUM_MASK;
		ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
		ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
		arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;

		gcm_support = aesa & CHA_VER_MISC_AES_GCM;
	}

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/* Skip ARC4 algorithms if not supported by device */
		if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4)
			continue;

		/*
		 * Check support for AES modes not available
		 * on LP devices.
		 */
		if (aes_vid == CHA_VER_VID_AES_LP &&
		    (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
		    OP_ALG_AAI_XTS)
			continue;

		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->skcipher.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
			continue;

		/* Skip POLY1305 algorithms if not supported by device */
		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
			continue;

		/* Skip GCM algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
		    alg_aai == OP_ALG_AAI_GCM && !gcm_support)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (is_mdha(c2_alg_sel) &&
		    (!md_inst || t_alg->aead.maxauthsize > md_limit))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}
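
/*
 * After a successful probe the registered transforms show up in
 * /proc/crypto; an illustrative (abridged) entry:
 *
 *	name         : rfc7539(chacha20,poly1305)
 *	driver       : rfc7539-chacha20-poly1305-caam
 *	priority     : 3000
 *	type         : aead
 *	ivsize       : 12
 *	maxauthsize  : 16
 */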